diff --git "a/516.jsonl" "b/516.jsonl" new file mode 100644--- /dev/null +++ "b/516.jsonl" @@ -0,0 +1,633 @@ +{"seq_id":"269593867","text":"import h2o\nfrom h2o.estimators.deepwater import H2ODeepWaterEstimator\n\n# Start or connect to H2O\nh2o.init(nthreads=-1, strict_version_check=False)\n\n# Import data and transform data\ntrain = h2o.import_file(\"bigdata/laptop/mnist/train.csv.gz\")\n\nfeatures = list(range(0,784))\ntarget = 784\n\ntrain[target] = train[target].asfactor()\n\n# Set up grid\nhidden_opt = [[200,200], [1024,1024]]\nlearn_rate_opt = [1e-6, 1e-5]\nhyper_parameters = {\"hidden\": hidden_opt, \"learning_rate\":learn_rate_opt}\n\n# Build model and train model grid\nfrom h2o.grid.grid_search import H2OGridSearch\nmodel_grid = H2OGridSearch(H2ODeepWaterEstimator, hyper_params=hyper_parameters)\n\nmodel_grid.train(x=features, y=target, training_frame=train, epochs=100, activation=\"Rectifier\", ignore_const_cols=False, mini_batch_size=256, input_dropout_ratio=0.1, hidden_dropout_ratios=[0.5,0.5], stopping_rounds=3, stopping_tolerance=0.05, stopping_metric=\"misclassification\", score_interval=2, score_duty_cycle=0.5, score_training_samples=1000, score_validation_samples=1000, nfolds=5, gpu=True, seed=1234)\n\n# Evaluate model\nprint(model_grid)\n","sub_path":"h2o-docs/src/booklets/v2_2015/source/DW_Vignette_code_examples/mnist-grid-cart.py","file_name":"mnist-grid-cart.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"577197395","text":"import numpy as np\nimport sympy as sp\n\nfrom .exceptions import ModelException\nfrom .algebra import inverse_coord_maps, create_ds\nimport logging\nlogger = logging.getLogger(__name__)\nj = None\nde = None\n\n\ndef start_julia():\n global j\n global de\n\n logger.info(\"Starting Julia Interpreter.\")\n from diffeqpy import de as de\n\n import julia\n\n j = julia.Julia()\n logger.info(\"Julia Interpreter Loaded.\")\n\n\ndef simulate(system,\n timespan,\n x0,\n dx0=None,\n control_vars=None):\n \"\"\"\n Simulate the system dynamics.\n\n Args:\n system obj:`BondGraph`:\n timespan tuple(float):\n initial list(float):\n control_vars (str,list(str), dict(str)):\n\n Returns:\n\n \"\"\"\n if system.ports:\n raise ModelException(\n \"Cannot Simulate %s: unconnected ports %s\",\n system, system.ports)\n\n if system.control_vars and not control_vars:\n raise ModelException(\"Control variable not specified\")\n\n if not de:\n start_julia()\n\n tspan = tuple(float(t) for t in timespan)\n X0 = np.array(x0, dtype=np.float64)\n assert len(X0) == len(system.state_vars)\n try:\n func = _build_ode(system, control_vars)\n problem = de.ODEProblem(func, X0, tspan)\n except NotImplementedError:\n func, diffs = _build_dae(system, control_vars)\n if dx0:\n DX0 = np.array(dx0, dtype=np.float64)\n else:\n DX0 = np.zeros(X0.shape, dtype=np.float64)\n problem = de.DAEProblem(func, DX0, X0, tspan, differential_vars=diffs)\n\n sol = de.solve(problem, dense=True)\n\n if sol.retcode not in (\"Default\", \"Success\"):\n raise SolverException(\"Integration error: Solver returned %s \"\n % sol.retcode, sol)\n\n t = np.transpose(sol.t)\n\n return np.resize(t, (len(t), 1)), np.transpose(sol.u).T\n\n\ndef _build_ode(system, control_vars=None):\n coords, mappings, linear, nonlinear, constraints = system.system_model()\n\n ss_map, js_map, cv_map = mappings\n m = len(ss_map)\n offset = m + 2*len(js_map)\n\n A = linear[0:m, 0:m]\n B = linear[0:m, m:offset]\n\n if not B.is_zero or not (A - 
sp.eye(m)).is_zero:\n raise NotImplementedError(\"DAE's not yet implemented\")\n x, subs, string_subs = _generate_cv_subs(mappings, control_vars)\n\n L = -linear[0:m, offset:offset+m]\n\n Lu = linear[0:m, offset+m:].dot(coords[offset + m:])\n\n if isinstance(Lu, sp.Symbol):\n Lu = [Lu]\n\n Nu = nonlinear[0:m, :]\n N = [-sp.Add(left, right).subs(subs) for left, right in zip(Lu, Nu)]\n\n # DX = LX + N(X, t)\n julia_string = \"\"\"function dxdt(dX, X, p, t)\\n\"\"\"\n\n for var, var_string in string_subs.items():\n julia_string += f\" {var} = {var_string}\\n\"\n\n for i in range(m):\n julia_string += f\" dX[{i+1}] =\"\n lx = sp.simplify(L[i,:].dot(x))\n nl = sp.sympify(N[i])\n\n if lx:\n julia_string += f\"{repr(lx)}\"\n\n julia_string += repr(nl) + \"\\n\"\n\n julia_string += \"end\"\n julia_string = julia_string.replace(\"**\",\"^\")\n func = j.eval(julia_string)\n\n return func\n\n\ndef _generate_cv_subs(mappings, control_vars=None):\n\n ss_map, js_map, cv_map = mappings\n m = len(ss_map)\n k = len(cv_map)\n\n x = [sp.symbols(f\"x_{i}\") for i in range(m)]\n X = [sp.symbols(f\"X[{i+1}]\") for i in range(m)]\n subs = list(zip([sp.symbols(f\"dx_{i}\") for i in range(m)],\n [sp.symbols(f\"dX[{i+1}]\") for i in range(m)]))\n\n subs += list(zip(x, X))\n t = sp.S('t')\n string_subs = {}\n\n if isinstance(control_vars, (float, int, complex)) and\\\n len(k) == 1:\n subs += [\n (sp.Symbol('u_0'), control_vars),\n (sp.Symbol('du_0'), 0)\n ]\n\n elif isinstance(control_vars, list) and len(control_vars) == k:\n for i, cv_string in enumerate(control_vars):\n u = sp.Symbol(f'u_{i}')\n du = sp.Symbol(f'du_{i}')\n try:\n fx = sp.sympify(cv_string)\n dfx = sum(fx.diff(x_i) for x_i in x) + fx.diff(t)\n subs.append((u, fx))\n subs.append((du, dfx))\n except sp.SympifyError:\n u_str = f\"u{i+1}\"\n for i in reversed(range(m)):\n cv_string = cv_string.replace(\n f\"x_{i}\", f\"X[{i+1}]\"\n )\n string_subs[u_str] = cv_string\n subs.append(sp.symbols(f\"u_{i}, {u_str}\"))\n\n elif isinstance(control_vars, dict):\n for key, cv_string in control_vars:\n u = sp.Symbol(f'{key}')\n du = sp.Symbol(f'd{key}')\n try:\n fx = sp.sympify(cv_string)\n dfx = sum(fx.diff(x_i) for x_i in x) + fx.diff(t)\n pair = [(du, dfx), (u, fx)]\n for pp in pair:\n subs = [\n s.subs(pp) for s in subs\n ]\n subs.append(pp)\n\n except sp.SympifyError:\n u_str = f\"u{len(string_subs)}\"\n for i in reversed(range(m)):\n cv_string = cv_string.replace(\n f\"x_{i}\", f\"X[{i+1}]\"\n )\n\n string_subs[u_str] = cv_string\n subs.append(sp.symbols(f\"{key}, {u_str}\"))\n else:\n raise ValueError(\"Invalid control variables: %s\", repr(control_vars))\n\n return X, subs, string_subs\n\n\ndef _build_dae(system, control_vars=None):\n\n mappings, coords = inverse_coord_maps(*system.basis_vectors)\n ss_map, js_map, cv_map = mappings\n\n m = len(ss_map)\n\n if len(js_map) > 0:\n raise NotImplementedError(\"Bond Graph has unconnected Ports\")\n\n derivatives = set(coords[0:m])\n differential_vars = []\n\n # construct julia coords\n\n # x = [sp.symbols(f\"x_{i}\") for i in range(m)]\n # subs = list(zip([sp.symbols(f\"dx_{i}\") for i in range(m)],\n # [sp.symbols(f\"dX[{i+1}]\") for i in range(m)]))\n #\n # subs += list(zip(x, [sp.symbols(f\"X[{i+1}]\") for i in range(m)]))\n\n # subs, cv_text = _generate_cv_subs(control_vars, subs)\n x, subs, string_subs = _generate_cv_subs(mappings, control_vars)\n\n julia_string = \"function f(dX, X, p, t)\\n\"\n end_string = \" return [\"\n i = 0\n\n for var, var_string in string_subs.items():\n julia_string += f\" {var} = 
{var_string}\\n\"\n\n for relation in system.constitutive_relations:\n r = relation.subs(subs)\n if not r:\n continue\n\n differential_vars.append(\n derivatives & relation.atoms() != set()\n )\n\n temp_string = str(r)\n\n julia_string += f\" res{i+1} = {temp_string}\\n\"\n\n if i > 0:\n end_string += ', '\n end_string += f\"res{i+1}\"\n i += 1\n assert len(differential_vars) == i\n end_string += \"]\\nend\"\n julia_string += end_string\n logger.warning(\"Julia Function\")\n logger.warning(julia_string)\n if not j: start_julia()\n\n func = j.eval(julia_string)\n\n return func, differential_vars\n\n\n\ndef julia():\n global j\n\n\nclass Simulation(object):\n def __init__(self, model,\n timespan=None,\n x0=None,\n dx_0=None,\n control_vars=None):\n\n coords, mapping, linear, nonlinear, constraints = model.system_model(\n control_vars=control_vars\n )\n\n self._state_map, self._port_map, self._cv_map = mapping\n\n d_system, port_func, constraints = create_ds(\n coords, mapping, linear, nonlinear, constraints\n )\n\n self._solver = None\n self._julia_func = None\n\n def run(self, x0, timespan):\n pass\n\n\n\n\nclass SolverException(Exception):\n pass","sub_path":"BondGraphTools/sim_tools.py","file_name":"sim_tools.py","file_ext":"py","file_size_in_byte":7769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"160543492","text":"from __future__ import division, print_function\nfrom pkg_resources import Requirement, resource_stream\nimport numpy as np\nimport itertools\nimport logging\n\nnt = { 'a': 0, 'c': 1, 'g': 2, 't': 3 }\ntops = lambda s: 4*s[:,:-1]+s[:,1:]\nclass energyfuncs:\n \"\"\"\n Energy functions based on SantaLucia's 2004 paper.\n\n mismatchtype is one of 'max', 'loop', or 'dangle', specifying how to\n consider mismatches. 'max' is probably the best choice, but is slowest -\n it takes the maximum interaction of the 'loop' and 'dangle' options.\n \"\"\"\n def __init__(self, mismatchtype='max', targetdG=7):\n import os\n try:\n dsb = resource_stream('stickydesign', 'stickydesign/params/dnastackingbig.csv')\n except:\n try:\n dsb = resource_stream('stickydesign', 'params/dnastackingbig.csv')\n except IOError:\n raise IOError(\"Error loading dnastackingbig.csv\")\n try:\n dgl = resource_stream('piperine', 'data/dnadangle.csv')\n except:\n try:\n this_dir, this_filename = os.path.split(__file__)\n dgl = open( os.path.join(this_dir, \"data\", \"dnadangle.csv\") )\n except IOError:\n raise IOError(\"Error loading dnadangle.csv\")\n self.targetdG=targetdG\n self.nndG_full = -np.loadtxt(dsb ,delimiter=',')\n self.dgldG_full = -np.loadtxt(dgl ,delimiter=',')\n self.taildG = 1.3\n dsb.close()\n dgl.close()\n self.initdG = 0.0 # 1.96 DISABLED FOR NOW\n self.nndG = self.nndG_full[np.arange(0,16),15-np.arange(0,16)]\n # 30-01-15: The only dangle contexts we are interested in are 3' dangle\n # s. Select those from the Santa Lucia table. We'll have to flip the\n # order of the vector, though, to mach the 5->3 orientation of the gene\n # rated toeholds. 
To flip, we need to count up to 15 with the opposite-\n # endian order, in terms of quaternary representation\n indcs = 4*np.tile(np.arange(4), 4) + np.repeat(np.arange(4), 4)\n self.dgldG = self.dgldG_full[1, indcs]\n # 30-01-15: As of now, the dangle base is set to C, so make a lookup ta\n # ble ordered by terminating toehold base\n self.dgldG_fixedC = self.dgldG_full[1, np.arange(4) + 4 * nt['c']]\n if mismatchtype == 'max':\n self.uniform = lambda x,y: np.maximum( self.uniform_loopmismatch(x,y), \\\n self.uniform_danglemismatch(x,y) \\\n )\n elif mismatchtype == 'loop':\n self.uniform = self.uniform_loopmismatch\n elif mismatchtype == 'dangle':\n self.uniform = self.uniform_danglemismatch\n else:\n raise InputError(\"Mismatchtype {0} is not supported.\".format(mismatchtype))\n\n def th_external_dG(self, seqs):\n # Convert nearest-neighbor stacks to dG-table lookup indices.\n # Sum up the near-neighbor energy contributions\n # Add context-specific dG values, eg tail or dangle contributions\n seqs_len = np.size(seqs, 1)\n # The external context involves a 3' dangle, so exclude the 3' flank\n # base.\n cols_external = np.arange(seqs_len-1)\n tops_external = tops(seqs[:, cols_external])\n nndG_external = np.sum(self.nndG[tops_external], 1)\n # The external-context dangle is fixed at C.\n dgldG_external = self.dgldG_fixedC[seqs[:, seqs_len-2]]\n return nndG_external + dgldG_external - self.taildG - self.initdG\n\n def th_internal_dG(self, seqs):\n # Convert nearest-neighbor stacks to dG-table lookup indices\n # Sum up and return the near-neighbor energy contributions\n # Add context-specific dG values, eg tail or dangle contributions\n seqs_len = np.size(seqs, 1)\n # The internal context involves a truncated toehold. Remove first 3' to\n # ehold base.\n cols_internal = np.concatenate((np.arange(seqs_len-2), [seqs_len-1]))\n tops_internal = tops(seqs[:, cols_internal])\n nndG_internal = np.sum(self.nndG[tops_internal], 1)\n return nndG_internal - self.taildG - self.initdG\n\n def matching_uniform(self, seqs):\n # Make a boolean vector representing which toeholds' external context dG\n # is further from the target dG than than their internal context dG\n dG_external = self.th_external_dG(seqs)\n dG_internal = self.th_internal_dG(seqs)\n external_further_bool = np.abs(dG_external - self.targetdG) >\\\n np.abs(dG_internal - self.targetdG)\n return np.choose(external_further_bool, [dG_internal, dG_external])\n\n def uniform_loopmismatch(self, seqs1, seqs2):\n if seqs1.shape != seqs2.shape:\n if seqs1.ndim == 1:\n seqs1 = endarray( np.repeat(np.array([seqs1]),seqs2.shape[0],0), seqs1.endtype )\n else:\n raise InputError(\"Lengths of sequence arrays are not acceptable.\")\n assert seqs1.endtype == seqs2.endtype\n endtype = seqs1.endtype\n\n endlen = seqs1.endlen\n plen = endlen-1\n\n # Run through the\n # TODO: replace this with cleaner code\n if endtype=='DT':\n ps1 = seqs1[:,1:-1]*4+seqs1[:,2:]\n pa1 = seqs1[:,0]*4+seqs1[:,1]\n pac1 = (3-seqs1[:,0])*4+seqs2[:,-1]\n ps2 = seqs2[:,::-1][:,:-2]*4+seqs2[:,::-1][:,1:-1]\n pa2 = seqs2[:,0]*4+seqs2[:,1]\n pac2 = (3-seqs2[:,0])*4+seqs1[:,-1]\n if endtype=='TD':\n ps1 = seqs1[:,:-2]*4+seqs1[:,1:-1]\n pa1 = seqs1[:,-2]*4+seqs1[:,-1]\n pac1 = seqs2[:,0]*4+(3-seqs1[:,-1])\n ps2 = seqs2[:,::-1][:,1:-1]*4+seqs2[:,::-1][:,2:]\n pa2 = seqs2[:,-2]*4+seqs2[:,-1]\n pac2 = (seqs1[:,0])*4+(3-seqs2[:,-1])\n\n # Shift here is considering the first strand as fixed, and the second one as\n # shifting. 
The shift is the offset of the bottom one in terms of pair\n # sequences (thus +2 and -1 instead of +1 and 0).\n en = np.zeros( (ps1.shape[0], 2*plen) )\n for shift in range(-plen+1,plen):\n #import pdb\n #pdb.set_trace()\n en[:,plen+shift-1] = np.sum( \\\n self.nndG_full[ ps1[:,max(shift,0):plen+shift], \\\n ps2[:,max(-shift,0):plen-shift] ], \\\n axis=1)\n en[:,plen-1] = en[:,plen-1] + self.nndG_full[pa1,pac1] + self.nndG_full[pa2,pac2]\n return np.amax(en,1) - self.initdG\n\n def uniform_danglemismatch(self, seqs1,seqs2,fast=True):\n if seqs1.shape != seqs2.shape:\n if seqs1.ndim == 1:\n seqs1 = endarray( np.repeat(np.array([seqs1]),seqs2.shape[0],0), seqs1.endtype )\n else:\n raise InputError(\"Lengths of sequence arrays are not acceptable.\")\n assert seqs1.endtype == seqs2.endtype\n endtype = seqs1.endtype\n s1 = tops(seqs1)\n s2 = tops(seqs2)\n l = s1.shape[1]\n s2r = np.fliplr(np.invert(s2)%16)\n s2r = s2r//4 + 4*(s2r%4)\n m = np.zeros((s1.shape[0],2*np.sum(np.arange(2,l+1))+l+1))\n r = np.zeros(m.shape[0])\n z = 0;\n if endtype == 'TD':\n s1c = s1[:,0:-1]\n s2rc = s2r[:,1:]\n s1l = np.hstack(( (4*(s2r[:,0]//4) + s1[:,0]//4).reshape(-1,1) , s1 ))\n s2rl = np.hstack(( s2r , (4*(s2r[:,-1]%4) + s1[:,-1]%4).reshape(-1,1) ))\n elif endtype == 'DT':\n s1c = s1[:,1:]\n s2rc = s2r[:,0:-1]\n s2rl = np.hstack(( (4*(s1[:,0]//4) + s2r[:,0]//4).reshape(-1,1) , s2r ))\n s1l = np.hstack(( s1 , (4*(s1[:,-1]%4) + s2r[:,-1]%4).reshape(-1,1) ))\n for o in range(1,l-1):\n zn = l-1-o\n m[:,z:z+zn] = ( s1c[:,:-o]==s2rc[:,o:] ) * self.nndG[s1c[:,:-o]]\n z = z+zn+2\n m[:,z:z+zn] = ( s2rc[:,:-o]==s1c[:,o:] ) * self.nndG[s2rc[:,:-o]]\n z = z+zn+2\n m[:,z:z+l+1] = (s1l == s2rl) * self.nndG[s1l]\n i = 0\n im = len(m)\n # This needs to be changed to something faster\n if not fast:\n for xi in range(0,m.shape[0]):\n gm = 0\n g = 0\n for y in m[xi,:]:\n if y == 0:\n g = 0\n else:\n g += y\n if gm > g:\n gm = g\n r[xi] = gm\n i+=1\n if not i%1000:\n print(\"%d/%d\" % (i,im))\n else:\n from stickydesign import _stickyext\n x = m\n _stickyext.fastsub(x,r)\n\n return r-self.initdG\n\n\n","sub_path":"piperine/energyfuncs_james.py","file_name":"energyfuncs_james.py","file_ext":"py","file_size_in_byte":8782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"641781248","text":"\"\"\"\n1240. Tiling a Rectangle with the Fewest Squares\n\nGiven a rectangle of size n x m, find the minimum number of integer-sided squares that tile the rectangle.\n\nExample 1:\n\nInput: n = 2, m = 3\nOutput: 3\nExplanation: 3 squares are necessary to cover the rectangle.\n2 (squares of 1x1)\n1 (square of 2x2)\nExample 2:\n\nInput: n = 5, m = 8\nOutput: 5\nExample 3:\n\nInput: n = 11, m = 13\nOutput: 6\n\"\"\"\n\n\n\n\"\"\"\nThe basic idea is to fill the entire block bottom up. \nIn every step, find the lowest unfilled square first, and select a square with different possible sizes to fill it. \nWhat is the nodes in the graph? It is a height array (skyline) height_arr!!!!! \nThe start_node is height_arr = [0, 0, 0...], the end_node is height_arr = [m, m, m...].\nPruning:\n1. When the current cnt has exceeded the value of the current global optimal solution, then no need to move forward.\n2. 
Try largest square possible first (improves time by a lot).\n\"\"\"\nclass Solution:\n    def tilingRectangle(self, m: int, n: int) -> int:\n        def backtrack(curr_height, curr_cnt):\n            if all(h == m for h in curr_height):\n                self.min_cnt = min(self.min_cnt, curr_cnt)\n                return\n            \n            if curr_cnt >= self.min_cnt:    # pruning 1\n                return\n            \n            # The template step is to find the next candidate. what is the next candidate? It's the lowest unfilled area\n            # below is to find the lowest unfilled area\n            min_h = min(curr_height)\n            left_idx = curr_height.index(min_h)     # the left idx of the min_h\n            right_idx = left_idx \n            while right_idx + 1 < n and curr_height[right_idx+1] == min_h:     # get the right_idx of the min_h\n                right_idx += 1\n            width = right_idx - left_idx + 1    # now we have found the width and height of the lowest unfilled area,\n            height = m - min_h                  # we need to put our next square into the area\n            \n            for side_lens in range(min(width, height), 0, -1):    # note that we need to try every possible side_lens for the square. Iterating in reverse order is pruning 2\n                next_height = [h for h in curr_height]    # note: has to be a deep copy. Otherwise curr_height will be changed\n                for lens in range(side_lens):\n                    next_height[left_idx + lens] += side_lens\n                backtrack(next_height, curr_cnt + 1)        \n            \n            \n        self.min_cnt = m * n\n        backtrack([0 for _ in range(n)], 0)\n        return self.min_cnt\n","sub_path":"Solutions/1240.Tiling-a-Rectangle-with-the-Fewest-Squares.py","file_name":"1240.Tiling-a-Rectangle-with-the-Fewest-Squares.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"200252321","text":"#! python 3\n\"\"\"\nProject: duolingo\nFilename: home_page.py\nCreated by: PJC\nCreated on: March 04, 2017\n\"\"\"\n\n\nfrom pages.base_page import BasePage\nfrom pages.base_page import InvalidPageException\nfrom selenium.common.exceptions import NoSuchElementException\nimport re\n\n\nclass HomePage(BasePage):\n    \"\"\"\n    Simulates the Home page on Duolingo\n    \"\"\"\n\n    title = 'Duolingo: Home'\n\n    def __init__(self, driver):\n        super(HomePage, self).__init__(driver)\n        self.page_title = self.driver.title\n\n    def _validate_page(self, driver):\n\n        try:\n\n            if self.title == driver.title:\n                return True\n\n        except:\n\n            raise (InvalidPageException('Home page not loaded'))\n\n    def click_change_daily_goal_button(self):\n        \"\"\"\n        Click the Change Daily Goal button.\n\n        :return: Returns True if the Change Daily Goal button was clicked, otherwise returns False.\n        \"\"\"\n\n        self.logger.debug('\\nclick_change_daily_goal')\n\n        try:\n\n            div_element = self.driver.find_element_by_css.selector('div.vocab-test')\n            h2_element = div_element.find_element_by_tag_name('h2')\n            link_element = h2_element.find_element_by_tag_name('a')\n            link_element.click()\n\n            self.logger.debug('Clicked Change Daily Goal Button')\n\n            return True\n\n        except NoSuchElementException:\n\n            self.logger.exception('click_change_daily_goal: Expected Element Not Found.')\n            return False\n\n    def click_change_language_button(self):\n        \"\"\"\n        Click the Change Language button.\n\n        :return: Returns True if the Change Language button was clicked, otherwise returns False.\n        \"\"\"\n\n        self.logger.debug('\\nclick_change_language_button')\n\n        try:\n\n            change_language_button = self.driver.find_element_by_css.selector('div.tree')\n            change_language_button.click()\n\n            self.logger.debug('Clicked Change Language Button')\n\n            return True\n\n        except NoSuchElementException:\n\n            self.logger.exception('click_change_language_button: Expected Element Not Found.')\n            return False\n\n    def 
click_lingot_store_button(self):\n \"\"\"\n Click the Lingot Store button.\n\n :return: Returns True if the Lingot Store button was clicked, otherwise returns False.\n \"\"\"\n\n self.logger.debug('\\nclick_lingot_store_button')\n\n try:\n\n button_element = self.driver.find_element_by_css.selector('button.btn-store')\n button_element.click()\n\n self.logger.debug('Clicked Lingot Store Button')\n\n return True\n\n except NoSuchElementException:\n\n self.logger.exception('click_lingot_store_button: Expected Element Not Found.')\n return False\n\n def click_strengthen_skills_button(self):\n \"\"\"\n Click the Strengthen Skills button.\n\n :return: Returns True if the Strengthen Skills button was clicked, otherwise returns False.\n \"\"\"\n\n self.logger.debug('\\nclick_strengthen_skills_button')\n\n try:\n\n strengthen_skills_button = self.driver.find_element_by_css.selector('a.btn-strengthen-skills')\n strengthen_skills_button.click()\n\n self.logger.debug('Clicked Strengthen Skills Button')\n\n return True\n\n except NoSuchElementException:\n\n self.logger.exception('click_strengthen_skills_button: Expected Element Not Found.')\n return False\n\n def gather_daily_stats(self):\n \"\"\"\n Gather the user's daily stats. The user's Day Streak and Hours Left will be returned. The results are returned\n in a dictionary with the following Keys; 'Streak' and 'Hours'.\n\n :return: Returns a dictionary with the user's stats if successful, otherwise returns False.\n \"\"\"\n\n self.logger.debug('\\ngather_daily_stats')\n\n daily_stats = {}\n\n try:\n\n div_element = self.driver.find_element_by_class_name('daily-goal-stats')\n span_elements = div_element.find_elements_by_class_name('stat-text')\n daily_stats['Streak'] = span_elements[0].text\n daily_stats['Hours'] = span_elements[1].text\n\n self.logger.debug('Gathered Daily Stats: Day Streak {0} Hours Left {1}'.format(daily_stats['Streak'],\n daily_stats['Hours']))\n\n return daily_stats\n\n except NoSuchElementException:\n\n self.logger.exception('gather_daily_stats: Expected Element Not Found.')\n return False\n\n def gather_skill_info(self):\n \"\"\"\n Gather the Language and Skill Level listed in the Skill Tree section.\n\n :return: Returns a list that contains the Language and Skill Level. 
On failure will return False.\n \"\"\"\n\n self.logger.debug('\\ngather_skill_info')\n\n skill_tree_info = []\n\n try:\n\n span_element = self.driver.find_element_by_class('skill-tree-header')\n # Grab the Language\n language = span_element.find_element_by_tag_name('h1').text()\n language = re.sub(' skills$', '', language)\n skill_tree_info.append(language)\n\n # Grab the Skill Level\n level = span_element.find_element_by_class_name('level-text')\n skill_tree_info.append(level)\n\n self.logger.debug('Gathered Skill Tree Info: {0} {1}'.format(language, level))\n\n return skill_tree_info\n\n except NoSuchElementException:\n\n self.logger.exception('gather_skill_info: Expected Element Not Found.')\n return False\n\n def gather_user_xp(self):\n \"\"\"\n Gather amount of user's XP.\n\n :return: Returns the user's XP number if successful, otherwise returns False.\n \"\"\"\n\n self.logger.debug('\\ngather_user_xp')\n\n try:\n\n div_element = self.driver.find_element_by_id('daily-donut-text')\n xp_points = div_element.find_element_by_class_name('stat-text').text\n\n self.logger.debug('Gathered XP Points: {0}'.format(xp_points))\n\n return xp_points\n\n except NoSuchElementException:\n\n self.logger.exception('gather_user_xp: Expected Element Not Found.')\n return False\n\n def is_skill_locked(self, skill_element):\n \"\"\"\n Return the whether a skill is locked or not.\n\n :return: Returns True if the skill is locked, otherwise returns False. If neither is found returns Error.\n \"\"\"\n\n self.logger.debug('\\nis_skill_locked')\n\n try:\n\n # Look for locked skill\n skill_element.find_element_by_css_selector('span.locked')\n self.logger.debug('Skill Is Locked')\n\n return True\n\n except NoSuchElementException:\n\n try:\n\n # Look for unlocked skill\n skill_element.find_element_by_css_selector('span.unlocked')\n self.logger.debug('Skill Is Unlocked')\n\n return False\n\n except NoSuchElementException:\n\n return 'Error'\n\n # Skill Tree\n # TODO: Confirm Lessons Left\n # TODO: Click Test Out Button\n\n\n # Friends\n # TODO: Click Find Friends On Facebook\n # TODO: Click Send Invite\n # TODO: Click Search\n\n # Follow Duolingo\n # TODO: Click Facebook\n # TODO: Click Twitter\n # TODO: Click Google","sub_path":"pages/home_page.py","file_name":"home_page.py","file_ext":"py","file_size_in_byte":7500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"199237816","text":"import discord\nfrom discord.ext import commands\nfrom .utils.dataIO import dataIO\nfrom .utils import checks\nfrom __main__ import send_cmd_help, settings\nfrom cogs.utils.chat_formatting import escape_mass_mentions, box\nimport os\nimport logging\nimport asyncio\nfrom discord.ext import commands\n\n\nnumbs = {\n \"next\": \"➡\",\n \"back\": \"⬅\",\n \"exit\": \"✅\"\n}\nclass newcomer:\n \"\"\"A racist cog\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n self.filepath = \"data/register/settings.json\"\n self.settings = dataIO.load_json(self.filepath) \n\n async def slow_deletion(self, messages):\n for message in messages:\n try:\n await self.bot.delete_message(message)\n except:\n pass\n\n async def _embedz(self, ctx, author : discord.Member):\n #make it more elegant and test it out later at 9 pm and also fix embed colors\n message1 = (\"***Please respect the people who made this place possible as they worked very hard to make this place fun and glorious***\\n\"\n \"Thank you for joining the server\\n\")\n message2 = (\"#1: Users need to abide by discord Terms of Service 
(https://discordapp.com/tos).\\n\"\n \"\\n\"\n \"#2. Be respectful! Any hate speech, racist remarks, or just disrespect will not be tolerated.\\n\"\n \"\\n\"\n \"#3. NSFW content is off-limits. This is a NO TOLERANCE RULE. Reading this is your only warning.\\n\"\n \"\\n\"\n \"#4. If an Administrator or a Moderator asks you to stop something, you should stop.\\n\"\n \"\\n\"\n \"#5. Do not spam tag the Teachers or the admins and helpers. It's super cool that they're here with us; but unless they're in the conversation try not to take them from their busy lives for trivial matters.\\n\"\n \"\\n\")\n message3 = (\"#6. Do you know what happens if you start spamming? You get put in the pusnished role - no activity for 24 hours (or at staff discretion) so be warned - you'll pretty much be a sitting duck for that entire time. No typing or talking. Try to avoid that.\\n\"\n \"\\n\"\n \"#7. Need Help with anything? - we are here to help! And if you want to contact us, just mention @Staff and one of us will pick up the query!\\n\"\n \"\\n\"\n \"#8.Use the music commands only in the MUSIC text channel.(Its wont even work try as much as you want!)\\n\"\n \"\\n\"\n \"#9.The use of bad words is strictly banned.\\n \"\n \"\\n\")\n message4 = (\"#10. You will be banned if you do something stupid(duh..)\\n\"\n \"\\n\"\n \"#11. No backseat modding. We've got this. You just do your thing!\\n\"\n \"\\n\"\n \"#12.Please Remember that Your mobile device is Just a companion for the discord.Recomended to use it on your computer(Works with windows,Linux,Mac and also browsers including chrome,Firefox,Internet explorer)\\n\") \n message5 = 'Cool now that you are done going through the rules \\n, press ✅ to exit out of this'\n embeds = []\n embed_message = [message1, message2, message3, message4, message5]\n\n for message in embed_message:\n footer_text = \"Please use the arrows to scroll through\" \n embed = discord.Embed(colour=0x6F4BC5, description=message) #set a proper color\n embed.title = 'Rules and Info'\n embed.set_author(name=str(author.name), icon_url=author.avatar_url)\n embed.set_footer(text=footer_text)\n embeds.append(embed) \n return embeds \n @commands.group(pass_context=True)\n async def register(self, ctx):\n \"\"\"Lets users register to the server\"\"\" \n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)\n\n @register.command(pass_context=True, name = 'student')\n async def _student(self,ctx):\n \"\"\"Lets you register as a student\"\"\" \n channel = ctx.message.channel\n author = ctx.message.author\n server = author.server\n user = author\n is_bot = self.bot.user.bot\n to_delete = []\n await self.bot.delete_message(ctx.message)\n self.settings[user.id] = {} \n for r in server.roles:\n if r.name == 'Student':\n await self.bot.say(\"{}, Hi welcome to the server,make sure you read through these rules\\n\".format(author.mention))\n embeds = await self._embedz(ctx, author)\n await self.cogs_menu(ctx, embeds, message=None, page=0, timeout=300)\n await self.bot.say(\"Cool now that you have read the **rules and info**, Please Input your __**First name**__\") \n real_name = await self.bot.wait_for_message(author=author, channel=channel, timeout=30)\n if real_name == None:\n self.bot.say(\"Please Use the command again\")\n else:\n self.settings[user.id].update({'realname' : real_name.clean_content})\n dataIO.save_json(self.filepath, self.settings)\n await self.bot.say('Thanks {}, Now please input __**only**__ **your section**'.format(author.mention))\n user_section = await 
self.bot.wait_for_message(author=author, channel=channel, timeout=30)\n if user_section == None:\n await self.bot.say('Welcome the Faips discord server, Hope you have fun here')\n else:\n self.settings[user.id].update({'section' : user_section.clean_content})\n dataIO.save_json(self.filepath, self.settings)\n n1ck = '{}[12-{}]'.format(real_name.clean_content, user_section.clean_content)\n await self.bot.change_nickname(user, n1ck)\n await self.bot.send_message(user, \"You are now a Student!\")\n await self.bot.add_roles(user, r)\n async for message in self.bot.logs_from(channel, limit=8):\n to_delete.append(message)\n to_remove_role_names = ['Newcomers', 'Registering']\n to_remove_roles = [r for r in server.roles if r.name in to_remove_role_names]\n if len(to_remove_roles) > 0:\n await self.bot.remove_roles(user, *to_remove_roles) \n async for message in self.bot.logs_from(channel, limit=8):\n to_delete.append(message)\n await self.slow_deletion(to_delete)\n await self.bot.say(\"@everyone ,In order to register,please type `,register student` if you are a student and `,register teacher`if you are a teacher\")\n @register.command(pass_context=True, name = 'teacher')\n async def _teacher(self, ctx):\n \"\"\"Lets you register as a Teacher\"\"\" \n author = ctx.message.author\n channel = ctx.message.channel\n server = author.server\n user = author\n to_delete = []\n for r in server.roles:\n if r.name == 'Teacher':\n await self.bot.say('{}, Hi Maam/sir, we are glad to have you here. Please read through the rules. Use the arrows to scroll.'.format(author.mention))\n embeds = await self._embedz(ctx, author)\n await self.cogs_menu(ctx, embeds, message=None, page=0, timeout=300)\n await self.bot.say('Thankyou, Now please enter your name')\n real_name = await self.bot.wait_for_message(author=author, channel=channel, timeout=30)\n if real_name == None:\n self.bot.say(\"Please Use the command again\")\n else:\n self.settings[user.id].update({'realname' : real_name.clean_content})\n dataIO.save_json(self.filepath, self.settings) \n n1ck = '{}'.format(real_name.clean_content)\n await self.bot.change_nickname(author, n1ck)\n await self.bot.send_message(user, 'Welcome the Faips discord server, Hope you have fun here')\n await self.bot.add_roles(user, r)\n async for message in self.bot.logs_from(channel, limit=8):\n to_delete.append(message)\n to_remove_role_names = ['Newcomers', 'Registering']\n to_remove_roles = [r for r in server.roles if r.name in to_remove_role_names]\n if len(to_remove_roles) > 0:\n await self.bot.remove_roles(user, *to_remove_roles) \n async for message in self.bot.logs_from(channel, limit=8):\n to_delete.append(message)\n await self.slow_deletion(to_delete)\n await self.bot.say(\"@everyone ,In order to register,please type `,register student` if you are a student and `,register teacher`if you are a teacher\") \n\n\n async def cogs_menu(self, ctx, cog_list: list,\n message: discord.Message=None,\n page=0, timeout: int=300):\n cog = cog_list[page]\n if not message:\n message =\\\n await self.bot.send_message(ctx.message.channel, embed=cog)\n await self.bot.add_reaction(message, \"⬅\")\n await self.bot.add_reaction(message, \"✅\")\n await self.bot.add_reaction(message, \"➡\")\n else:\n message = await self.bot.edit_message(message, embed=cog)\n react = await self.bot.wait_for_reaction(\n message=message, user=ctx.message.author, timeout=timeout,\n emoji=[\"➡\", \"⬅\", \"✅\"]\n )\n if react is None:\n try:\n await self.bot.remove_reaction(message, \"⬅\", self.bot.user)\n await 
self.bot.remove_reaction(message, \"✅\", self.bot.user)\n await self.bot.remove_reaction(message, \"➡\", self.bot.user)\n except:\n pass\n return None\n reacts = {v: k for k, v in numbs.items()}\n react = reacts[react.reaction.emoji]\n if react == \"next\":\n next_page = 0\n if page == len(cog_list) - 1:\n next_page = 0 # Loop around to the first item\n else:\n next_page = page + 1\n return await self.cogs_menu(ctx, cog_list, message=message,\n page=next_page, timeout=timeout)\n elif react == \"back\":\n next_page = 0\n if page == 0:\n next_page = len(cog_list) - 1 # Loop around to the last item\n else:\n next_page = page - 1\n return await self.cogs_menu(ctx, cog_list, message=message,\n page=next_page, timeout=timeout)\n else:\n try:\n return await\\\n self.bot.delete_message(message)\n except:\n pass \n \ndef setup(bot):\n bot.add_cog(newcomer(bot))\n","sub_path":"newcomer/newcomer.py","file_name":"newcomer.py","file_ext":"py","file_size_in_byte":11205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"518266212","text":"from __future__ import print_function\n\nimport errno\nimport os\nimport stat\nimport shutil\n\nfrom collections import Counter\nfrom itertools import ifilterfalse\nfrom pathlib import Path\n\nfrom inselect.lib.inselect_error import InselectError\n\nDEBUG_PRINT = False\n\n\ndef debug_print(*args, **kwargs):\n if DEBUG_PRINT:\n print(*args, **kwargs)\n\ndef make_readonly(path):\n \"\"\"Alters path to be read-only and return the original mode\n \"\"\"\n path = Path(path)\n mode = path.stat()[stat.ST_MODE]\n path.chmod(mode ^ (stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH))\n return mode\n\ndef validate_normalised(boxes):\n for l,t,w,h in boxes:\n if not (l>=0 and t>=0 and l<=1 and t<=1 and w>0 and l+w<=1 and h>0 and\n t+h<=1):\n raise InselectError('One or more boxes are not normalised')\n\ndef rmtree_readonly(path):\n \"\"\"Like shutil.rmtree() but removes read-only files on Windows\n \"\"\"\n\n # http://stackoverflow.com/a/9735134\n def handle_remove_readonly(func, path, exc):\n excvalue = exc[1]\n if func in (os.rmdir, os.remove) and excvalue.errno == errno.EACCES:\n\n # ensure parent directory is writeable too\n pardir = os.path.abspath(os.path.join(path, os.path.pardir))\n if not os.access(pardir, os.W_OK):\n os.chmod(pardir, stat.S_IRWXU| stat.S_IRWXG| stat.S_IRWXO)\n\n os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) # 0777\n\n func(path)\n else:\n raise\n\n shutil.rmtree(str(path), ignore_errors=False, onerror=handle_remove_readonly)\n\ndef unique_everseen(iterable, key=None):\n \"List unique elements, preserving order. 
Remember all elements ever seen.\"\n    # Taken from https://docs.python.org/2/library/itertools.html\n    # unique_everseen('AAAABBBCCDAABBB') --> A B C D\n    # unique_everseen('ABBCcAD', str.lower) --> A B C D\n    seen = set()\n    seen_add = seen.add\n    if key is None:\n        for element in ifilterfalse(seen.__contains__, iterable):\n            seen_add(element)\n            yield element\n    else:\n        for element in iterable:\n            k = key(element)\n            if k not in seen:\n                seen_add(k)\n                yield element\n\ndef duplicated(v):\n    \"\"\"Returns values within v that appear more than once\n    \"\"\"\n    return [x for x, y in Counter(v).items() if y > 1]\n","sub_path":"inselect/lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"20234929","text":"from itertools import combinations\nfrom itertools import permutations\n\nfrom string import ascii_lowercase\n\n# Surprisingly, after 4 days I found that, conceptually, this is\n# a fusion problem of combinations (picking 2 out of 4 regardless of order)\n# and permutations (arranging them with order taken into account)\n\n# However, the output is not printed in the conceptual 4C2 x 2! order,\n# i.e., [('a', 'b'), ('a', 'c'), ('a', 'd'), ('b', 'c'), ('b', 'd'), ('c', 'd')]\n# is not expanded by applying 2! to each element of the list above,\n# but printed in the order produced by the recursive structure\n# ab ac ad ba bc bd ca cb cd da db dc\n\n#--------------\n# Probably, when picking 2 out of n,\n# sorted_total=sorted(total,key=lambda x: ord(x[0]))\n# works, but beyond that the ordering seems to come out wrong\n# so in the end it has to be implemented recursively\n\nif __name__==\"__main__\":\n    n, r = map(int, input().split())\n\n    #alphabets='abcdefghijklmnopqrstu'\n\n    alphabets=list(ascii_lowercase)\n    candidates=alphabets[:(n-1)+1]\n\n    result_combi=list(combinations(candidates,r))\n    # print(result_combi)\n\n    tmp=[list(x) for x in permutations(('a', 'b'))]\n    # print(tmp)\n\n    total=[]\n\n    for element in result_combi:\n        permuted=[list(x) for x in permutations(element)]\n        total.extend(permuted)\n    \n    # print(total)\n    sorted_total=sorted(total,key=lambda x: ord(x[0]))\n    # print(sorted_total)\n\n    for element in sorted_total:\n        print(''.join(element))\n","sub_path":"Algorithm/python/algorithmjobs/L8/L8_01combiNpermute_lib.py","file_name":"L8_01combiNpermute_lib.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"390496125","text":"\"\"\"\nThis example shows how to read point cloud data from a ZDF file, apply a binary mask, and visualize it.\n\nThe ZDF file for this sample can be found under the main instructions for Zivid samples.\n\"\"\"\n\nfrom pathlib import Path\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport open3d as o3d\nimport zivid\n\nfrom sample_utils.paths import get_sample_data_path\n\n\ndef _display_rgb(rgb, title):\n    \"\"\"Display RGB image.\n\n    Args:\n        rgb: RGB image (HxWx3 darray)\n        title: Image title\n\n    Returns None\n\n    \"\"\"\n    plt.figure()\n    plt.imshow(rgb)\n    plt.title(title)\n    plt.show(block=False)\n\n\ndef _display_depthmap(xyz):\n    \"\"\"Create and display depthmap.\n\n    Args:\n        xyz: X, Y and Z images (point cloud co-ordinates)\n\n    Returns None\n\n    \"\"\"\n    plt.figure()\n    plt.imshow(\n        xyz[:, :, 2],\n        vmin=np.nanmin(xyz[:, :, 2]),\n        vmax=np.nanmax(xyz[:, :, 2]),\n        cmap=\"viridis\",\n    )\n    plt.colorbar()\n    plt.title(\"Depth map\")\n    plt.show(block=False)\n\n\ndef _display_pointcloud(xyz, rgb):\n    \"\"\"Display point cloud provided from 'xyz' with colors from 'rgb'.\n\n    Args:\n        rgb: RGB image\n        xyz: X, Y and Z images (point cloud co-ordinates)\n\n    Returns None\n\n    \"\"\"\n    xyz = np.nan_to_num(xyz).reshape(-1, 3)\n    rgb = rgb.reshape(-1, 3)\n\n    point_cloud_open3d = 
o3d.geometry.PointCloud(o3d.utility.Vector3dVector(xyz))\n point_cloud_open3d.colors = o3d.utility.Vector3dVector(rgb / 255)\n\n visualizer = o3d.visualization.Visualizer() # pylint: disable=no-member\n visualizer.create_window()\n visualizer.add_geometry(point_cloud_open3d)\n\n visualizer.get_render_option().background_color = (0, 0, 0)\n visualizer.get_render_option().point_size = 1\n visualizer.get_render_option().show_coordinate_frame = True\n visualizer.get_view_control().set_front([0, 0, -1])\n visualizer.get_view_control().set_up([0, -1, 0])\n\n visualizer.run()\n visualizer.destroy_window()\n\n\ndef _main():\n\n app = zivid.Application()\n\n data_file = Path() / get_sample_data_path() / \"Zivid3D.zdf\"\n print(f\"Reading ZDF frame from file: {data_file}\")\n frame = zivid.Frame(data_file)\n\n point_cloud = frame.point_cloud()\n xyz = point_cloud.copy_data(\"xyz\")\n rgba = point_cloud.copy_data(\"rgba\")\n\n pixels_to_display = 300\n print(f\"Generating binary mask of central {pixels_to_display} x {pixels_to_display} pixels\")\n mask = np.zeros((rgba.shape[0], rgba.shape[1]), np.bool)\n height = frame.point_cloud().height\n width = frame.point_cloud().width\n h_min = int((height - pixels_to_display) / 2)\n h_max = int((height + pixels_to_display) / 2)\n w_min = int((width - pixels_to_display) / 2)\n w_max = int((width + pixels_to_display) / 2)\n mask[h_min:h_max, w_min:w_max] = 1\n\n _display_rgb(rgba[:, :, 0:3], \"RGB image\")\n\n _display_depthmap(xyz)\n _display_pointcloud(xyz, rgba[:, :, 0:3])\n input(\"Press Enter to continue...\")\n\n print(\"Masking point cloud\")\n xyz_masked = xyz.copy()\n xyz_masked[mask == 0] = np.nan\n\n _display_depthmap(xyz_masked)\n _display_pointcloud(xyz_masked, rgba[:, :, 0:3])\n input(\"Press Enter to close...\")\n\n\nif __name__ == \"__main__\":\n _main()\n","sub_path":"source/applications/advanced/mask_point_cloud.py","file_name":"mask_point_cloud.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"597688111","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 18 14:48:19 2020\r\n\r\n@author: douzi\r\n\"\"\"\r\n\r\ndef factor(num):\r\n i = 1\r\n res = 0\r\n while i < num:\r\n if num % i == 0:\r\n res += i\r\n i = i + 1\r\n return res\r\n\r\n\r\ndef main():\r\n while True:\r\n n = int(input(\"输入n:\"))\r\n print(factor(n))\r\n\r\nif __name__=='__main__':\r\n main()","sub_path":"苏大上机代码/python_project/03_历年真题期末期中/suda_practice/py01_factor_sum.py","file_name":"py01_factor_sum.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"330441373","text":"import pytest\nimport json\nfrom datetime import datetime\nfrom moto import mock_s3\nfrom s3_sat.bucket_subresource import BucketAcl\nfrom s3_sat.bucket_subresource import BucketCors\nfrom s3_sat.bucket_subresource import BucketLifecycle\nfrom s3_sat.bucket_subresource import BucketLogging\nfrom s3_sat.bucket_subresource import BucketPolicy\nfrom s3_sat.bucket_subresource import BucketTagging\nfrom s3_sat.subresource_factory import SubResourceFactory\n\n@mock_s3\ndef test_factory(s3):\n s3.create_bucket(Bucket=\"testing\")\n bucket = s3.Bucket(\"testing\")\n args = {\"acl\":True}\n\n subresources = SubResourceFactory().create(bucket, args)\n\n assert len(subresources) > 0\n\n@mock_s3\ndef test_get_acl(s3):\n s3.create_bucket(Bucket=\"testing\")\n bucket = s3.Bucket(\"testing\")\n bucket_acl = 
BucketAcl(bucket)\n content = bucket_acl.get_content()\n\n assert len(content) == 2\n\n\n@mock_s3\ndef test_get_cors_exception(s3):\n s3.create_bucket(Bucket=\"testing\")\n bucket = s3.Bucket(\"testing\")\n bucket_cors = BucketCors(bucket)\n\n content = bucket_cors.get_content()\n\n assert content.pop() == \"No CORS configuration\"\n\n\n@mock_s3\ndef test_get_cors(s3, s3_client):\n s3.create_bucket(Bucket=\"testing\")\n bucket = s3.Bucket(\"testing\")\n cors_configuration = {\n \"CORSRules\": [\n {\n \"AllowedHeaders\": [\"Authorization\"],\n \"AllowedMethods\": [\"GET\", \"PUT\"],\n \"AllowedOrigins\": [\"*\"],\n }\n ]\n }\n s3_client.put_bucket_cors(Bucket=\"testing\", CORSConfiguration=cors_configuration)\n\n bucket_cors = BucketCors(bucket)\n content = bucket_cors.get_content()\n\n assert len(content) == 1\n\n\n@mock_s3\ndef test_get_lifecycle_exception(s3):\n s3.create_bucket(Bucket=\"testing\")\n bucket = s3.Bucket(\"testing\")\n bucket_lifecycle = BucketLifecycle(bucket)\n\n content = bucket_lifecycle.get_content()\n\n assert content.pop() == \"No lifecycle configuration\"\n\n\n@mock_s3\ndef test_get_lifecycle(s3, s3_client):\n s3.create_bucket(Bucket=\"testing\")\n bucket = s3.Bucket(\"testing\")\n lifecycle = {\n \"Rules\": [\n {\n \"Expiration\": {\"Days\": 91,},\n \"ID\": \"90-days-to-glacier\",\n \"Prefix\": \"\",\n \"Status\": \"Enabled\",\n \"Transitions\": [{\"Days\": 90, \"StorageClass\": \"GLACIER\"},],\n \"NoncurrentVersionTransitions\": [\n {\"NoncurrentDays\": 90, \"StorageClass\": \"GLACIER\"},\n ],\n \"NoncurrentVersionExpiration\": {\"NoncurrentDays\": 91},\n },\n ]\n }\n s3_client.put_bucket_lifecycle_configuration(\n Bucket=\"testing\", LifecycleConfiguration=lifecycle\n )\n\n bucket_lifecycle = BucketLifecycle(bucket)\n content = bucket_lifecycle.get_content()\n\n assert len(content) == 1\n\n@mock_s3\ndef test_get_logging(s3):\n s3.create_bucket(Bucket=\"testing\")\n bucket = s3.Bucket(\"testing\")\n bucket_logging = BucketLogging(bucket)\n content = bucket_logging.get_content()\n\n assert len(content) == 1\n\n@mock_s3\ndef test_get_policy_exception(s3):\n s3.create_bucket(Bucket=\"testing\")\n bucket = s3.Bucket(\"testing\")\n bucket_policy = BucketPolicy(bucket)\n content = bucket_policy.get_content()\n\n assert content.pop() == \"No policy configuration\"\n\n@mock_s3\ndef test_get_policy(s3, s3_client):\n s3.create_bucket(Bucket=\"testing\")\n bucket = s3.Bucket(\"testing\")\n policy= {\n 'Version': '2012-10-17',\n 'Statement': [{\n 'Sid': 'AddPerm',\n 'Effect': 'Allow',\n 'Principal': '*',\n 'Action': ['s3:GetObject'],\n 'Resource': f'arn:aws:s3:::testing/*'\n }]\n}\n policy = json.dumps(policy)\n s3_client.put_bucket_policy(Bucket=\"testing\", Policy=policy)\n\n bucket_policy = BucketPolicy(bucket)\n content = bucket_policy.get_content()\n\n assert len(content) == 1\n\n@mock_s3\ndef test_get_tagging_exception(s3):\n s3.create_bucket(Bucket=\"testing\")\n bucket = s3.Bucket(\"testing\")\n bucket_tagging = BucketTagging(bucket)\n content = bucket_tagging.get_content()\n\n assert content.pop() == \"No tag available\"\n\n@mock_s3\ndef test_get_tagging(s3, s3_client):\n s3.create_bucket(Bucket=\"testing\")\n bucket = s3.Bucket(\"testing\")\n tagging = {\n 'TagSet': [\n {\n 'Key': 'string',\n 'Value': 'string'\n },\n ]\n }\n s3_client.put_bucket_tagging(Bucket=\"testing\", Tagging=tagging)\n\n bucket_tagging = BucketTagging(bucket)\n content = bucket_tagging.get_content()\n\n assert len(content) == 
1\n","sub_path":"tests/bucket_subresource_test.py","file_name":"bucket_subresource_test.py","file_ext":"py","file_size_in_byte":4585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"389214467","text":"# encoding: utf-8\nimport pandas as pd\nimport copy\nimport sys\nimport os\nimport time\nimport random\nsys.path.append(os.path.dirname(__file__))\nsys.path.append(os.path.dirname(os.path.dirname(__file__)))\nfrom node import NodeManager, TriggerStatus\n\n\nclass WebNodeManager(NodeManager):\n def __init__(self, node_config):\n super().__init__(node_config)\n\n @staticmethod\n def _print_trigger_status(trigger_event, trigger_map):\n print()\n # print(f'{pd.datetime.now().strftime(DATE_STR_FORMAT)}')\n for trigger_name, status in trigger_map.items():\n print('\\t\\t{} {}: {}'.format(\n trigger_event,\n trigger_name,\n status.name))\n print()\n\n def run(self):\n while True:\n # print('=' * 20 + pd.datetime.now().strftime(DATE_STR_FORMAT) + '=' * 20)\n all_trigger_status = copy.deepcopy(self.node_config.all_trigger_status)\n for _trigger_event, _item in all_trigger_status.items():\n # if status is None, node does not start running, so there is no status\n if all(map(lambda x: (x == TriggerStatus.FINISH) or (x is None), _item.values())):\n continue\n self._print_trigger_status(_trigger_event, _item)\n time.sleep(self.default_interval)\n pass\n\n# output all nodes ..\n# config_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), \"node_config/daily_data_update.yaml\")\n# print(os.path.dirname(os.path.dirname(__file__)))\n# ab = WebNodeManager(node_config=r'F:\\test\\node_workflow\\tests\\test_workflow\\test_config.yaml')\n#\n# all config\n# cd = ab.node_config.get_all_node_config()\n#\n# result = ab.run()\n# print(ab.run())\n\n\ndef geneteate_web_node():\n ab = WebNodeManager(node_config=r'F:\\test\\node_workflow\\tests\\test_workflow\\test_config.yaml')\n cd = ab.node_config.get_all_node_config()\n result = list()\n\n node_map = {node: index+1 for index, node in enumerate(list(cd.keys()))}\n node_list, links_list = list(), list()\n for k, v in cd.items():\n\n watch_trigger = v[\"watch_trigger\"]\n if watch_trigger == None:\n parents = None\n elif isinstance(watch_trigger, list):\n parents = [parent.split('/')[-1] for parent in v[\"watch_trigger\"]]\n else:\n parents = [v[\"watch_trigger\"].split('/')[-1]]\n\n node_list.append({\"id\": node_map[k], \"label\": k, 'age': 'kid', 'gender': 'male'})\n\n if watch_trigger == None: continue\n\n [links_list.append({\"from\": node_map[p], \"to\": node_map[k], 'relation': 'parent', 'arrows': 'to','color': { 'color': 'red'}}) for p in parents]\n # if watch_trigger:\n\n\n\n print(node_list)\n print('/n'*3)\n print(links_list)\n\n\n\ngeneteate_web_node()\n\n","sub_path":"node_workflow/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"146196112","text":"from .abstract_data_stream import AbstractDataStream\nimport numpy as np\nfrom datetime import datetime\n\nclass TimeStream(AbstractDataStream):\n\n def __init__(self, hourly_frequency=10, timestamp='1970-01-01T00:00:00.000', w_vals=None):\n self.hourly_frequency = hourly_frequency\n self.timestamp = timestamp\n if w_vals is not None:\n self.weights = w_vals\n else:\n self.create_weights()\n\n def create_weights(self):\n w_vals = {'hourly_frequency': self.hourly_frequency, 'timestamp': self.timestamp}\n self.weights = 
w_vals\n\n @property\n def weights(self):\n return self._weights\n\n @weights.setter\n def weights(self, w_vals):\n self._weights=w_vals\n \n self.timestamp = np.datetime64(w_vals['timestamp'])\n self.hourly_frequency = w_vals['hourly_frequency']\n self.ms_interval = int(60*60*1000.0/int(w_vals['hourly_frequency']))\n\n\n def get_samples(self, n_samples):\n n_ms = n_samples*self.ms_interval\n end_time_stamp = self.timestamp + np.timedelta64(n_ms, 'ms')\n data = np.arange(self.timestamp, end_time_stamp, np.timedelta64(self.ms_interval,'ms'), dtype='datetime64')\n \n w_vals = self.weights\n w_vals['timestamp'] = end_time_stamp\n self.weights = w_vals\n\n return np.matrix(data)","sub_path":"stats_series_generator/time_streams.py","file_name":"time_streams.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"315659763","text":"#!/usr/bin/env python\n\n# Compute some generally linear (whatever that means) points\n# and compute a linear function to 'match' the points.\n# This type of data is nice for testing/tutorials.\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# How many points do you want?\ndata_size = 200\n\n# Create the data\n\n# Random X values between -35 and 35\nX = np.random.uniform(-35, 35, size=data_size)\n# Compute the y values using the linear function of 3 * X + 2\ny = 3 * X + 2\n\n# To compute the random points multiply the out of our linear function\n# by -/+ 20 times a random number (0.0, 1.0]\n\n# A will be either -/+ 20\na = np.random.choice([-1, 1], size=data_size) * 20\n\n# Finish computing points by random * -/+ 20 from above plus the\n# y value of the linear funciton from above\npoints = np.random.random(size=data_size) * a + y\n\n# Plot the points\nplt.plot(X, points, 'ro')\n# Plot the linear function\nplt.plot(X, y)\nplt.show()\n","sub_path":"machine_learning/linear_data/python/linear_data.py","file_name":"linear_data.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"120448513","text":"# similar data cleaning to earlier, this time,\n# obtaining district (LEAID) dropout and graduation rates\n\nos.chdir(rootdir + 'seda/data/dataextract/')\n\n# rows are school districts in this dataset\ndropouts = pd.read_csv('nys-dropouts-2008-09.csv',low_memory = False)\n\ndropouts.head()\n\n# the codebook is here:\n# https://nces.ed.gov/ccd/data/txt/agdr08lay1a.txt\n# the key variables in the dataset that we want are:\n# LEAID DRP912 AFGR\n\n# as before, negatives are missing. however, here, we don't want to treat\n# missing as 0. We want to instead treat it as missing.\n# in pandas, missing is given by np.nan\n# (the object in numpy, np.nan, means missing)\n# see:\n# http://pandas.pydata.org/pandas-docs/stable/missing_data.html\n# http://pandas.pydata.org/pandas-docs/stable/generated/pandas.isnull.html\n\n# here's some exploratory code\ntemp = (dropouts['AFGR']<0)\ntemp.value_counts() # lots of missing!\n\ntemp = (dropouts['TOTD912']<0)\ntemp.value_counts() # even more missing!\n\n# example code (for one variable);\n\ndropouts2 = dropouts[['LEAID','AFGR']]\n\n# (ix stuff, copied from earlier code:)\ndropouts2.ix[dropouts2['AFGR'] < 0, 'AFGR'] = np.nan\n\n# let's see if that worked...\ndropouts2.head(40) #I see NaNs...\ntemp = (dropouts2['AFGR']<0) #(Notice the 2 in the dropouts2)\ntemp.value_counts() # and NO negatives! 
good.\n\n# now it's never negative, which is good\n# is it nan?\n# to check looks like http://pandas.pydata.org/pandas-docs/stable/generated/pandas.isnull.html\n# pd.isnull will do the trick\n\ntemp = (pd.isnull(dropouts2['AFGR']))\ntemp.value_counts()\n\n# compare to the negatives in the original \"dropout\" DataFrame \ntemp = (dropouts['AFGR']<0)\ntemp.value_counts()\n\n# good, these are identical\n# so this is how to put in NaN for missing value in a panda's dataframe\n\n# to do:\n# backup this code and clean it up...\n# repeat for TOTD912\n# export result as a csv in /scratch/","sub_path":"scripts/1-data-cleaning/backup-4-obtain-district-dropout-and-gradrates.py","file_name":"backup-4-obtain-district-dropout-and-gradrates.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"120135405","text":"import sympy as sym\n\nM, m, g, x1, x2, x3, x4, F, ell = sym.symbols('M, m, g, x1, x2, x3, x4, F, ell')\nphi = 4*m*ell*x4**2*sym.sin(x3) + 4*F - 3*m*g*sym.sin(x3)*sym.cos(x3)\nphi /= 4*(M+m) - 3*m*sym.cos(x3)**2\n\nd_phi_F = phi.diff(F)\nd_phi_x3 = phi.diff(x3)\nd_phi_x4 = phi.diff(x4)\n\nd_phi_F_eq = d_phi_F.subs([(F, 0), (x3,0), (x4,0)])\nd_phi_x3_eq = d_phi_x3.subs([(F,0),(x3,0),(x4,0)])\nd_phi_x4_eq = d_phi_x4.subs([(F,0),(x3,0),(x4,0)])\n\na = d_phi_F_eq\nb = -(d_phi_x3_eq)\n\nc = 3/ell/(4*M+m)\nd = 3*(M+m)*g/ell/(4*M+m)\n\nM_val = 0.3\nm_val = 0.1\nell_val = 0.35\ng_val = 9.81\ndef substitute(z):\n subs = [(M, M_val), (m, m_val), (ell, ell_val), (g, g_val)]\n return float(z.subs(subs))\na_val = substitute(a)\nb_val = substitute(b)\nc_val = substitute(c)\nd_val = substitute(d)\n\nimport control as C\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nnum_t = [-c_val]\nden_t = [1, 0, -d_val]\nG_theta = C.TransferFunction(num_t, den_t)\n\nnum_x = [a_val, 0, (-a_val*d_val)+(b_val*c_val)]\nden_x = [1, 0, -d_val, 0, 0]\nG_x = C.TransferFunction(num_x, den_x)\n\n#forced response\nt_span = np.linspace(0,0.2,500)\nF_input = np.sin(100*t_span**2)\n#for θ(t) vs t\nt_out, x3_out = C.forced_response(G_theta, t_span, F_input)\n#for x(t) vs t\nt_out, x1_out = C.forced_response(G_x, t_span, F_input)\n#for trajectory of θ(t)\nplt.plot(t_out, x3_out)\n#for trajectory of x(t)\nplt.plot(t_out, x1_out)\nplt.xlabel('Time (s)')\n#for y axis label θ(t) vs t\nplt.ylabel('θ(t)')\n#for y axis label x(t) vs t\nplt.ylabel('x(t)')\nplt.grid()\nplt.show()\n","sub_path":"forced_response.py","file_name":"forced_response.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"108542351","text":"#!/Users/11834/.conda/envs/Pytorch_GPU/python.exe\n# -*- coding: UTF-8 -*-\n'''=================================================\n@Project -> File :FWorks -> DMVFL_RSA\n@IDE :PyCharm\n@Date :2020/12/6 10:56\n=================================================='''\nimport numpy as np\n#from torch.autograd import Variable\nimport torch, os\nfrom Util.feature_extraction import PSSMPSFMPSSPRSAGetWindowPadheadfoot\nfrom BiLSTM_SE_Net import LSTMMergeSENet\nfrom Util.processing_pssm_msaTopsfm import Processing_PSSM_MSAToPSFM\nfrom Util.WriteFile import appendWrite\n#from Util.GEN_HTML import GEN_HTML\n#device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nfrom Util.feature_generation import FeaturesGeneration\nimport warnings\nwarnings.filterwarnings('ignore')\n#print(device)\n\n\ndef tester(pro_name, result_dir):\n fa_path = 
os.path.join(result_dir, pro_name)\n save_model = \"./save_model/\"\n model = LSTMMergeSENet()\n saved_model = save_model + 'epoch_' + str(50)\n model.load_state_dict(torch.load(saved_model,map_location=\"cpu\"))\n optimizer = torch.optim.Adam(model.parameters())\n saved_model = save_model + 'epoch_' + str(50) + 'opt'\n optimizer.load_state_dict(torch.load(saved_model,map_location=\"cpu\"))\n\n model.eval()\n with torch.no_grad():\n Data = PSSMPSFMPSSPRSAGetWindowPadheadfoot(pro_name, result_dir)\n fea, fea_reverse, protein = Data.getIthSampleFea()\n fea_pssm, fea_psfm, fea_pss, fea_jpsfm = torch.FloatTensor(fea[0]), torch.FloatTensor(\n fea[1]), torch.FloatTensor(fea[2]), torch.FloatTensor(fea[3])\n fea_pssm, fea_psfm, fea_pss, fea_jpsfm = torch.unsqueeze(fea_pssm, 0), torch.unsqueeze(fea_psfm,\n 0), torch.unsqueeze(\n fea_pss, 0), torch.unsqueeze(fea_jpsfm, 0)\n# fea_pssm, fea_psfm, fea_pss, fea_jpsfm = Variable(fea_pssm.float()), Variable(fea_psfm.float()).to(\n# device), Variable(fea_pss.float()).to(device), Variable(fea_jpsfm.float())\n\n fea_pssm_rev, fea_psfm_rev, fea_pss_rev, fea_jpsfm_rev = torch.FloatTensor(fea_reverse[0]), torch.FloatTensor(\n fea_reverse[1]), torch.FloatTensor(fea_reverse[2]), torch.FloatTensor(fea_reverse[3])\n fea_pssm_rev, fea_psfm_rev, fea_pss_rev, fea_jpsfm_rev = torch.unsqueeze(fea_pssm_rev, 0), torch.unsqueeze(\n fea_psfm_rev, 0), torch.unsqueeze(fea_pss_rev, 0), torch.unsqueeze(fea_jpsfm_rev, 0)\n# fea_pssm_rev, fea_psfm_rev, fea_pss_rev, fea_jpsfm_rev = Variable(fea_pssm_rev.float()).to(device), Variable(\n# fea_psfm_rev.float()).to(device), Variable(fea_pss_rev.float()).to(device), Variable(\n# fea_jpsfm_rev.float()).to(device)\n predict00 = model(fea_pssm, fea_psfm, fea_pss, fea_jpsfm)\n predict01 = model(fea_pssm_rev, fea_psfm_rev, fea_pss_rev, fea_jpsfm_rev)\n predict = (predict00[4] + predict01[4]) / 2\n\n \n seq = np.loadtxt(fa_path, dtype=str)[1]\n pro_length = len(seq)\n filename = protein + \".rsa\"\n ASAValue = Processing_PSSM_MSAToPSFM()\n file_path = os.path.join(result_dir, filename)\n if os.path.exists(file_path):pass\n else:\n\n appendWrite(file_path, '{:>4}\\n\\n'.format(\"# DMVFL-RSA VFORMAT (DMVFL-RSA V1.0)\"))\n appendWrite(file_path, '{:>1} {:>1} {:>4} {:>4}\\t\\n'.format(\"NO.\", \"AA\", \"RSA\", \"ASA\"))\n for i in range(pro_length):\n index, residue, RSA = i + 1, seq[i], predict[i, 0]\n SA = ASAValue.MAXASAValue(seq[i]) * predict[i, 0]\n appendWrite(file_path, '{:>4} {:>1} {:>.3f} {:>.3f}\\t\\n'.format(index, residue, RSA, SA))\n appendWrite(file_path, '{:>8} \\t'.format(\"END\"))\n\n\ndef main():\n\n import argparse\n parser = argparse.ArgumentParser(description=\"DMVFL_RSA Predict Protein Solvent Accessibility\")\n parser.add_argument(\"-p\", \"--pro_name\", required=True, type=str, help=\"protein name\")\n parser.add_argument(\"-s\", \"--sequence\", required=True, type=str, help=\"AA sequence \")\n parser.add_argument(\"-o\", \"--result_path\", required=True, type=str, help=\"save result path\")\n args = parser.parse_args()\n features_generation = FeaturesGeneration(args.pro_name, args.sequence, args.result_path)\n features_generation.PSSM_PSS_generation()\n features_generation.PSFM_generation()\n features_generation.Threading_based_PRSA()\n tester(args.pro_name, args.result_path)\n #gan_html = GEN_HTML(args.pro_name, args.result_path)\n #gan_html.generate_html()\n\nif __name__ == '__main__':\n 
main()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"298411845","text":"from typing import List\n\nfrom esque.io.messages import BinaryMessage, Message\nfrom esque.io.serializers.base import MessageSerializer\nfrom esque.io.serializers.string import StringSerializer\n\n\ndef test_message_serializer(\n binary_messages: List[BinaryMessage], string_messages: List[Message], string_serializer: StringSerializer\n):\n serializer: MessageSerializer = MessageSerializer(key_serializer=string_serializer)\n deserialized_message: Message = serializer.deserialize(binary_messages[0])\n assert deserialized_message == string_messages[0]\n serialized_message: BinaryMessage = serializer.serialize(string_messages[0])\n assert serialized_message == binary_messages[0]\n\n\ndef test_message_serializer_many(\n binary_messages: List[BinaryMessage], string_messages: List[Message], string_serializer: StringSerializer\n):\n serializer: MessageSerializer = MessageSerializer(key_serializer=string_serializer)\n deserialized_messages: List[Message] = list(serializer.deserialize_many(binary_messages))\n assert deserialized_messages == string_messages\n serialized_messages: List[BinaryMessage] = list(serializer.serialize_many(string_messages))\n assert serialized_messages == binary_messages\n","sub_path":"tests/unit/io/serializer/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"506666850","text":"\"\"\" THE LEDGER is stored on a block every day, once the day changes a new block is created and the old block is added to the chain, the format of each ledger is below:\n {type:\"Log In\", \"Log Out\" or \"Upload\",\n user: \"xxxxxxx\",\n timeUTC: \"12:34\",\n hash_of_evidence:\"xxxxxxxxxx\"\n is_hash_identical:\"true\"/\"false\",\n previous_hashed_ledger: xxxx}\nThere are many ledgers in a block, and many blocks in the chain, you should only need the chain stored in coreManagement.chain.chain\n \"\"\"\nfrom datetime import datetime\nfrom management import CoreManagement\nmanager = CoreManagement(evidence=\"myfile.txt\")\n\"\"\"Example below to make a record:\"\"\"\nmanager.onAccess(type = \"Log In\", user=\"ved\", evidence=\"myfile.txt\")\nmanager.onAccess(type = \"Log In\", user=\"ved\", evidence=\"myfile.txt\")\nmanager.onAccess(type = \"Log In\", user=\"ved\", evidence=\"myfile.txt\")\nmanager.onAccess(type = \"Log In\", user=\"ved\", evidence=\"myfile.txt\")\nmanager.onAccess(type = \"Log In\", user=\"ved\", evidence=\"myfile.txt\")\nmanager.addToChain()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"139038828","text":"\"\"\"\nModule for tokenizing. 
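(Looking back at the ledger format documented in the block-chain record above: a toy entry with invented field values, purely for orientation.)

```python
from datetime import datetime, timezone

# Toy ledger entry in the documented shape; every value here is invented.
ledger = {
    'type': 'Log In',
    'user': 'ved',
    'timeUTC': datetime.now(timezone.utc).strftime('%H:%M'),
    'hash_of_evidence': 'xxxxxxxxxx',
    'is_hash_identical': 'true',
    'previous_hashed_ledger': 'xxxx',
}
print(ledger)
```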
Methods work with string objects\n\"\"\"\n\nimport unicodedata\n\n\nclass Token(object):\n \"\"\"\n Class for storing tokens w/ their first/last chars and types\n \"\"\"\n def __init__(self, first_char, last_char, line, token_type):\n\n self.first_char = first_char+1\n self.last_char = last_char\n self.token = line[first_char:last_char]\n self.token_type = token_type\n\n def __repr__(self):\n\n return \"{0}: [{1}, {2}]\".format(self.token, self.first_char, self.last_char)\n\n def __eq__(self, other):\n if (self.first_char == other.first_char and\n self.last_char == other.last_char and\n self.token == other.token and\n self.token_type == other.token_type):\n return True\n else:\n return False\n\n\n\nclass Tokenizer(object):\n \"\"\"\n Class for tokenizing methods\n \"\"\"\n def tokenize_alph(self, line):\n \"\"\"\n Method tokenize_alph gets an input line and returns alphabetical\n tokens with their characteristics (first/last chars, type)\n as a list\n\n :param line: input string object\n :return: list of tokens with their attributes\n \"\"\"\n\n if not isinstance(line, str):\n raise TypeError(\"I can't work with it! Give me a string object\")\n\n inside = False\n tokenized_line = []\n\n for i, char in enumerate(line):\n if char.isalpha():\n if not inside:\n first_char = i\n inside = True\n elif inside:\n tokenized_line.append(Token(first_char, i, line, \"a\"))\n inside = False\n\n if line != \"\" and line[-1].isalpha(): #to add the last token in the line\n tokenized_line.append(Token(first_char, i + 1, line, \"a\"))\n\n return tokenized_line\n\n def tokenize(self, line):\n \"\"\"\n Method tokenize gets an input line and returns tokens\n with their characteristics (first/last chars, type)\n as a list generator\n :param line: input string object for tokenizing\n :return: generator object that yields token with its attributes\n \"\"\"\n\n if not isinstance(line, str):\n raise TypeError(\"I can't work with it! 
Give me a string object\")\n\n token_type = None\n first_char = None\n\n for i, char in enumerate(line):\n new_token = Tokenizer().check(char)\n if new_token != token_type:\n if token_type is not None:\n token = Token(first_char, i, line, token_type)\n yield token\n first_char = i\n token_type = new_token\n\n if line != \"\": # to add the last token\n token = Token(first_char, i+1, line, token_type)\n yield token\n\n @staticmethod\n def check(char):\n \"\"\"\n Method check returns type of the current token:\n alphabetical, digital, space or punctuation\n :param char:\n :return: token type as a string:\n \"a\" - alphabetical\n \"d\" - digital\n \"s\" - space\n \"p\" - punctuational\n \"\"\"\n\n token_type = None\n if char.isalpha():\n token_type = \"a\"\n if char.isdigit():\n if token_type != \"d\":\n token_type = \"d\"\n if char.isspace():\n if token_type != \"s\":\n token_type = \"s\"\n if not (char.isspace() or char.isalpha() or char.isdigit()):\n category = unicodedata.category(char)\n if category[0] == \"P\":\n if token_type != \"p\":\n token_type = \"p\"\n return token_type","sub_path":"tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":3714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"69507835","text":"from itertools import combinations\r\n\r\ndef get_multiple_sum(value_list, bound):\r\n value = multiple_list_value(value_list)\r\n number = int((bound - 1) / value)\r\n return int(value * (number + 1) * number / 2)\r\n\r\ndef multiple_list_value(value_list):\r\n multiples = 1\r\n for value in value_list:\r\n multiples = multiples * value \r\n return multiples\r\n\r\ndef get_multiples_sum(bound, value_list):\r\n multiples = 0\r\n for idx, val in enumerate(value_list):\r\n for val_list in list(combinations(value_list, idx+1)):\r\n if idx % 2 == 0:\r\n multiples += get_multiple_sum(list(val_list), bound)\r\n else:\r\n multiples -= get_multiple_sum(list(val_list), bound)\r\n\r\n\r\n return multiples\r\n\r\nprint(get_multiples_sum(1000, [3, 5])) ","sub_path":"answer3.py","file_name":"answer3.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"105016745","text":"import redis\r\nimport threading\r\nimport multiprocessing\r\nimport os\r\nif os.name == \"nt\":\r\n import win32process\r\nimport itertools\r\nimport hcaptcha\r\nimport xrequests\r\nimport sys\r\nimport time\r\nimport string\r\nimport json as js2\r\nimport random\r\nimport secrets\r\nimport requests\r\nfrom requests import Session\r\nfrom urllib.parse import urlparse\r\nimport asyncio\r\nimport time\r\nimport websocket\r\nproxies = []\r\n\r\nfor line in open('proxies.txt'):\r\n proxies.append(line.replace('\\n', ''))\r\n\r\nclass Discord:\r\n\r\n def Username():\r\n return secrets.token_urlsafe(random.randint(7, 11)) + \" | Dort\"\r\n def Headers(fingerprint):\r\n json = {\r\n \"accept\": \"*/*\",\r\n \"accept-encoding\": \"gzip, deflate, br\",\r\n \"accept-language\": \"en-US,en;q=0.9\",\r\n \"content-type\": \"application/json\",\r\n \"origin\": \"https://discord.com\",\r\n \"referer\": \"https://discord.com/\",\r\n \"sec-fetch-dest\": \"empty\",\r\n \"sec-fetch-mode\": \"cors\",\r\n \"sec-fetch-site\": \"same-origin\",\r\n \"sec-gpc\": 1,\r\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36\",\r\n \"x-track\": 
\"eyJvcyI6IldpbmRvd3MiLCJicm93c2VyIjoiQ2hyb21lIiwiZGV2aWNlIjoiIiwic3lzdGVtX2xvY2FsZSI6ImVuLVVTIiwiYnJvd3Nlcl91c2VyX2FnZW50IjoiTW96aWxsYS81LjAgKFdpbmRvd3MgTlQgMTAuMDsgV2luNjQ7IHg2NCkgQXBwbGVXZWJLaXQvNTM3LjM2IChLSFRNTCwgbGlrZSBHZWNrbykgQ2hyb21lLzkxLjAuNDQ3Mi4xMjQgU2FmYXJpLzUzNy4zNiIsImJyb3dzZXJfdmVyc2lvbiI6IjkxLjAuNDQ3Mi4xMjQiLCJvc192ZXJzaW9uIjoiMTAiLCJyZWZlcnJlciI6IiIsInJlZmVycmluZ19kb21haW4iOiIiLCJyZWZlcnJlcl9jdXJyZW50IjoiIiwicmVmZXJyaW5nX2RvbWFpbl9jdXJyZW50IjoiIiwicmVsZWFzZV9jaGFubmVsIjoic3RhYmxlIiwiY2xpZW50X2J1aWxkX251bWJlciI6OTk5OSwiY2xpZW50X2V2ZW50X3NvdXJjZSI6bnVsbH0=\"\r\n }\r\n if fingerprint is not None:\r\n json['x-fingerprint'] = fingerprint\r\n def Fingerprint(p):\r\n return requests.get(\"https://discordapp.com/api/v9/experiments\", headers=Discord.Headers(None), proxies={\"http\": \"http://\" + p, \"https\": \"http://\" + p}).json()[\"fingerprint\"]\r\n def Register(captcha):\r\n try:\r\n p = random.choice(proxies)\r\n username = Discord.Username()\r\n date_of_birth = f'{random.randint(1988, 2000)}-0{random.randint(1, 9)}-0{random.randint(1, 9)}'\r\n fingerprint = Discord.Fingerprint(p);\r\n json = {\r\n \"consent\": True,\r\n \"captcha_key\": captcha,\r\n \"fingerprint\": fingerprint,\r\n \"username\": username,\r\n \"invite\": Z8xytgzV\r\n }\r\n r = requests.post(\"https://discord.com/api/v8/auth/register\", headers=Discord.Headers(fingerprint), proxies={\"http\": \"http://\" + p, \"https\": \"http://\" + p}, json=json, timeout=4)\r\n #print(r.text)\r\n if \"token\" in r.text:\r\n token = r.json()[\"token\"]\r\n with open(\"Tokens.txt\", \"a+\") as f:\r\n f.write(f\"{token}\\n\")\r\n f.close()\r\n try:\r\n purl = urlparse(random.choice(proxies))\r\n proxy_config = dict(\r\n http_proxy_host=purl.hostname,\r\n http_proxy_port=purl.port\r\n )\r\n ws = websocket.create_connection(\r\n f\"wss://gateway.discord.gg/?encoding=json&v=8&compress=zlib-stream\",\r\n **proxy_config,\r\n origin=f\"https://discord.com\"\r\n )\r\n ws.recv()\r\n ws.send(js2.dumps(\r\n {\"op\":2,\"d\":{\"token\":token,\"capabilities\":125,\"properties\":{\"os\":\"Windows\",\"browser\":\"Chrome\",\"device\":\"\",\"system_locale\":\"en-US\",\"browser_user_agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36\",\"browser_version\":\"91.0.4472.124\",\"os_version\":\"10\",\"referrer\":\"\",\"referring_domain\":\"\",\"referrer_current\":\"\",\"referring_domain_current\":\"\",\"release_channel\":\"stable\",\"client_build_number\":89709,\"client_event_source\":None},\"presence\":{\"status\":\"online\",\"since\":0,\"activities\":[],\"afk\":False},\"compress\":False,\"client_state\":{\"guild_hashes\":{},\"highest_last_message_id\":\"0\",\"read_state_version\":0,\"user_guild_settings_version\":-1}}},\r\n separators=(\",\", \":\")\r\n ).encode(\"UTF-8\"))\r\n Fard = {\r\n \"op\": 3,\r\n \"d\": {\r\n \"since\": 0,\r\n \"activities\": [{\r\n \"name\": \"Minecraft\",\r\n \"type\": 0\r\n }],\r\n \"status\": \"online\",\r\n \"afk\": False\r\n }\r\n }\r\n ws.send(js2.dumps(Fard))\r\n ws.recv()\r\n ws.recv()\r\n ws.close()\r\n except Exception as e:\r\n print(e)\r\n print(f\"[$] Created: {username} | {token[:22]}...\")\r\n if r.status_code == 429:\r\n Discord.Register(captcha)\r\n pass\r\n except Exception as e:\r\n print(e)\r\n Discord.Register(captcha)\r\n\r\n\r\nWORKER_COUNT = 6\r\nTHREAD_COUNT_PER_WORKER = 100\r\nSOLVER_PARAMS = dict(\r\n database=redis.Redis(host='127.0.0.1', 
password='f8a435650757296f3bac9fb5ee6e816dc9651209f6c1bc71ac0f7aa6843bbd86c8df0f7dad9862d9deb3e1c3fa5ef3f97672bd031f17771c7607ad6bb76294e1', port=1079, db=4),\r\n min_answers=1\r\n)\r\nCHALLENGE_PARAMS = dict(sitekey=\"f5561ba9-8f1e-40ca-9b5b-a0b3f719ef34\", page_url=\"https://discord.com\")\r\ndef thread_func(worker_num, thread_num, thread_barrier, thread_event,\r\n proxies, solver):\r\n thread_barrier.wait()\r\n thread_event.wait()\r\n loop = asyncio.new_event_loop()\r\n asyncio.set_event_loop(loop)\r\n while True:\r\n proxy = next(proxies)\r\n http_client = xrequests.Session(proxies={\r\n \"http\": f\"http://{proxy}\",\r\n \"https\": f\"http://{proxy}\"\r\n }, timeout=5)\r\n \r\n while True:\r\n try:\r\n token = solver.get_token(\r\n **CHALLENGE_PARAMS,\r\n http_client=http_client)\r\n print(token)\r\n if token:\r\n threading.Thread(target=Discord.Register, args=(token,)).start();\r\n except Exception as e:\r\n pass\r\n http_client.clear()\r\n\r\ndef worker_func(worker_num, worker_barrier, proxies):\r\n cpu_num = worker_num % multiprocessing.cpu_count()\r\n if os.name == \"nt\":\r\n win32process.SetProcessAffinityMask(-1, 1 << cpu_num)\r\n else:\r\n os.sched_setaffinity(0, [cpu_num])\r\n proxies = itertools.cycle(proxies)\r\n solver = hcaptcha.Solver(**SOLVER_PARAMS)\r\n thread_barrier = threading.Barrier(THREAD_COUNT_PER_WORKER + 1)\r\n thread_event = threading.Event()\r\n threads = [threading.Thread(target=thread_func, args=(worker_num, thread_num, thread_barrier, thread_event, proxies, solver)) for thread_num in range(THREAD_COUNT_PER_WORKER)]\r\n for thread in threads:\r\n thread.start()\r\n thread_barrier.wait()\r\n worker_barrier.wait()\r\n thread_event.set()\r\n \r\nif __name__ == \"__main__\":\r\n with open(\"proxies.txt\") as fp:\r\n proxies = fp.read().splitlines()\r\n proxies_per = int(len(proxies)/WORKER_COUNT)\r\n worker_barrier = multiprocessing.Barrier(WORKER_COUNT + 1)\r\n workers = [multiprocessing.Process(target=worker_func, args=(worker_num, worker_barrier, proxies[proxies_per * worker_num : proxies_per * (worker_num + 1)])) for worker_num in range(WORKER_COUNT)]\r\n for worker in workers:\r\n worker.start()\r\n worker_barrier.wait()","sub_path":"dort.py","file_name":"dort.py","file_ext":"py","file_size_in_byte":7921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"221251045","text":"\r\n\r\ndef reverse(seq):\r\n \"\"\"Takes an input tuple, seq, and returns a tuple with the same items in\r\n reversed order. Does not reverse any items in the tuple and does not modify the\r\n original tuple.\r\n\r\n Arguments:\r\n seq -- The tuple for which we return a tuple with the items reversed.\r\n\r\n >>> x = (1, 2, 3, 4, 5)\r\n >>> reverse(x)\r\n (5, 4, 3, 2, 1)\r\n >>> x\r\n (1, 2, 3, 4, 5)\r\n >>> y = (1, 2, (3, 4), 5)\r\n >>> reverse(y)\r\n (5, (3, 4), 2, 1)\r\n \"\"\"\r\n \"*** Your code here. ***\"\r\n l=len(seq)\r\n op=()\r\n for k in range(0,l):\r\n op=op+(seq[l-k-1],)\r\n return op\r\n\r\ndef sizes(seq):\r\n \"\"\"Takes an input sequence of tuples, seq, and returns a sequence with the\r\n corresponding lengths of each tuple in seq.\r\n\r\n Arguments:\r\n seq -- A sequence of tuples.\r\n\r\n >>> sizes(((1,), (2, 3), (4, 5, 6)))\r\n (1, 2, 3)\r\n \"\"\"\r\n \"*** Your code here. 
***\"\r\n    return tuple(map(len,seq))\r\n\r\ndef odd_len_only(seq):\r\n    \"\"\"Takes an input sequence of tuples, seq, and returns a sequence with only\r\n    the tuples which had odd length.\r\n    \r\n    Arguments:\r\n    seq -- A sequence of tuples.\r\n\r\n    >>> odd_len_only(((1,), (2, 3), (4, 5, 6)))\r\n    ((1,), (4, 5, 6))\r\n    \"\"\"\r\n    \"*** Your code here. ***\"\r\n    return tuple(filter(lambda x: len(x) % 2 != 0, seq))\r\n\r\ndef tuple_to_rlist(tup):\r\n    \"\"\"Takes an input tuple, tup, and returns the equivalent representation of\r\n    the sequence using an rlist.\r\n    \r\n    Arguments:\r\n    tup -- A sequence represented as a tuple.\r\n\r\n    >>> tuple_to_rlist((1, 2, 3, 4, 5, 6))\r\n    (1, (2, (3, (4, (5, (6, None))))))\r\n    \"\"\"\r\n    \"*** Your code here. ***\"\r\n    temp=reverse(tup)\r\n    op=(temp[0],None)\r\n    for i in range(1,len(tup)):\r\n        op=(temp[i],op)\r\n    return op\r\n\r\n\r\n    \r\n    \r\n    \r\n    \r\n    \r\n\r\n\r\n    \r\n    \r\n    \r\n","sub_path":"lab5.py","file_name":"lab5.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"86780391","text":"import binascii\nimport socket\nimport struct\nimport sys\nimport urllib.request\n\n\n# Create a UDP socket and send the file as simple packets\ndef create_packet(**kwargs):\n    print(kwargs)\n    s_n = kwargs['sequence_number']\n    a_n = kwargs['ack_number']\n    padding = ['x']*29\n    ack = kwargs['ack']\n    syn = kwargs['syn']\n    fin = kwargs['fin']\n    payload = kwargs['payload']\n    data = struct.pack('!I', s_n) #pack the sequence number\n    data += struct.pack('!I', a_n) #pack the ack number\n    data += struct.pack('!{0}s'.format(len(padding)), ''.join(padding).encode()) #pack the padding bytes\n    data += struct.pack(\"!c\", ack.encode()) #pack the ack flag\n    data += struct.pack(\"!c\", syn.encode()) #pack the syn flag\n    data += struct.pack(\"!c\", fin.encode()) #pack the fin flag\n    data += struct.pack(\"{0}s\".format(len(payload)), payload) #pack the payload (already bytes)\n    return data\n\ndef get_webpage(**kwargs):\n    page = kwargs['webpage']\n    with urllib.request.urlopen(page) as response, open(\"test\", 'w') as w:\n        html = response.read()\n        w.write(html.decode())\n    return html\n\nif __name__=='__main__':\n    webpage = get_webpage(webpage=\"http://www.python.org\")\n    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    server_address = ('localhost', 30001)\n    buf = 512\n    r = open('test', 'rb')\n    total_read = 0\n    data_size = len(webpage)\n    data = r.read(buf)\n    total_read += 512\n    send_data = create_packet(sequence_number=100, ack_number=0, ack = 'Y', syn = 'N', fin = 'N', payload=data)\n    sock.sendto(send_data, server_address)\n    while (total_read < data_size):\n        if (sock.sendto(send_data, server_address)):\n            send_data = create_packet(sequence_number=101, ack_number=0, ack = 'Y', syn = 'N', fin = 'N', payload=data)\n            data = r.read(buf)\n            total_read += len(send_data)-12\n\n    sock.close()\n    r.close()\n\n\n","sub_path":"pa2/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"376620895","text":"'''\nCreated on Oct 6, 2014\n\n@author: Aaron\n'''\nteam_abbrvs = ['ANA', 'ARI', 'BOS', 'BUF', 'CAR', 'CBJ', 'CGY', 'CHI', 'COL', 'DAL',\n               'DET', 'EDM', 'FLA', 'LA', 'MIN', 'MTL', 'NAS', 'NJ', 'NYI', 'NYR', \n               'OTT', 'PHI', 'PIT', 'SJ', 'STL', 'TB', 'TOR', 'VAN', 'WPG', 'WSH']\n\nteam_names = ['Ducks', 'Coyotes', 'Bruins', 'Sabres', 'Hurricanes', 'Blue Jackets', \n              'Flames', 'Blackhawks', 'Avalanche', 'Stars','Red Wings', 'Oilers', \n              'Panthers', 'Kings', 
'Wild', 'Canadiens', 'Predators', 'Devils', \n              'Islanders', 'Rangers', 'Senators', 'Flyers', 'Penguins', 'Sharks', \n              'Blues', 'Lightning', 'Maple Leafs', 'Canucks', 'Jets', 'Capitals']\n\nclass Team(object):\n    def __init__(self, abbv, name):\n        self.abbv = abbv\n        self.name = name\n    \nteam_data = []\n\nfor abv, name in zip(team_abbrvs, team_names):\n    team_data.append(Team(abv, name))","sub_path":"test_files/test_itr.py","file_name":"test_itr.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"468925233","text":"# db_info.py\n#coding=utf-8\n\nimport sqlite3,os,datetime,time\nfrom myprpcrypt import prpcrypt\n\n\ncontainer_DB='./data/container.db3'\n\ndef _create_tomcatlist():\n\tcon = sqlite3.connect(container_DB)\n\tcur = con.cursor()\n\tcur.execute('CREATE TABLE tomcatlist(\\\n\t\tid integer primary key,\\\n\t\tplatform varchar(100) NOT NULL,\\\n\t\thost varchar(40) NOT NULL,\\\n\t\ttomcatname varchar(100) NOT NULL,\\\n\t\tservicename varchar(200),\\\n\t\ttmpath varchar(100) NOT NULL)')\n\tcon.close()\n\n\ndef _create_saltapi_token():\n\tcon = sqlite3.connect(container_DB)\n\tcur = con.cursor()\n\tcur.execute('CREATE TABLE saltapi_token(\\\n\t\tid integer primary key,\\\n\t\tsaltplatform varchar(40) NOT NULL,\\\n\t\tencrypt_token varchar(100) NOT NULL,\\\n\t\texpire varchar(200) NOT NULL,\\\n\t\tinsert_time date(100) NOT NULL)')\n\tcon.close()\n\n\ndef delete_tomcat_list(platform,host):\n\tcon = sqlite3.connect(container_DB,isolation_level=None)\n\tcur = con.cursor()\n\tsql = 'delete from tomcatlist where host = \"' + host +'\" and platform = \"' + platform + '\"'\n\tcur.execute(sql)\n\tcon.close()\n\ndef insert_tomcat_list(datalist):\n\tcon = sqlite3.connect(container_DB,isolation_level=None)\n\tcur = con.cursor()\n\tsql = '''insert into tomcatlist values (?,?,?,?,?,?)'''\n\tfor data in datalist:\n\t\tcur.execute(sql,data)\n\tcon.close()\n\ndef select_tomcat_list(platname,servicename):\n\tcon = sqlite3.connect(container_DB,isolation_level=None)\n\tcur = con.cursor()\n\tsql = 'select host,tomcatname,servicename from tomcatlist where platform = \"' + platname +'\" and servicename like \"%' + servicename +'%\"'\n\tresult = cur.execute(sql)\n\treturn result.fetchall()\n\n\ndef insert_saltapi_token(data):\n\tcon = sqlite3.connect(container_DB,isolation_level=None)\n\tcur = con.cursor()\n\tsql = '''insert into saltapi_token values (?,?,?,?,?)'''\n\tcur.execute(sql,data)\n\tcon.commit()\n\tcon.close()\n\n\n\ndef delete_saltapi_token():\n\tcon = sqlite3.connect(container_DB,isolation_level=None)\n\tcur = con.cursor()\n\tsql = '''delete from saltapi_token where id = (select min(id) from saltapi_token) '''\n\tcur.execute(sql)\n\tcon.commit()\n\tcon.close()\n\n\ndef select_saltapi_token(saltplatform):\n\tcon = sqlite3.connect(container_DB,isolation_level=None)\n\tcur = con.cursor()\n\tsql = 'select id,encrypt_token,expire from saltapi_token where saltplatform = \"' + saltplatform + '\"'\n\tresult = cur.execute(sql)\n\tresultlist = result.fetchall()\n\tfor info in resultlist:\n\t\texpire = info[2]\n\t\tsql = 'delete from saltapi_token where id = \"' + str(info[0]) + '\"'\n\t\tdatenow = time.time()\n\n\t\tif ( float(datenow) < float(expire)):\n\t\t\tcon.close()\n\t\t\tmypt = prpcrypt('YUES^&%$<>?L)932')\n\t\t\ttoken_id = mypt.decrypt(info[1])\n\t\t\treturn token_id\n\t\t\t\n\t\tcur.execute(sql)\n\t\tcon.commit()\n\n\treturn 
False\n\n\n\n","sub_path":"db_info.py","file_name":"db_info.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"103699812","text":"__author__ = 'Justin M'\n\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QWidget\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n\n w = QWidget()\n w.resize(250,150)\n w.move(300,300)\n w.setWindowTitle(\"First Application\")\n w.show()\n\n app.exec_()\n","sub_path":"Keith_Control_project/PyQT Examples/Basics/basic_window.py","file_name":"basic_window.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"6468229","text":"from flask_restful import reqparse, abort, Resource, fields, marshal\nfrom flask_jwt_extended import (jwt_required)\n\ntodo_fields = {\n \"task\": fields.String,\n \"id\": fields.String\n}\n\nclass Todolist_handler(Resource):\n def __init__(self, todolist_service):\n parser = reqparse.RequestParser()\n parser.add_argument('q')\n parser.add_argument('sort_column')\n parser.add_argument('sort_direction')\n parser.add_argument('page')\n self.parser = parser\n\n self.todolist_service = todolist_service\n\n @jwt_required\n def get(self):\n args = self.parser.parse_args()\n todolist = self.todolist_service.get_todolist(query=args['q'], sort_column=args['sort_column'], sort_direction=args[\"sort_direction\"], page=args[\"page\"])\n\n return marshal(todolist, todo_fields), 200","sub_path":"flask_template/handler/todolist.py","file_name":"todolist.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"358166326","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 21 16:27:58 2017\n\n@author: gbaechle\n\"\"\"\n\nfrom scipy import misc, io\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom skimage.color import rgb2xyz, xyz2rgb\nfrom lippmann import *\nimport imageio\n\nimport sys\nsys.path.append(\"../\")\nimport color_tools as ct\n\nplt.close('all')\n\n\n\ndef read_image(path):\n\n return imageio.imread(path).astype(float)/255.\n \n \ndef compute_spectrum_slice(sliced, lambdas):\n \n #comppute the spectrum\n im_xyz = xyz2rgb(sliced.reshape((1,-1,3))).reshape(-1, 3)\n spectrum = ct.from_xyz_to_spectrum(im_xyz, lambdas)\n \n return spectrum\n \n\ndef compute_lippmann_slice(spectrums, lambdas, depths):\n \n lippmann = np.zeros((len(spectrums), len(depths)))\n \n for i, s in enumerate(spectrums):\n print(i)\n lip, _ = lippmann_transform(lambdas, s, depths) \n lippmann[i, :] = lip\n \n return lippmann\n \n \ndef compute_end_plate(im, lambdas, vmax):\n \n two_k = 4 * np.pi / lambdas\n \n im_xyz = xyz2rgb(im)\n spectrums = ct.from_xyz_to_spectrum(im_xyz, lambdas)\n \n intensity = -np.trapz(spectrums, two_k*c/2, axis=2)\n mpl.image.imsave('Figures/baseline.png', intensity, vmax=vmax, vmin=0)\n \n return intensity\n\n \ndef generate_slices(im, N=500):\n \n lambdas, _ = generate_wavelengths(N)\n depths = generate_depths(delta_z=2.5E-9, max_depth=2.5E-6)\n \n H = 883-1\n L = 883-1\n slice1 = compute_spectrum_slice(im[:H, L, :3], lambdas)\n slice2 = compute_spectrum_slice(im[H, :L, :3], lambdas)\n slice3 = compute_spectrum_slice(im[:H, 0, :3], lambdas)\n slice4 = compute_spectrum_slice(im[0, :L, :3], lambdas)\n \n lip1 = compute_lippmann_slice(slice1, lambdas, depths)\n lip2 = compute_lippmann_slice(slice2, lambdas, 
depths)\n lip3 = compute_lippmann_slice(slice3, lambdas, depths)\n lip4 = compute_lippmann_slice(slice4, lambdas, depths)\n \n print(np.max(lip1), np.max(lip2), np.max(lip3), np.max(lip4))\n vmax = max(np.max(lip1), np.max(lip2), np.max(lip3), np.max(lip4))\n \n for i in range(1,5): \n \n i_str = str(i)\n mpl.image.imsave('Figures/slice' + i_str + '.png', eval('lip' + i_str), vmax=vmax)\n \n return lambdas, vmax\n \n \n \nif __name__ == '__main__':\n \n# path = '../images/original.png'\n path = '../images/lippmann_image.jpg'\n im = read_image(path) \n \n lambdas, vmax = generate_slices(im, N=500)\n \n# spectrum = compute_end_plate(im[:800, :750, :3], lambdas, vmax) \n spectrum = compute_end_plate(im[:, :, :3], lambdas, vmax) \n \n# misc.imsave('Figures/front.png', im[:800, :750])\n misc.imsave('Figures/front.png', im)\n \n plt.figure()\n plt.imshow(im)\n plt.figure()\n# plt.imshow(im[:800, :750, :3])\n plt.imshow(im[:, :, :3])\n \n ","sub_path":"generate_lippmann_plate.py","file_name":"generate_lippmann_plate.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"173556577","text":"def run(name, df_train, df_test, acc_func, target):\n from sklearn.linear_model import LinearRegression\n from benchmarks.helpers.sklearn_helpers import normalize, to_np\n\n model = LinearRegression()\n\n df_train, enc_map = normalize(df_train, target)\n X_train, Y_train = to_np(df_train, target)\n model.fit(X_train, Y_train)\n\n df_test, _ = normalize(df_test, target, enc_map)\n X_test, Y_test = to_np(df_test, target)\n predictions = model.predict(X_test)\n accuracy = acc_func(predictions, Y_test)\n\n return accuracy\n","sub_path":"alternatives/sklearn/home_rentals/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"204835574","text":"\n# coding: utf-8\n\n# The National Centers for Environmental Information (NCEI)\n#\n# Before working on this assignment please read these instructions fully. In the submission area, you will notice that you can click the link to **Preview the Grading** for each step of the assignment. This is the criteria that will be used for peer grading. Please familiarize yourself with the criteria before beginning the assignment.\n#\n# An NOAA dataset has been stored in the file `data/C2A2_data/BinnedCsvs_d400/fb441e62df2d58994928907a91895ec62c2c42e6cd075c2700843b89.csv`. The data for this assignment comes from a subset of The National Centers for Environmental Information (NCEI) [Daily Global Historical Climatology Network](https://www1.ncdc.noaa.gov/pub/data/ghcn/daily/readme.txt) (GHCN-Daily). The GHCN-Daily is comprised of daily climate records from thousands of land surface stations across the globe.\n#\n# Each row in the assignment datafile corresponds to a single observation.\n#\n# The following variables are provided to you:\n#\n# * **id** : station identification code\n# * **date** : date in YYYY-MM-DD format (e.g. 2012-01-24 = January 24, 2012)\n# * **element** : indicator of element type\n# * TMAX : Maximum temperature (tenths of degrees C)\n# * TMIN : Minimum temperature (tenths of degrees C)\n# * **value** : data value for element (tenths of degrees C)\n#\n# For this assignment, you must:\n#\n# 1. 
Read the documentation and familiarize yourself with the dataset, then write some python code which returns a line graph of the record high and record low temperatures by day of the year over the period 2005-2014. The area between the record high and record low temperatures for each day should be shaded.\n# 2. Overlay a scatter of the 2015 data for any points (highs and lows) for which the ten year record (2005-2014) record high or record low was broken in 2015.\n# 3. Watch out for leap days (i.e. February 29th), it is reasonable to remove these points from the dataset for the purpose of this visualization.\n# 4. Make the visual nice! Leverage principles from the first module in this course when developing your solution. Consider issues such as legends, labels, and chart junk.\n#\n# The data you have been given is near **Ann Arbor, Michigan, United States**, and the stations the data comes from are shown on the map below.\n\n\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\ndef read_data():\n\tdf = pd.read_csv('fb441e62df2d58994928907a91895ec62c2c42e6cd075c2700843b89.csv')\n\tdf.Date= pd.to_datetime(df.Date)\n\tdf=df[(df.Date!='2008-02-29') & (df.Date!='2012-02-29')]\n\tdf_2015 = (df[df.Date.map(lambda d: d.year == 2015)]).groupby('Date').apply(lambda\n\t\tgp: pd.Series({'Tmax':gp.Data_Value.max(),'Tmin':gp.Data_Value.min()})).reset_index()\n\tdf_05_14 = (df[df.Date.map(lambda d: d.year != 2015)].groupby('Date')).apply(lambda\n\t\tgp: pd.Series({'Tmax':gp.Data_Value.max(),'Tmin':gp.Data_Value.min()})).reset_index()\n\tdf_05_14['m-day'] = df_05_14.Date.map(lambda d: '{:02d}-{:02d}'.format(d.month,d.day))\n\tdf_05_14_gp = df_05_14.groupby('m-day').apply(lambda gp: pd.Series({'Tmax':gp.Tmax.max(),'Tmin':gp.Tmin.min()})).reset_index()\n\tdf_merge = df_2015.merge(df_05_14_gp, left_index=True, right_index=True, suffixes=('_2015','_05_14'))\n\tdf_merge['high'] = df_merge[df_merge.Tmax_2015 > df_merge.Tmax_05_14].Tmax_2015\n\tdf_merge['low'] = df_merge[df_merge.Tmin_2015 < df_merge.Tmin_05_14].Tmin_2015\n\tdf_merge.set_index('Date')\n\treturn df_merge, df_05_14_gp\ndf_merge, df_05_14_gp = read_data()\n\n\n# In[7]:\n\n# ===== Plot 2D line and scatter\nfig = plt.figure(figsize=(16,8))\nax = plt.gca()\nplt.plot(df_05_14_gp.index, df_05_14_gp.Tmax, label='2005-2014 record high', color='red', alpha=0.2)\nplt.plot(df_05_14_gp.index, df_05_14_gp.Tmin, label='2005-2014 record low', color='green', alpha=0.2)\nax.fill_between(df_05_14_gp.index, df_05_14_gp.Tmin, df_05_14_gp.Tmax, facecolor='grey', alpha=0.2)\nplt.scatter(df_merge.index, df_merge.high, label='2015 broke high',color='red', marker='^', s=30, alpha=0.4)\nplt.scatter(df_merge.index, df_merge.low, label='2015 broke low',color='blue', marker='v', s=30, alpha=0.4)\n# ===== Plot legend\nlegend = plt.legend(bbox_to_anchor=(0.45,0.2),loc=3, ncol=1, mode='expand', handlelength=3, scatterpoints=1)\nlegend.get_frame().set_alpha(0.)\nfor line in legend.get_lines():\n line.set_lw(3)\nfor s_legend in legend.legendHandles:\n s_legend._sizes = [70]\n s_legend.set_alpha(0.4)\n# ===== Plot annotation\nm_day = [0] + list(np.cumsum(pd.date_range('2005-01-01', periods=12, freq='M').map(lambda d: d.day)))\nx_pos = list(map(lambda x: x+15, m_day[:-1]))\nx_label = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\nfor pos, month in zip(x_pos, x_label):\n ax.annotate(s=month, xy =(pos, -350), xycoords='data', alpha=0.8, size=9, va='top', ha='center')\nplt.vlines(m_day[1:-1], 
*ax.get_ylim(), color='k', linestyles='--', lw=0.3, alpha=0.3)\nax.annotate(s='Plot by Lambert Huang',xy=(x_pos[5]+20,-420), xycoords='data', alpha=0.8, size=9, va='bottom', ha='center')\n# ==== Plot Celsius and Fahrenheit\nyaxis_tick_left = np.array([-300, -200, -100, 0, 100, 200, 300, 400])\nyaxis_tick_right = yaxis_tick_left * 0.18 +32\nyaxis_temp_left = list(map(lambda t: '{}$^{{\\circ}}$C'.format(int(t*0.1)), yaxis_tick_left))\nyaxis_temp_right = list(map(lambda t: '{}$^{{\\circ}}$F'.format(int(t)), yaxis_tick_right))\nfor pos, temp in zip(yaxis_tick_left, yaxis_temp_left):\n ax.annotate(s=temp, xy =(-1, pos), xycoords='data', alpha=0.7, size=9,va='center', ha='right')\nfor pos, temp in zip(yaxis_tick_left, yaxis_temp_right):\n ax.annotate(s=temp, xy =(380, pos), xycoords='data', alpha=0.7, size=9,va='center', ha='right')\nplt.hlines(yaxis_tick_left, *ax.get_xlim(), color='k', linestyles='--', lw=0.3, alpha=0.3)\nplt.title('The temperature of 2015 broke record high/low of 2005-2014 near Ann Arbor, Michigan US',size=15, alpha=0.8)\n# ===== Remove Axes ticks\nplt.tick_params(top='off', bottom='off', left='off', right='off', labelleft='off', labelbottom='off')\nfor spine in ax.spines.values(): spine.set_visible(False)\nplt.show()\n","sub_path":"NCEI.py","file_name":"NCEI.py","file_ext":"py","file_size_in_byte":6138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"237416655","text":"import ajustador as aju\nfrom ajustador.helpers import save_params,converge\nimport numpy as np\nfrom ajustador import drawing\nimport A2Acre as a2a\nimport os\n#must be in current working directory for this import to work, else use exec\nimport params_fitness,fit_commands\n\n# a. simplest approach is to use CAPOOL (vs CASHELL, and CASLAB for spines)\n# b. no spines\n# c. 
use ghk (and ghkkluge=0.35e-6) once that is working/implemented in moose\nghkkluge=1\n\nmodeltype='d1d2'\nrootdir='/home/avrama/moose/SPN_opt/'\n#use 1 and 3 for testing, 250 and 8 for optimization\ngenerations=200\npopsiz=8\nseed=62938\n#after generations, do 25 more at a time and test for convergence\ntest_size=25\n\n################## neuron /data specific specifications #############\nntype='D1'\nmorph_file='MScell-primDend.p'\ndataname='non05Jan2015_SLH004'\nexp_to_fit = a2a.alldata[dataname][[0,20]] #0,6 are hyperpol\n\ndirname='tmp_'+dataname+str(seed)\nif not dirname in os.listdir(rootdir):\n os.mkdir(rootdir+dirname)\nos.chdir(rootdir+dirname)\n\n######## set up parameters and fitness \nparams,fitness=params_fitness.params_fitness(morph_file,ntype,modeltype,ghkkluge)\n\n########### set-up and do optimization\nfit,mean_dict,std_dict,CV=fit_commands.fit_commands(dirname,exp_to_fit,modeltype,ntype,fitness,params,generations,popsiz, seed, test_size)\n\n#look at results\ndrawing.plot_history(fit, fit.measurement)\n\n#Save parameters of good results from end of optimization, and all fitness values\nstartgood=0 #set to 0 to print all\nthreshold=5 #set to large number to print all\n\nsave_params.save_params(fit, startgood, threshold)\n\n#to save the fit object\n#save_params.persist(fit1,'.')\n#import inspect\n\n#for name, data in inspect.getmembers( fit):\n# print(name, data)\n#from ajustador.helpers.save_param.copy_param import create_npz_param\n#npz_file='fitd1d2-D1-tmp_non05Jan2015_SLH00462938.npz'\n#create_npz_param(npz_file, modeltype,ntype)\n","sub_path":"Str_opt/spnD1_a2a.py","file_name":"spnD1_a2a.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"203579550","text":"\"\"\"\nSome functions are own by BYU-Idaho.\nThis is intended only to be for teaching purposes.\n\"\"\"\nclass BST:\n\n class Node:\n\n def __init__(self, data):\n\n self.data = data\n self.left = None\n self.right = None\n\n def __init__(self):\n\n self.root = None\n\n def insert(self, data):\n\n if self.root is None:\n self.root = BST.Node(data)\n else:\n self._insert(data, self.root)\n\n def _insert(self, data, node):\n\n if data != node.data:\n if data < node.data:\n if node.left is None:\n node.left = BST.Node(data)\n else:\n self._insert(data, node.left)\n else:\n if node.right is None:\n node.right = BST.Node(data)\n else:\n self._insert(data, node.right)\n\n def __contains__(self, data):\n\n return self._contains(data, self.root)\n\n def _contains(self, data, node):\n\n if self.root is not None:\n if data < node.data:\n if node.left is None:\n return False\n else:\n return self._contains(data, node.left)\n elif data > node.data:\n if node.right is None:\n return False\n else:\n return self._contains(data, node.right)\n else:\n return True\n\n def __iter__(self):\n\n yield from self._traverse_forward(self.root)\n \n def _traverse_forward(self, node):\n\n if node is not None:\n yield from self._traverse_forward(node.left)\n yield node.data\n yield from self._traverse_forward(node.right)\n \n def __reversed__(self):\n \n yield from self._traverse_backward(self.root)\n\n def _traverse_backward(self, node):\n\n if node is not None:\n yield from self._traverse_backward(node.right)\n yield node.data\n yield from self._traverse_backward(node.left)\n\n def get_height(self):\n\n if self.root is None:\n return 0\n else:\n return self._get_height(self.root)\n\n def _get_height(self, node):\n\n if node != None:\n leftHeight = 
self._get_height(node.left)\n rightHeight = self._get_height(node.right)\n if leftHeight > rightHeight:\n return leftHeight + 1\n else:\n return rightHeight + 1\n \n return 0\n\n\ndef create_bst_from_sorted_list(sorted_list):\n\n bst = BST()\n _insert_middle(sorted_list, 0, len(sorted_list)-1, bst)\n return bst\n\ndef _insert_middle(sorted_list, first, last, bst):\n\n middle = int((last + first)/2)\n \n if sorted_list:\n bst.insert(sorted_list[middle])\n\n sorted_list_one = sorted_list[:middle]\n _insert_middle(sorted_list_one, 0, len(sorted_list_one)-1, bst)\n \n sorted_list_two = sorted_list[middle+1:]\n _insert_middle(sorted_list_two, 0, len(sorted_list_two)-1, bst)\n\n\n# ---- Solution ----\n\n# Function to get size of tree\ndef get_size(BST):\n size = 0\n for x in BST:\n size = size + 1\n return size\n\n# Create BST with given functions\ntree = create_bst_from_sorted_list([5,3,7,8,1,2])\ntree2 = create_bst_from_sorted_list([2,2,3,3,4,4])\n\n# Call your function and get the size\nsize = get_size(tree)\nsize2 = get_size(tree2)\n\n# Let's show it!\nprint(size) # Returns 6\nprint(size2) # Returns 3","sub_path":"solution-tree.py","file_name":"solution-tree.py","file_ext":"py","file_size_in_byte":3532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"440373277","text":"import smartsheet # Import all necessary packages\r\nimport os\r\nimport time\r\nfrom time import sleep\r\nimport PySimpleGUI as sg\r\nimport xml.etree.ElementTree as ET\r\nimport xlwt as Workbook\r\n\r\nform = sg.FlexForm('Smartsheet') # Create form for GUI from PySimpleGUI\r\nlayout = [ # Create Layout for window\r\n [sg.Text('Please Enter Information')],\r\n [sg.Text('XML File', size=(16, 1)), sg.InputText(''), sg.FileBrowse()], # File browser to locate XML file\r\n [sg.Text('Image Folder', size=(16,1)), sg.InputText(''), sg.FolderBrowse()], # Folder browser for image folder\r\n [sg.Text('API Token', size=(16, 1)), sg.InputText('')],\r\n [sg.Text('New Sheet Name', size = (16,1)), sg.InputText('')],\r\n [sg.Submit(), sg.Cancel()]\r\n ]\r\n\r\nbutton, values = form.Layout(layout).Read() # Read buttons from GUI\r\nform.Close() # Close the window\r\n\r\npath = values[0] # Variable named path to store location from window\r\nimage_path = values[1] # Variable named image_path to store location to image folder from window\r\nAPI_Token = values[2] # Variable named API_Token to store the API Token given by user in window\r\nsheet_name = values[3] # Variable named sheet_name to store desired name of the new smartsheet\r\n\r\nsmart = smartsheet.Smartsheet(API_Token) # Initialize smartsheet client using API Token. Can be thought of as 'logging in'\r\n\r\n\r\ntree = ET.parse(path) # Set up tree element based on the XML file. 
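(For orientation on the ElementTree structure the comment above refers to: a minimal sketch against a hypothetical viewfolder/view layout; the element and attribute names are assumptions, not the script's real input.)

```python
import xml.etree.ElementTree as ET

# Hypothetical miniature of a Navisworks-style viewpoint export.
xml = """<exchange><viewpoints>
  <viewfolder name="Level 1"><viewfolder name="Zone A">
    <view name="NE Corner"/></viewfolder></viewfolder>
</viewpoints></exchange>"""
root = ET.fromstring(xml)
for folder in root.iter('viewfolder'):   # iter() walks every depth
    for view in folder.findall('view'):  # findall() checks direct children only
        print(folder.get('name'), view.get('name'))
```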
This gives the raw XML file a structure \r\nroot = tree.getroot() # Get root, or base on XML file\r\n\r\nnew_sheet = smartsheet.models.Sheet({ # Set up new sheet, give it a name and a bunch of columns because it doesnt start off with any columns\r\n 'name': sheet_name,\r\n 'columns': [{\r\n 'title': 'Primary', # One of the columns must be the primary column\r\n 'primary': True,\r\n 'type': 'TEXT_NUMBER'\r\n }, {\r\n 'title': 'Column 1',\r\n 'type': 'TEXT_NUMBER'\r\n }, {\r\n 'title': 'Column 2',\r\n 'type': 'TEXT_NUMBER'\r\n }, {\r\n 'title': 'Column 3',\r\n 'type': 'TEXT_NUMBER'\r\n }, {\r\n 'title': 'Column 4',\r\n 'type': 'TEXT_NUMBER'\r\n }, {\r\n 'title': 'Column 5',\r\n 'type': 'TEXT_NUMBER'\r\n }, {\r\n 'title': 'Column 6',\r\n 'type': 'TEXT_NUMBER'\r\n }, {\r\n 'title': 'Column 7',\r\n 'type': 'TEXT_NUMBER'\r\n }, {\r\n 'title': 'Column 8',\r\n 'type': 'TEXT_NUMBER'\r\n }, {\r\n 'title': 'Column 9',\r\n 'type': 'TEXT_NUMBER'\r\n }, {\r\n 'title': 'Column 10',\r\n 'type': 'TEXT_NUMBER'\r\n }, {\r\n 'title': 'Column 11',\r\n 'type': 'TEXT_NUMBER'\r\n }, {\r\n 'title': 'Column 12',\r\n 'type': 'TEXT_NUMBER'\r\n }, {\r\n 'title': 'Column 13',\r\n 'type': 'TEXT_NUMBER'\r\n }, {\r\n 'title': 'Column 14',\r\n 'type': 'TEXT_NUMBER'\r\n }, {\r\n 'title': 'Column 15',\r\n 'type': 'TEXT_NUMBER'\r\n }]\r\n})\r\nresponse = smart.Home.create_sheet(new_sheet) # Save the response to the creation of the new sheet\r\nnew_sheet = response.result # Save the response results, which is the sheet object\r\n\r\n#print(new_sheet.id) # Print function to check what the new sheet's ID is \r\nsheet_id = new_sheet.id # Save the new sheet's ID for later use\r\n\r\ndef add_new_row(columnID, sheet_ID, cellInfo): # New function to add rows to a sheet. Need to pass array of columns to use, and sheet ID. 
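(The fifteen near-identical column dicts above could plausibly be generated instead of written out by hand; an equivalent sketch, not the script's actual code.)

```python
# Same 16-column spec as above, built programmatically.
columns = [{'title': 'Primary', 'primary': True, 'type': 'TEXT_NUMBER'}]
columns += [{'title': 'Column {}'.format(i), 'type': 'TEXT_NUMBER'}
            for i in range(1, 16)]
```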
Also need to pass what information is going to fill each new cell\r\n    row_a = smartsheet.models.Row() # Create new row object outside of for loop so it doesn't re-create over the old object each iteration\r\n    row_a.to_bottom = True # Append new row to the bottom of the sheet\r\n    for x in range(0,len(cellInfo)): # For loop to append information for each cell for as long as there is information to append\r\n        row_a.cells.append({ # Append cells\r\n        'column_id': columnID[x], # To append cells, need the column ID \r\n        'value': cellInfo[x] # Can put values or information into new cells\r\n        })\r\n    response = smart.Sheets.add_rows( # Update the sheet with the new rows \r\n        sheet_ID, # Need to give sheet ID\r\n        [row_a]) # Need to give array of row objects, will add rows in order of array\r\n\r\n\r\nviewfolders = [] # Empty array for storing all viewfolder elements\r\nfor x in range(0,len(root)): # Iterate one step down from root to get all viewfolders\r\n    for y in range(0,len(root[x])): # Nested for loop to get \"down\" one level \r\n        viewfolders.append(root[x][y]) # Append all viewfolders to the array for later use\r\n\r\n\r\nviewpoints = [] # Empty array to store all the viewpoint names\r\nfor x in range(0,len(viewfolders)): # For loop to iterate through all the viewfolders\r\n    #print(viewfolders[x].get('name')) # Print function to help see what the names of the folders are\r\n    folder_name = viewfolders[x].get('name') # Save the folder name to a variable for later use\r\n    for y in range(0,len(viewfolders[x])): # For loop to iterate through each folder\r\n        #print(viewfolders[x][y].get('name')) # Print function to see the name of elements inside of each folder\r\n        viewpoint_or_folder_name = viewfolders[x][y].get('name') # Save the name of the elements inside the folder to a variable\r\n        for view in viewfolders[x][y].findall('view'): # For loop to get all the names of anything inside folders found previously\r\n            views = view.get('name') # Save name to a variable\r\n            #print(view.get('name')) # Print to see what name is being stored\r\n            string = folder_name + '_' + viewpoint_or_folder_name + '_' + views # Create a string to concatenate all information taken from the XML file. 
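(A caveat on the underscore-joined key built above, since the script splits it back apart later: names that themselves contain underscores will not round-trip. A toy illustration with invented names follows.)

```python
folder, view = 'Zone_A', 'NE Corner'   # invented names
key = folder + '_' + view
print(key.split('_'))   # ['Zone', 'A', 'NE Corner'], three parts instead of two
safe = '|'.join([folder, view])        # any delimiter the names cannot contain
print(safe.split('|'))  # ['Zone_A', 'NE Corner'], round-trips cleanly
```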
The underscores are added so they can be pulled apart later on\r\n            #string = views + '_' + viewpoint_or_folder_name + '_' + folder_name # Better (After testing it may be worse) way of formatting string to be able to have a variety of folder and viewpoint structure\r\n            viewpoints.append(string)\r\n            \r\n\r\ncolumnIDs = [] # Create empty array to store all the column IDs\r\nfor x in range(0,len(new_sheet.columns)): # Iterate through all the column objects in the new sheet\r\n    columnIDs.append(new_sheet.columns[x].id) # Append each column ID to column ID array\r\n    #print(new_sheet.columns[x].id) # Test print function to check if it's storing the right things \r\nfor x in range(0,len(viewpoints)): # Iterate through all the viewpoints \r\n    split_views = viewpoints[x].split('_') # Split all the viewpoint names to parse information \r\n    add_new_row(columnIDs, sheet_id, split_views) # Add new rows to the smartsheet with the information from the split views\r\n\r\n\r\nsheet = smart.Sheets.get_sheet(sheet_id) # Get an updated sheet after the new rows are added to make sure all IDs are there\r\n\r\nfiles = [] # Empty array to hold all file names \r\n\r\nfor entry in os.listdir(image_path): # Iterate through all files in folder path\r\n    files.append(entry) # Append all files in folder to files array\r\n\r\n\r\nviewnames = []\r\nfor x in range(0,len(viewpoints)): # Iterate through all viewpoints to rename images\r\n    if x < 10: # If the index, x, is less than 10\r\n        temp_name = '0' + str(x) + '_' + viewpoints[x] # Add an extra '0' in front to avoid '10' being sorted before '2'. So '2' becomes '02'\r\n        viewnames.append(temp_name)\r\n    else: # Else, so if the index, x, is greater than or equal to 10\r\n        temp_name = str(x) + '_' + viewpoints[x] # Rename files with number in front and rename it using the viewpoint name\r\n        viewnames.append(temp_name)\r\n\r\nfor x in range(0,len(files)): # For loop to loop the same amount of times as there are files to upload\r\n    if len(files) >= 30: # If we are uploading 30 files or more, the rate limit will be exceeded\r\n        sleep(5) # To account for this, wait 5 seconds per upload. This is very slow but hitting the rate limit makes it even slower\r\n    smart.Attachments.attach_file_to_row( # Smartsheet API function to attach a file to a row\r\n        sheet_id, # Need to give sheet ID\r\n        sheet.rows[x].id_, # Pass through the row ID in accordance to the for loop, so it'll go down sequentially\r\n        (str(viewnames[x] + '.jpg'), # Specify the attachment's name\r\n        open(str(image_path) + '//' + str(files[x]), 'rb'), # Specify the location of the attachment so it can be uploaded\r\n        'application/msword') # I'm 99% sure this part does not do anything but I am afraid to take it out\r\n    ) ","sub_path":"Scripts/Navis to Smartsheet.py","file_name":"Navis to Smartsheet.py","file_ext":"py","file_size_in_byte":11623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"300918284","text":"#! 
/usr/bin/python3\n\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import scale\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\n\ndata=pd.read_csv('finall.csv')\ndata=data[:10000]\n\ny=data.pop('result')\nx=data\nx,y=np.array(x), np.array(y)\n\nx, x_test, y, y_test = train_test_split(\n x, y, test_size=0.2, random_state=42)\n\nmodel = tf.keras.models.Sequential([\n tf.keras.layers.Dense(6),\n tf.keras.layers.Dense(36, activation=tf.nn.sigmoid),\n tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)\n])\nmodel.compile(optimizer='adam',\n loss='mse',\n metrics=['accuracy'])\n\nmodel.fit(x, y, epochs=500,verbose=2)\nprint(model.evaluate(x_test, y_test))\n\n","sub_path":"tf.py","file_name":"tf.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"53068848","text":"def run_simulation():\n with open('./data/MorningTraffic-b.csv', 'r') as file:\n content = file.readlines()\n content = [x.strip() for x in content]\n timestep = 0\n while True:\n if len(content) > 0:\n parts = content[0].split(';')\n start_time = parts[0]\n if int(start_time) == timestep:\n start_floor = parts[1]\n dest_floor = parts[2]\n print('Time:'+str(timestep)+'. Elevator ordered from: '+start_floor+', to: '+dest_floor)\n content.pop(0)\n else:\n print('Time:'+str(timestep)+'. Nobody wants an elevator now')\n timestep += 1\n\n else:\n print('Simulation over')\n break\n\nrun_simulation()\n","sub_path":"simulate.py","file_name":"simulate.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"581577889","text":"\"\"\"\nThe Python standard library's 'calendar' module allows you to\nrender a calendar to your terminal.\nhttps://docs.python.org/3.6/library/calendar.html\n\nWrite a program that accepts user input of the form\n `14_cal.py [month] [year]`\nand does the following:\n- If the user doesn't specify any input, your program should\n print the calendar for the current month. The 'datetime'\n module may be helpful for this.\n- If the user specifies one argument, assume they passed in a\n month and render the calendar for that month of the current year.\n- If the user specifies two arguments, assume they passed in\n both the month and the year. Render the calendar for that\n month and year.\n- Otherwise, print a usage statement to the terminal indicating\n the format that your program expects arguments to be given.\n Then exit the program.\n\nNote: the user should provide argument input (in the initial call to run the file) and not \nprompted input. 
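(A note on the Keras snippet above: with a single sigmoid output trained against a 0/1 'result' column, 'binary_crossentropy' is the conventional loss rather than 'mse'. A minimal variant, assuming the same layer shapes:)

```python
import tensorflow as tf

model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(36, activation='sigmoid'),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam',
              loss='binary_crossentropy',  # conventional pairing with a sigmoid output
              metrics=['accuracy'])
```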
Also, the brackets around year are to denote that the argument is\noptional, as this is a common convention in documentation.\n\nThis would mean that from the command line you would call `python3 14_cal.py 4 2015` to \nprint out a calendar for April in 2015, but if you omit either the year or both values, \nit should use today’s date to get the month and year.\n\"\"\"\n\nimport sys\nimport calendar\nfrom datetime import datetime\n\n# if user doesn't specify input, display current month and current year\nif len(sys.argv) == 1:\n print(calendar.month(datetime.now().year, datetime.now().month))\n\n# if user only provides one argument, display the provided month and the current year\nelif len(sys.argv) == 2:\n # check if argument is a number\n if sys.argv[1].isnumeric():\n # typecast the argument from a string to an int\n month = int(sys.argv[1])\n # if the provided argument is in the range of 1 to 12, display calendar for that month\n if month > 0 and month < 13:\n print(calendar.month(datetime.now().year, month))\n else:\n print(\"Please provide a number from 1 to 12 for the calendar month you'd like to see\")\n # if the argument provided isn't a number, throw an error\n else:\n print(\"Please provide a valid number for the calendar month you'd like to see\")\n\n# if user provides two arguments, display provided month and provided year\nelif len(sys.argv) == 3:\n # check if argument is a number\n if sys.argv[1].isnumeric() and sys.argv[2].isnumeric():\n # typecast the arguments from a string to an int\n month = int(sys.argv[1])\n year = int(sys.argv[2])\n # check if the month is in the range of 1 to 12\n if month > 0 and month < 13:\n print(calendar.month(year, month))\n else:\n print(\"Please provide a number from 1 to 12 for the calendar month you'd like to see\")\n\n # if the argument provided isn't a number, throw an error\n else:\n print(\"Please provide proper argument input. 
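(One common alternative to hand-rolled sys.argv checks like the ones in this script is argparse with optional positionals; a sketch in the same spirit, not the assignment's required form.)

```python
import argparse
import calendar
from datetime import datetime

parser = argparse.ArgumentParser(usage='14_cal.py [month] [year]')
parser.add_argument('month', nargs='?', type=int,
                    choices=range(1, 13), default=datetime.now().month)
parser.add_argument('year', nargs='?', type=int, default=datetime.now().year)
args = parser.parse_args()
print(calendar.month(args.year, args.month))
```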
When invoking this file, the format should look like 14_cal.py [month] [year] or just 14_cal.py for a calendar of the current month and year\")","sub_path":"src/14_cal.py","file_name":"14_cal.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"37871794","text":"from flask import Flask\nimport json\n\napp = Flask(__name__)\n\n'''\n'''\n@app.route('/')\ndef root():\n    t = {\n        'a': 1,\n        'b': 2,\n        'c': [3, 4, 5, 6]\n    }\n    return json.dumps(t)\n\n\nif __name__ == '__main__':\n    app.debug = True\n    app.run()\n","sub_path":"run_flask.py","file_name":"run_flask.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"323922243","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport numpy as np\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport matplotlib.pyplot as plt\nimport time\nimport os\nimport copy\nfrom tqdm import tqdm\n\n\n# #### Data Load\n\n# In[2]:\n\n\ndata_transforms = {\n    'train': transforms.Compose([\n        transforms.RandomResizedCrop(224),\n        transforms.RandomHorizontalFlip(),\n        transforms.ToTensor(),\n        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n    ]),\n    'val': transforms.Compose([\n        transforms.Resize(256),\n        transforms.CenterCrop(224),\n        transforms.ToTensor(),\n        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n    ]),\n}\n\ndata_dir = 'image_result/'\nimage_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),\n                                          data_transforms[x])\n                  for x in ['train', 'val']}\ndataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,\n                                             shuffle=True, num_workers=4)\n              for x in ['train', 'val']}\ndataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}\nclass_names = image_datasets['train'].classes\n\n\n# #### Model setting\n\n# In[3]:\n\n\nresnet = models.resnet18(pretrained = True)\n\n\n# In[4]:\n\n\nnum_ftrs = resnet.fc.in_features\nresnet.fc = nn.Linear(num_ftrs,2)\n\n\n# In[5]:\n\n\ncriterion = nn.CrossEntropyLoss()\n\noptimizer = optim.SGD(resnet.parameters(), lr = 0.001)\n\nexp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)\n\n\n# In[8]:\n\n\ndef train_model(model, criterion, optimizer, scheduler, num_epochs=25):\n    since = time.time() #record the start time (to compute the total elapsed time later)\n\n    best_model_wts = copy.deepcopy(model.state_dict())\n    best_acc = 0.0\n\n    for epoch in range(num_epochs):\n        print('Epoch {}/{}'.format(epoch, num_epochs - 1)) #report the current epoch\n        print('-' * 10)\n\n        # Each epoch has a training and validation phase\n        for phase in ['train', 'val']: #run the train phase, then the validation phase\n            if phase == 'train':\n                scheduler.step()\n                model.train() # Set model to training mode\n            else:\n                model.eval() # Set model to evaluate mode\n\n            running_loss = 0.0\n            running_corrects = 0\n\n            # Iterate over data.\n            for inputs, labels in dataloaders[phase]: #fetch each batch and its labels from the dataloader\n                inputs = inputs #inputs stay on the CPU here (no .to(device) transfer in this version)\n                labels = labels #labels likewise stay on the CPU\n\n                # zero the parameter gradients\n                optimizer.zero_grad() #reset the gradients to zero\n\n                # forward\n                # track history if only in train\n                with torch.set_grad_enabled(phase == 'train'):\n                    outputs = model(inputs)\n                    _, preds = torch.max(outputs, 1) #take the highest-scoring class from the final layer as the prediction\n                    loss = criterion(outputs, labels) \n\n                    # backward + optimize 
only if in training phase\n                    if phase == 'train': # update the weights only in training mode\n                        loss.backward() #backward\n                        optimizer.step()\n\n                # statistics\n                running_loss += loss.item() * inputs.size(0)\n                running_corrects += torch.sum(preds == labels.data)\n\n            epoch_loss = running_loss / dataset_sizes[phase]\n            epoch_acc = running_corrects.double() / dataset_sizes[phase]\n\n            print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n                phase, epoch_loss, epoch_acc))\n\n            # deep copy the model\n            if phase == 'val' and epoch_acc > best_acc:\n                best_acc = epoch_acc\n                best_model_wts = copy.deepcopy(model.state_dict())\n\n        print()\n\n    time_elapsed = time.time() - since\n    print('Training complete in {:.0f}m {:.0f}s'.format(\n        time_elapsed // 60, time_elapsed % 60))\n    print('Best val Acc: {:4f}'.format(best_acc))\n\n    # load best model weights\n    model.load_state_dict(best_model_wts)\n    return model\n\n\n# In[9]:\n\n\nmodel_ft = train_model(resnet, criterion, optimizer, exp_lr_scheduler,\n                       num_epochs=20)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Build Model.py","file_name":"Build Model.py","file_ext":"py","file_size_in_byte":4577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"322647065","text":"from __future__ import division\nimport numpy as np\nclass witten_bell:\n    def __init__(self):\n        self.info=np.load(\"info_wb.dict.npy\").item()\n        self.unigram=np.load(\"unigram.dict.npy\").item()\n        self.bigram=np.load(\"bigram.dict.npy\").item()\n        self.trigram=np.load(\"trigram.dict.npy\").item()\n        self.n1w1=np.load(\"n1w1.dict.npy\").item()\n        self.n1w1w2=np.load(\"n1w1w2.dict.npy\").item()\n        self.unigram_wb=dict()\n        self.bigram_wb=dict()\n        self.trigram_wb=dict()\n\n    def unigram_smooth(self):\n        for word in self.unigram:\n            self.unigram_wb[word]=(self.info[\"chEp\"]/(self.info[\"chEp\"]+self.info[\"n1Ep\"]))*(self.unigram[word]/self.info[\"chEp\"])\n\n    def bigram_smooth(self):\n        for ngram in self.bigram:\n            self.bigram_wb[ngram]=(self.unigram[ngram[0]]/(self.unigram[ngram[0]]+self.n1w1[ngram[0]]))*(self.bigram[ngram]/self.unigram[ngram[0]]) + (self.n1w1[ngram[0]]/(self.unigram[ngram[0]]+self.n1w1[ngram[0]]))*self.unigram_wb[ngram[0]]\n\n    def trigram_smooth(self):\n        for ngram in self.trigram:\n            tup=(ngram[0],ngram[1])\n            self.trigram_wb[ngram]=(self.bigram[tup]/(self.bigram[tup]+self.n1w1w2[tup]))*(self.trigram[ngram]/self.bigram[tup]) + (self.n1w1w2[tup]/(self.bigram[tup]+self.n1w1w2[tup]))*self.bigram_wb[(ngram[0],ngram[1])]\n","sub_path":"codes/witten_bell.py","file_name":"witten_bell.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"292503104","text":"from rest_framework.test import APITestCase\nfrom mall.models import BaseProductModel, GameModel, CategoryModel\n\n\nclass ProductModelTester(APITestCase):\n    def test_create_category(self):\n        g = GameModel.objects.create(platform=GameModel.PS, name='test game', price=666)\n\n        cate = []\n        for i in CategoryModel.objects.filter(product=g):\n            cate.append(i.name)\n\n        self.assertListEqual(cate, ['all', 'game'])\n\n    def test_base_product_attr(self):\n        g = GameModel.objects.create(platform=GameModel.PS, name='test game', price=666)\n        base = g.baseproductmodel_ptr\n        self.assertJSONEqual(base.attr, {'platform': GameModel.PS})\n\n\n\n","sub_path":"tests/model_test.py","file_name":"model_test.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"651379834","text":"# Convert output of fusibile to DTU evaluation format.\n# By: Jiayu Yang\n# Date: 2020-03-30\n\nimport os\nfrom os import listdir\n\nfusibile_out_folder=\"../outputs-dtu/\"\ndtu_eval_folder=\"../outputs-dtu/\"\n\nif not os.path.isdir(dtu_eval_folder):\n os.mkdir(dtu_eval_folder)\n\nscans = [\"scan1\", \"scan4\", \"scan9\", \"scan10\", \"scan11\",\n \"scan12\", \"scan13\", \"scan15\", \"scan23\", \"scan24\",\n \"scan29\", \"scan32\", \"scan33\", \"scan34\", \"scan48\",\n \"scan49\", \"scan62\", \"scan75\", \"scan77\", \"scan110\",\n \"scan114\", \"scan118\"]\n\nfor scan in scans:\n # Move ply to dtu eval folder and rename\n scan_folder = os.path.join(fusibile_out_folder, scan, \"points_mvsnet\")\n consis_folders = [f for f in listdir(scan_folder) if f.startswith('consistencyCheck-')]\n \n consis_folders.sort()\n consis_folder = consis_folders[-1]\n source_ply = os.path.join(fusibile_out_folder, scan, \"points_mvsnet\", consis_folder, 'final3d_model.ply')\n #print(\"source :{}\".format(source_ply))\n #source_ply = os.path.join(fusibile_out_folder,scan,'consistencyCheck/final3d_model.ply')\n scan_idx = int(scan[4:])\n target_ply = os.path.join(dtu_eval_folder,'mvsnet{:03d}_l3.ply'.format(scan_idx))\n\n cmd = 'mv '+source_ply+' '+target_ply\n\n print(cmd)\n os.system(cmd)\n","sub_path":"tools/fusibile_to_dtu_eval.py","file_name":"fusibile_to_dtu_eval.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"479593018","text":"from tkinter import *\nimport time\nimport random\n\nmultiplier = 10#int(input(\"size multiplier: \"))\n\nroot = Tk()\n\ncanvas = Canvas(root,width = 120 * multiplier,height = 120 * multiplier,background = \"white\")\ncanvas.pack()\n\ncreatureList = []\nnumOfGens = 2000#int(input(\"number of generations: \"))\ngeneration = 1\nfitnesses = []\ntopFitnesses = []\nnumOfTopFittnesses = 1#int(input(\"number of surviving creatures: \"))\nnumCreatures = 1000#int(input(\"number of creatures: \"))\ncreatureRepeats = numCreatures/numOfTopFittnesses\ncreatureMoveChanges = 12#int(input(\"number of mutations: \"))\n\ncreatureXPos = 0\ncreatureYPos = 1\ncreatureDirections = 2\ncreatureFitness = 3\ncreatureGraphics = 4\n\nright = 1\nleft = 2\nup = 3\ndown = 4\n\nplayFieldX = 100\nplayFieldY = 100\n\nnumMoves = playFieldX + playFieldY\n\nfinnish = [playFieldX,playFieldY]\n\nfinnishlLine = canvas.create_rectangle((playFieldX + 4)*multiplier,(playFieldY + 4)*multiplier,(playFieldX+14)*multiplier,(playFieldY+14)*multiplier,fill = \"green\")\n\ndef move(creature):\n i = 0\n while i < numMoves:\n direction = creature[creatureDirections][i]\n\n if direction == right:\n if creature[creatureXPos] < playFieldX:\n if creature[creatureYPos] > playFieldY/2 or not creature[creatureXPos] == playFieldX/2:\n creature[creatureXPos] += 1\n elif direction == left:\n if creature[creatureXPos] > 0:\n if not creature[creatureYPos] > playFieldY/2 or not creature[creatureXPos] == playFieldX/2:\n creature[creatureXPos] -= 1\n elif direction == up:\n if creature[creatureYPos] < playFieldY:\n creature[creatureYPos] += 1\n else:\n if creature[creatureYPos] > 0:\n creature[creatureYPos] -= 1\n i += 1\n\n creature[creatureGraphics] = canvas.create_rectangle((creature[0] + 5)*multiplier,(creature[1] + 5)*multiplier,(creature[0] - 5)*multiplier,(creature[1] - 5)*multiplier, fill = \"red\",width = multiplier)\n return creature\ni = 0\nwhile i < numCreatures:\n directions = []\n j = 0\n while j < 
numMoves:\n directions.append(random.randint(1,4))\n j += 1\n creature = [0,0,directions,0]\n creature.append(0)\n creatureList.append(creature)\n i += 1\n\ni = 0\n\nwhile generation <= numOfGens:\n\n fitnesses = []\n print(\"generation: \" + str(generation))\n i = 0\n while i < numCreatures:\n root.update()\n # time.sleep(1/(numCreatures*10))\n creatureList[i] = move(creatureList[i])\n creatureList[i][creatureFitness] = (creatureList[i][creatureXPos] - playFieldX) + (creatureList[i][creatureYPos] - playFieldY)\n fitnesses.append(creatureList[i])\n\n i += 1\n\n i = 0\n while i < numCreatures:\n canvas.itemconfig(creatureList[i][creatureGraphics],fill = \"blue\")\n i += 1\n\n end = []\n i = 0\n while i < numCreatures:\n\n creature = fitnesses[i]\n fitnessNum = creature[creatureFitness]\n\n if i == 0:\n end.append(creature)\n\n elif fitnessNum < end[0][creatureFitness]:\n end.insert(0,creature)\n\n elif fitnessNum >= end[0][creatureFitness]:\n isBigest = True\n j = 0\n while j < len(end):\n if fitnessNum < end[j][creatureFitness]:\n end.insert(j,creature)\n isBigest = False\n break\n j += 1\n if isBigest:\n end.append(creature)\n else:\n print(\"what?\")\n i += 1\n\n fitnesses = end\n\n i = 0\n while i < len(fitnesses) - numOfTopFittnesses:\n canvas.delete(fitnesses[0][creatureGraphics])\n fitnesses.remove(fitnesses[0])\n\n if fitnesses[0][creatureFitness] == 0:\n time.sleep(1)\n generation = numOfGens\n\n print(\"top fitness: \" + str(fitnesses[0][creatureFitness]))\n\n generation += 1\n\n creatureRepeats = numCreatures/numOfTopFittnesses\n\n creatureList = []\n i = 0\n while i < numOfTopFittnesses:\n l = 0\n while l < creatureRepeats:\n directions = list(fitnesses[i][creatureDirections])\n\n z = 0\n while z < creatureMoveChanges:\n changedDirection = random.randint(0,numMoves - 1)\n directions[changedDirection] = random.randint(1,4)\n z += 1\n\n creature = [0,0,directions,0]\n creature.append(0)\n l += 1\n\n creatureList.append(creature)\n i += 1\n","sub_path":"mazeAIv5.py","file_name":"mazeAIv5.py","file_ext":"py","file_size_in_byte":4521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"27609678","text":"from pyexplain.utils.utils import get_expl\nimport time\nfrom pyexplain.solvers.bestStep import optimalPropagate\nfrom ..solvers.params import BestStepParams, ExplanationComputer\nfrom pysat.formula import CNF\nfrom pysat.solvers import Solver\n\nimport json\nfrom pathlib import Path\n\nclass CSPExplain(object):\n def __init__(self, C: CNF, verbose=0, matching_table=None):\n self.cnf = C\n\n self.verbose = verbose\n\n if self.verbose > 0:\n print(\"Expl:\")\n print(\"\\tcnf:\", len(C.clauses), C.nv)\n print(\"\\n\\tcnf:\", C.clauses)\n\n # Initialise the sat solver with the cnf\n self.sat = Solver(bootstrap_with=C.clauses)\n assert self.sat.solve(), f\"CNF is unsatisfiable\"\n\n # explanation sequence\n self.E = []\n\n # matching table\n self.matching_table = matching_table\n\n # initial interpretation\n self.I0 = None\n self.I = None\n self.Iend = None\n\n # keeping track of the statistics\n self.time_statisitics = {\n \"totalTime\": 0,\n \"hs\": [],\n \"opt\": [],\n \"sat\": [],\n \"grow\": [],\n \"explain\": [],\n \"cumul_explain\": [],\n \"prop\": [],\n \"mus\":[],\n \"greedyBestStep\":[],\n \"preprocess\":0,\n \"preseeding\":[],\n \"postprocessing\":[],\n \"timeout\": 0,\n \"timedout\": False\n }\n\n # keep track of the calls\n self.call_statistics = {\n \"hs\": [],\n \"opt\": [],\n \"sat\": [],\n \"grow\": [],\n \"skipped\": 
[],\n            \"prop\": 0,\n            \"explained\":0,\n        }\n\n    def bestStep(self, f, Iend, I):\n        raise NotImplementedError(\"Please implement this method\")\n\n    def preprocess(self, U: set, f, I0: set, Iend: set):\n        # checking everything is correct\n        if self.verbose > 0:\n            print(\"\\tU:\", len(U))\n            print(\"\\tf:\", f)\n            print(\"\\tI0:\", len(I0))\n            print(\"\\tIend:\", len(Iend))\n\n    def reset_statistics(self):\n        # keeping track of the statistics\n        self.time_statisitics = {\n            \"totalTime\": 0,\n            \"hs\": [],\n            \"opt\": [],\n            \"sat\": [],\n            \"grow\": [],\n            \"explain\": [],\n            \"cumul_explain\": [],\n            \"prop\": [],\n            \"mus\":[],\n            \"greedyBestStep\":[],\n            \"preprocess\":0,\n            \"preseeding\":[],\n            \"postprocessing\":[],\n            \"timeout\": 0,\n            \"timedout\": False\n        }\n\n        # keep track of the calls\n        self.call_statistics = {\n            \"hs\": [],\n            \"opt\": [],\n            \"sat\": [],\n            \"grow\": [],\n            \"skipped\": [],\n            \"prop\": 0,\n            \"explained\":0\n        }\n\n    def explain_1_lit(self, lit, f, I0: set):\n        assert type(lit) is int, f\"Type of given lit is {type(lit)} expected int.\"\n        assert type(I0) is set, f\"Type of given initial interpretation is {type(I0)} expected set.\"\n\n        U = set(abs(l) for l in I0) | set({abs(lit)})\n        I = I0\n\n        tstart = time.time()\n        Iend = optimalPropagate(U=U, I=I0, sat=self.sat)\n        self.time_statisitics[\"prop\"].append(time.time() - tstart)\n\n        # keep track of explanation config-specific information\n        tstart = time.time()\n        self.preprocess(U, f, I0, Iend)\n        self.time_statisitics[\"preprocess\"] = time.time() - tstart\n\n        tstart = time.time()\n        expl = self.bestStep(f, Iend, I)\n        self.time_statisitics[\"explain\"].append(time.time() - tstart)\n\n        # difficulty of explanation\n        costExpl = sum(f(l) for l in expl)\n\n        # facts & constraints used\n        Ibest = I & expl\n\n        # New information derived \"focused\" on\n        tstart = time.time()\n        Nbest = optimalPropagate(U=U, I=Ibest, sat=self.sat) - I0\n        self.time_statisitics[\"prop\"].append(time.time() - tstart)\n\n\n        return {\n            \"constraints\": list(Ibest),\n            \"derived\": list(Nbest),\n            \"cost\": costExpl\n        }\n\n    def explain_1_step(self, U: set, f, I0: set):\n        assert type(U) is set, f\"Type of given User variables is {type(U)} expected set.\"\n        assert type(I0) is set, f\"Type of given initial interpretation is {type(I0)} expected set.\"\n\n        # check literals of I are all user vocabulary\n        assert all(True if abs(lit) in U else False for lit in I0), f\"Part of supplied literals not in U (user variables): {[lit for lit in I0 if abs(lit) not in U]}\"\n\n        # Initialise the sat solver with the cnf\n        assert self.sat.solve(assumptions=I0), f\"CNF is unsatisfiable with given assumptions {I0}.\"\n\n        # Most precise intersection of all models of C project on U\n        tstart = time.time()\n        Iend = optimalPropagate(U=U, I=I0, sat=self.sat)\n        self.time_statisitics[\"prop\"].append(time.time() - tstart)\n\n        # keep track of explanation config-specific information\n        tstart = time.time()\n        self.preprocess(U, f, I0, Iend)\n        self.time_statisitics[\"preprocess\"] = time.time() - tstart\n\n        tstart = time.time()\n        expl = self.bestStep(f, Iend, I0)\n        self.time_statisitics[\"explain\"].append(time.time() - tstart)\n\n        # difficulty of explanation\n        costExpl = sum(f(l) for l in expl)\n\n        # facts & constraints used\n        Ibest = I0 & expl\n\n        # New information derived \"focused\" on\n        tstart = time.time()\n        Nbest = optimalPropagate(U=U, I=Ibest, sat=self.sat) - I0\n        self.time_statisitics[\"prop\"].append(time.time() - tstart)\n\n        return {\n            \"constraints\": list(Ibest),\n            \"derived\": list(Nbest),\n            \"cost\": costExpl\n        
}\n\n    def explain(self, U: set, f, I0: set):\n        # check literals of I are all user vocabulary\n        assert all(True if abs(lit) in U else False for lit in I0), f\"Part of supplied literals not in U (user variables): {[lit for lit in I0 if abs(lit) not in U]}\"\n\n        # Initialise the sat solver with the cnf\n        assert self.sat.solve(assumptions=I0), f\"CNF is unsatisfiable with given assumptions {I0}.\"\n\n        # Explanation sequence\n        self.E = []\n\n        I0 = set(I0)\n        tstart_explain = time.time()\n\n        # Most precise intersection of all models of C project on U\n        tstart = time.time()\n        Iend = optimalPropagate(U=U, I=I0, sat=self.sat)\n        self.time_statisitics[\"prop\"].append(time.time() - tstart)\n\n        # keep track of information\n        tstart = time.time()\n        self.preprocess(U, f, I0, Iend)\n        self.time_statisitics[\"preprocess\"] = time.time() - tstart\n\n        I = set(I0) # copy\n        while(len(Iend - I) > 0):\n            # finding the next best explanation\n            tstart = time.time()\n            # OUS/MUS\n            expl = self.bestStep(f, Iend, I)\n            self.time_statisitics[\"explain\"].append(time.time() - tstart)\n            self.time_statisitics[\"cumul_explain\"].append(time.time() - tstart_explain)\n\n            # difficulty of explanation\n            costExpl = sum(f(l) for l in expl)\n\n            # facts & constraints used\n            Ibest = I & expl\n\n            tstart = time.time()\n            # New information derived \"focused\" on\n            Nbest = optimalPropagate(U=U, I=Ibest, sat=self.sat) - I\n            self.time_statisitics[\"prop\"].append(time.time() - tstart)\n\n            assert len(Nbest - Iend) == 0\n\n            self.E.append({\n                \"constraints\": list(Ibest),\n                \"derived\": list(Nbest),\n                \"cost\": costExpl\n            })\n\n            I |= Nbest\n            self.call_statistics[\"explained\"] += len(Nbest)\n\n            if self.verbose > 0:\n                print(f\"\\n\\tElapsed time=\", round(time.time() - tstart_explain), \"s\")\n                print(get_expl(self.matching_table, Ibest, Nbest))\n\n        self.time_statisitics[\"totalTime\"] = time.time() - tstart_explain\n        return list(self.E)\n\n    def print_statistics(self):\n        print(\"texpl=\", round(self.time_statisitics[\"explain\"][-1], 2), \"s\\n\")\n\n    def to_json_expl(self, f, explanation):\n        constraints = list(explanation[\"constraints\"])\n        derived = list(explanation[\"derived\"])\n\n        json_explanation = {\n            \"cost\": sum(f(l) for l in constraints),\n            \"clue\": None,\n            \"assumptions\": [],\n            \"derivations\": []\n        }\n\n        for fact in derived:\n            json_fact = self.matching_table['bvRel'][abs(fact)]\n            json_fact[\"value\"] = True if fact > 0 else False\n            json_explanation[\"derivations\"].append(json_fact)\n\n        clue = []\n        nTrans = 0\n        nBij = 0\n        nClue = 0\n\n        for c in constraints:\n            if(c in self.matching_table['Transitivity constraint']):\n                nTrans += 1\n            elif(c in self.matching_table['Bijectivity']):\n                nBij += 1\n            elif(c in self.matching_table['clues']):\n                nClue += 1\n                clue.append(self.matching_table['clues'][c])\n            else:\n                json_fact = self.matching_table['bvRel'][abs(c)]\n                json_fact[\"value\"] = True if c > 0 else False\n                json_explanation[\"assumptions\"].append(json_fact)\n\n\n        if nClue == 0:\n            if nTrans == 0 and nBij == 1:\n                json_explanation[\"clue\"] = \"Bijectivity\"\n            elif nTrans == 1 and nBij == 0:\n                json_explanation[\"clue\"] = \"Transitivity constraint\"\n            else:\n                json_explanation[\"clue\"] = \"Combination of logigram constraints\"\n        elif nClue == 1:\n            if nTrans + nBij >= 1:\n                json_explanation[\"clue\"] = \"Clue and implicit Constraint\"\n            else:\n                json_explanation[\"clue\"] = clue[0]\n        else:\n            json_explanation[\"clue\"] = \"Multiple clues\"\n\n        return json_explanation\n\n\n    def export_explanations(self, f, fname):\n        assert self.matching_table is not None, 
\"Matching table for explanations not available\"\n\n if not Path(fname).parent.exists():\n Path(fname).parent.mkdir()\n\n file_path = Path(fname)\n json_explanations = []\n\n for explanation in self.E:\n json_explanation = self.to_json_expl(f, explanation)\n json_explanations.append(json_explanation)\n\n with file_path.open('w') as fp:\n json.dump(json_explanations, fp, indent=2)\n\n def export_statistics(self, params: BestStepParams=None, fname=\"\"):\n if fname == \"\":\n return\n\n json_statistics = {\n \"time\": self.time_statisitics,\n \"numbers\": self.call_statistics,\n \"explanation\": self.E,\n 'params': params.to_dict() if params is not None else dict()\n }\n print(\"Statistics exported to\", fname)\n if not Path(fname).parent.exists():\n Path(fname).parent.mkdir(parents=True)\n\n file_path = Path(fname)\n\n with file_path.open('w') as f:\n json.dump(json_statistics, f)\n\n\n def __del__(self):\n if hasattr(self, 'sat') and self.sat:\n self.sat.delete()\n","sub_path":"pyexplain/explain/csp_explain.py","file_name":"csp_explain.py","file_ext":"py","file_size_in_byte":11203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"581590984","text":"# coding: utf-8\n\nN = int(input())\nH = list(map(int, input().split()))\n\ndp = [0] * (N+1)\n# dp[i+1]: H[i]に降りたときの最大移動回数\n\nfor n in range(N):\n i = N - n\n if i == N:\n continue\n\n if H[i-1] >= H[i]:\n dp[i-1] = dp[i] + 1\n else:\n dp[i-1] = 0\n\nresult = max(dp)\n\nprint(result)\n\n","sub_path":"Atcoder/beginner/139/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"624724096","text":"import numpy as np\nfrom scipy import linalg as la\nfrom matplotlib import pyplot as plt\nfrom scipy.stats import linregress\nimport math\nfrom scipy import linalg as la\nimport cmath\n\n# Problem 1\nprint(\"\\n\\n --Problem 1--\\n\")\n\ndef least_squares(A, b): \n # Generate a random matrix and get its reduced QR decomposition via SciPy.\n Q, R = la.qr(A, mode=\"economic\") # Use mode=\"economic\" for reduced QR.\n \n # y = RX = Q.T*b\n y = Q.T @ b \n \n # Answer\n x = la.solve_triangular(R, y)\n return x\n\n# Generate A and return Q_mine and R_mine.\nm = 6\nn = 4\n\nA = np.random.random((m,n))\nb = np.random.random((m,1))\nprint(\"x =\", least_squares(A, b))\n\n\n# Problem 2\nprint(\"\\n\\n --Problem 2--\\n\")\nhousing = np.load('C:/Users/suket/Desktop/Homeworks/Computation/Week3/Problem2/housing.npy')\n\n# 1. Constrcut A and b\nyear = housing[:,0]\nprice_T = housing[:,1]\nprice = np.vstack(price_T)\n\nm_2 = np.size(year)\nconstant_T = np.ones(m_2)\nconstant = np.vstack(constant_T)\n\nA2 = np.column_stack((year, constant))\n\n# 2. least squares\nx_2 = least_squares(A2, price)\na2, b2, rvalue2, pvalue2, stderr2 = linregress(year, price_T)\n\n# Plotting\nplt.scatter(year, price, label = \"Scatter\")\nplt.plot(year, A2@x_2, label = \"Mine\")\nplt.plot(year, a2*year + b2, label = \"Built-in Function\")\nplt.show()\n\n'''\nFor Practice!\n\n# Generate some random data close to the line y = .5x - 3.\nx = np.linspace(0, 10, 20)\ny = .5*x - 3 + np.random.randn(20)\n# Use linregress() to calculate m and b, as well as the correlation\n# coefficient, p-value, and standard error. 
See the documentation for\n# details on each of these extra return values.\na, b, rvalue, pvalue, stderr = linregress(x, y)\nplt.plot(x, y, 'k*', label=\"Data Points\")\nplt.plot(x, a*x + b, 'b-', lw=2, label=\"Least Squares Fit\")\nplt.legend(loc=\"upper left\")\nplt.show()\n'''\n\n# Problem 3\nprint(\"\\n\\n --Problem 3--\\n\")\n# Construct A matrix\nA3_degree_3 = np.vander(year, 4)\nA3_degree_6 = np.vander(year, 7)\nA3_degree_9 = np.vander(year, 10)\nA3_degree_12 = np.vander(year, 13)\n\n# Regression\nx_degree_3 = la.lstsq(A3_degree_3, price)[0]\nx_degree_6 = la.lstsq(A3_degree_6, price)[0]\nx_degree_9 = la.lstsq(A3_degree_9, price)[0]\nx_degree_12 = la.lstsq(A3_degree_12, price)[0]\n\n# Plotting\n\nplt.subplot(221)\nplt.scatter(year, price, label = \"Scatter\")\nplt.plot(year, A3_degree_3@x_degree_3, label = \"degree_3\")\nplt.subplot(222)\nplt.scatter(year, price, label = \"Scatter\")\nplt.plot(year, A3_degree_6@x_degree_6, label = \"degree_6\")\nplt.subplot(223)\nplt.scatter(year, price, label = \"Scatter\")\nplt.plot(year, A3_degree_9@x_degree_9, label = \"degree_9\") \nplt.subplot(224) \nplt.scatter(year, price, label = \"Scatter\")\nplt.plot(year, A3_degree_12@x_degree_12, label = \"degree_12\")\nplt.show()\n\n# Compare np.polyfit()\nx_fit_3 = np.polyfit(year, price, 3)\nx_fit_6 = np.polyfit(year, price, 6)\nx_fit_9 = np.polyfit(year, price, 9)\nx_fit_12 = np.polyfit(year, price, 12)\n\nprint(np.allclose(x_fit_3, x_degree_3))\nprint(np.allclose(x_fit_6, x_degree_6))\nprint(np.allclose(x_fit_9, x_degree_9))\nprint(np.allclose(x_fit_12, x_degree_12), \": when we assume a higher degree,\\\n we can run into trouble like this.\") \n# when we use a higher degree, we can run into trouble like this.\n\n\n# Problem 4\nprint(\"\\n\\n --Problem 4--\\n\")\ndef plot_ellipse(a, b, c, d, e):\n    \"\"\"Plot an ellipse of the form ax^2 + bx + cxy + dy + ey^2 = 1.\"\"\"\n    theta = np.linspace(0, 2*np.pi, 200)\n    cos_t, sin_t = np.cos(theta), np.sin(theta)\n    A = a*(cos_t**2) + c*cos_t*sin_t + e*(sin_t**2)\n    B = b*cos_t + d*sin_t\n    r = (-B + np.sqrt(B**2 + 4*A))/(2*A)\n    \n    plt.plot(r*cos_t, r*sin_t, lw=2)\n    plt.gca().set_aspect(\"equal\", \"datalim\")\n\nellipse = np.load('C:/Users/suket/Desktop/Homeworks/Computation/Week3/Problem2/ellipse.npy')\n\nplt.scatter(ellipse[:,0], ellipse[:,1])\nx_4_T = ellipse[:,0]\ny_4_T = ellipse[:,1]\nx_4 = np.vstack(x_4_T)\ny_4 = np.vstack(y_4_T)\n\nm_4 = np.size(x_4)\nconstant_T = np.ones(m_4)\n\nA4 = np.column_stack((x_4*x_4, x_4, x_4*y_4, y_4, y_4*y_4))\nslope = least_squares(A4, constant_T)\n\nplot_ellipse(slope[0], slope[1], slope[2], slope[3], slope[4])\nplt.show()\n\n\n# Problem 5\nprint(\"\\n\\n --Problem 5--\\n\")\ndef Power_method(A, N, tol):\n    m, n = np.shape(A)\n    x_5 = np.random.random((m,1))\n    x_5 = x_5 / np.linalg.norm(x_5)\n\n    for k in range(N):\n        x_5_1 = A @ x_5 \n        x_5_1 = x_5_1 / np.linalg.norm(x_5_1)\n        \n        if np.linalg.norm(x_5_1 - x_5) < tol:\n            break\n        \n        x_5 = x_5_1\n        \n    return x_5_1.T @ A @ x_5_1, x_5_1\n\nA5 = np.random.random((10,10))\nN = 10**5  # exponentiation, not the bitwise XOR 10^5\ntol = 1e-8\neigs, vecs = Power_method(A5, N, tol)\n\nloc = np.argmax(eigs)\nlamb, x = eigs[loc], vecs[:,loc]\nprint(np.allclose(A5.dot(x), lamb*x))\n\n\n# Problem 6\nprint(\"\\n\\n --Problem 6--\\n\")\n\ndef QR_eig(A, N, tol):\n    m, n = np.shape(A)\n    S = la.hessenberg(A)\n    \n    for k in range(N):\n        Q, R = la.qr(S)\n        S = R@Q\n        \n    eigs = []\n    i = 0\n    \n    while i < n:\n        if i == n-1 or abs(S[i+1, i]) < tol:\n            eigs.append(S[i, i])\n        else:\n            a, b, c, d = S[i, i], S[i, i+1], S[i+1, i], S[i+1, i+1]\n            lambda_plus = ( a+d + 
cmath.sqrt((a+d)**2 - 4*(a*d-b*c)) ) / 2\n                lambda_minus = ( a+d - cmath.sqrt((a+d)**2 - 4*(a*d-b*c)) ) / 2 \n                eigs.append(lambda_plus)\n                eigs.append(lambda_minus)\n                i = i + 1\n        i = i + 1\n        \n    return eigs\n\nA6 = np.random.random((5,5))\nA6_sym = A6 + A6.T\nN = 10**100  # exponentiation, not the bitwise XOR 10^100\ntol = 1e-10\n\neig_mine = QR_eig(A6_sym, N, tol)\neig_builtin = la.eig(A6_sym)[0]\n\nprint(eig_mine, \"\\n\")\nprint(eig_builtin)\n\n\n\n\n\n\n","sub_path":"Homeworks/Computation/Week3/Problem2/Problem_set_3_2.py","file_name":"Problem_set_3_2.py","file_ext":"py","file_size_in_byte":5534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"428897248","text":"import unittest\nimport logging\nimport ozawa.tasks.process as ozp\nimport os\n\n\nclass TestEnvironmentVariables(unittest.TestCase):\n    def setUp(self):\n        self.vars = {'ozawa_test1': 'val1', 'ozawa_test2': 'val 2', 'ozawa_test3': 'val \"3\"'}\n        self.task = ozp.EnvironmentVariables(self.vars)\n\n    def tearDown(self):\n        del self.task\n\n    def test_set_variables(self):\n        self.task.execute()\n        for var in self.vars:\n            self.assertEqual(self.vars[var], os.getenv(var))\n\n    def test_unset_existing_variable(self):\n        self.task.execute()\n        for var in self.vars:\n            self.vars[var] = None\n\n        with self.assertLogs(self.task.logger, logging.DEBUG) as log:\n            self.task.execute()\n\n        for var in self.vars:\n            self.assertIsNone(os.getenv(var))\n\n        self.assertIn('INFO:EnvironmentVariables_1', log.output[0])\n","sub_path":"ozawa/test/test_tasks/process/test_environmentvariables.py","file_name":"test_environmentvariables.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"525073656","text":"import os\nimport sys\nimport hdfs\n\n#hdfs.client._Request.webhdfs_prefix = \"/webhdfs/api/v1\"\n_hdfs_client = None\n_hdfs_url = \"http://10.1.5.4:5070\"\n_hdfs_user_name = \"couplet\"\n_hdfs_root = \"/resource//\"\n\n\n\ndef _hdfs_initialize():\n    global _hdfs_client\n    if _hdfs_client is None:\n        _hdfs_client = hdfs.InsecureClient(_hdfs_url, root=_hdfs_root, user=_hdfs_user_name)\n        _hdfs_client.set_permission(_hdfs_root, 777)\n\ndef _hdfs_download(hdfs_path, local_path):\n    _hdfs_initialize()\n    _hdfs_client.download(hdfs_path, local_path)\n\ndef _hdfs_upload(local_path, hdfs_path):\n    _hdfs_initialize()\n    _hdfs_client.makedirs(hdfs_path)\n    _hdfs_client.upload(hdfs_path, local_path, overwrite=True)\n\ndef Download(hdfs_path, local_path):\n    print(\"Downloading from {} to {}\".format(hdfs_path, local_path))\n    os.makedirs(local_path, exist_ok=True)\n    _hdfs_download(hdfs_path, local_path)\n\ndef Upload(local_path, hdfs_path):\n    print(\"Uploading from {} to {}\".format(local_path, hdfs_path))\n    _hdfs_upload(local_path, hdfs_path)","sub_path":"lesson-08/lesson08-HW-LiuWei/openpai-mnist/hdfshelper.py","file_name":"hdfshelper.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"334152007","text":"# -*- coding: utf-8 -*-\n# @Time    : 2018/8/13 2:14 PM\n# @Author  : Rdxer\n# @Email   : Rdxer@foxmail.com\n# @File    : parser_conf.py.py\n# @Software: PyCharm\n\nimport datetime\nimport sys\nimport os\nimport os.path\nimport re\nimport main.parser_conf\nfrom main.parser_conf import *\nfrom main import readExcel,conf\nfrom main.model import AnswerRecord,Question\n\n\n\nimport main.result as result\nfrom main.write_to_file import writeToFile\n\n\ndef filtrFile(dirPath):\n    excelList = []\n    for parent, 
dirnames, filenames in os.walk(dirPath, followlinks=False):\n        for filename in filenames:\n\n            # print('file name: %s' % filename)\n            # print('full file path: %s\\n' % file_path)\n\n            if re.match(conf.excelRegStr, filename) != None:\n                file_path = os.path.join(parent, filename)\n                excelList.append(file_path)\n\n    # for f in excelList:\n    #     print(f)\n    return excelList\n\n\ndef exeCal(excelData):\n    sumDict = {}\n    countDict = {}\n    total = len(excelData) #! number of rows read from the sheet\n    groupDict = {}\n    count9_10 = 0\n    count0_6 = 0\n    rowData: AnswerRecord\n    for rowData in excelData: #! iterate over the sheet rows\n\n        if confobj.nps != -1: #! check whether the cell is a missing value\n            que: Question = rowData.tryGetQuestion(confobj.nps)\n            if que != None: #\n                v = que.tryGetIntValue()\n                if v is not None:\n\n                    if v >= 9 and v <= 10:\n                        count9_10 += 1\n                    elif v >= 0 and v <= 6:\n                        count0_6 += 1\n\n        if confobj.projectIndex != -1:\n            que: Question = rowData.tryGetQuestion(confobj.projectIndex)\n            if que != None:\n                v = que.value\n                if v is not None:\n                    groupRowList = groupDict.get(v, [])\n                    groupRowList.append(rowData)\n                    groupDict[v] = groupRowList\n\n        col: Question\n        for col in rowData.analysisQuestionList:\n\n            sum = sumDict.get(col.index)\n            count = countDict.get(col.index)\n\n            if sum is None:\n                sum = 0\n\n            if count is None:\n                count = 0\n\n            intv = col.tryGetIntValue()\n            if intv is not None:\n                sum += intv\n                count += 1\n\n            sumDict[col.index] = sum\n            countDict[col.index] = count\n\n    print(groupDict.keys())\n    print(\"{} {} {} {}% <<<\".format(count9_10, count0_6, total, (count9_10 - count0_6) / total * 100))\n    print(sumDict)\n    print(\"----\")\n    print(countDict)\n    print(\"======\")\n    # print(excelObject[0])\n    for key in sumDict.keys():\n        co = countDict[key]\n        su = sumDict[key]\n\n        configCalculateItem = confobj.getConfigCalculateItem(key)\n\n        if configCalculateItem.calculateType == conf.configCalculateType_scale:\n            rowCount = len(excelObject[1])\n            scale = co / rowCount\n            scale *= 100\n            print(\"{} {} {}% {}\".format(su, co, scale, rowCount))\n        else:\n            avg = ((su / co) - 1) * 25\n            print(\"{} {} {}\".format(su, co, avg))\n    print(\"XXXXXXXXXXXXXX\")\n\n\nif __name__ == '__main__':\n\n\n    # modelList = readExcel.read(\"/Users/Rdxer/Desktop/原始数据+txt/第一教育.xls\")\n    #\n    # print(modelList)\n    print(\"Please enter the directory path:\\n\")\n    dirPath = input()\n    #dirPath = \"E:/第一资产/配置文件/原始数据+txt(1)/\"\n    excelList = filtrFile(dirPath)\n\n    for excel in excelList:\n        print(excel)\n\n    excelObjectList = []\n\n    for excel in excelList:\n        excelObjectList.append(readExcel.read(excel))\n\n    sheetResList = []\n    for excelObject in excelObjectList:\n\n        confobj:ConfigObject = excelObject[0]\n        excelDataList = excelObject[1]\n\n        # exeCal()\n        sheetRes = result.genSheetResult(confobj.name,excelDataList,confobj)\n\n        sheetResList.append(sheetRes)\n\n    # write results to file\n    writeToFile(dirPath,sheetResList)\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"main/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"532538680","text":"# Flight FM London XBMC Plugin\n# Developer: Android TV Boxes\n# Support: support@androidtvboxes.co.uk\n# Disclaimer: Android TV Boxes do not own or publish the content delivered by the plugin\n# streams and content is owned by DejaVu FM\n\nimport sys\nimport xbmcgui\nimport xbmcplugin\n \naddon_handle = int(sys.argv[1])\n \nxbmcplugin.setContent(addon_handle, 'audio')\n \nurl = 'http://176.31.239.83:9136/'\nli = xbmcgui.ListItem('Studio 1 = Deja Classic >>', iconImage='http://s2.postimg.org/eg7k51z3t/icon.png', 
thumbnailImage='http://www.urbanreadyuk.co.uk/deja%20vu%20icon.jpg')\nli.setProperty('fanart_image', 'http://s18.postimg.org/fnbfwgw3d/fanart.jpg')\nxbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li)\n\nurl = 'http://176.31.239.83:9041/'\nli = xbmcgui.ListItem('Studio 2 = DejaVuLive >>', iconImage='http://s2.postimg.org/eg7k51z3t/icon.png', thumbnailImage= 'http://s2.postimg.org/eg7k51z3t/icon.png')\nli.setProperty('fanart_image', 'http://s18.postimg.org/fnbfwgw3d/fanart.jpg')\nxbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li)\n\n\nxbmcplugin.endOfDirectory(addon_handle)","sub_path":"plugin.audio.DejaVu/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"555869467","text":"\r\n\r\n\r\nimport pkg.cliflo.cliflo as cf\r\nimport pkg.hika_utils.hika_utils as hika\r\nfrom datetime import datetime as dt\r\nfrom datetime import timezone\r\nimport time\r\nimport os\r\nimport psycopg2\r\nimport numpy as np\r\nimport pandas as pd\r\nimport os\r\nos.system('cls' if os.name == 'nt' else 'clear')\r\n\r\n# dictionary containing new column names from cliflo fields\r\n# old column names and what attribute they will be\r\n\r\nCOL_DICT = {\r\n 'agent_no' :['Agent Number', 'INT', 'PRIMARY KEY'],\r\n 'network_no' :['Network Number', 'VARCHAR(50)', 'NOT NULL'],\r\n 'name' :['Name', 'TEXT', 'NOT NULL'],\r\n 'latitude' :['Lat (dec deg, S of equator is neg)', 'FLOAT', 'NOT NULL'],\r\n 'longitude' :['Longitude (dec deg, E of Greenwich is pos e.g. NZ)', 'FLOAT', 'NOT NULL'],\r\n 'precision' :['Position Precision', 'VARCHAR(50)', ''],\r\n 'elevation_m' :['Height above MSL in metres', 'FLOAT', ''],\r\n 'grid_reference' :['Grid Reference (NZ Metric Map Series)', 'VARCHAR(50)', ''],\r\n 'start_date' :['Start Date', 'DATE', ''],\r\n 'end_date' :['End Date', 'DATE' , ''],\r\n 'closed' :['Closed Indicator (Closed = 1)', 'BOOLEAN', 'NOT NULL'],\r\n 'stty_type' :['Stty Station Type', 'VARCHAR(50)', ''],\r\n 'synoptic_no' :['Synoptic Number (World Met. 
Organisation Number)', 'VARCHAR(50)', ''],\r\n 'wra_no' :['WRA No', 'VARCHAR(50)', ''],\r\n 'obs_auth' :['Observing Authority', 'VARCHAR(50)', ''],\r\n 'rain' :['Rain', 'BOOLEAN', 'NOT NULL'],\r\n 'surf_wind_dir' :['Surface Wind Dirn', 'BOOLEAN', 'NOT NULL'],\r\n 'max_gust_dir' :['Max Gust Dirn', 'BOOLEAN', 'NOT NULL'],\r\n 'solar_radiation' :['Solar Radiation', 'BOOLEAN', 'NOT NULL'],\r\n 'earth_temp_10cm' :['10cm Earth Temp', 'BOOLEAN', 'NOT NULL'],\r\n 'earth_temp_20cm' :['20cm Earth Temp', 'BOOLEAN', 'NOT NULL'],\r\n 'earth_temp_30cm' :['30cm Earth Temp', 'BOOLEAN', 'NOT NULL'],\r\n 'earth_temp_100cm' :['100cm Earth Temp', 'BOOLEAN', 'NOT NULL'],\r\n 'dry_bulb_temp' :['Dry Bulb Temp', 'BOOLEAN', 'NOT NULL'],\r\n 'grass_min_temp' :['Grass Min Temp', 'BOOLEAN', 'NOT NULL'],\r\n 'max_temp' :['Max Temp', 'BOOLEAN', 'NOT NULL'],\r\n 'visibility' :['Visibility', 'BOOLEAN', 'NOT NULL'],\r\n 'msl_pressure' :['MSL Pressure', 'BOOLEAN', 'NOT NULL'],\r\n 'wind_run' :['Wind Run', 'BOOLEAN', 'NOT NULL'],\r\n 'utc_time_offset_hr' :['Time Offset (from UTC)', 'INT', 'NOT NULL'],\r\n 'evaporation' :['Evaporation', 'BOOLEAN', 'NOT NULL'],\r\n 'surface_wind_speed' :['Surface Wind Speed', 'BOOLEAN', 'NOT NULL'],\r\n 'max_gust_speed' :['Max Gust Speed', 'BOOLEAN', 'NOT NULL'],\r\n 'sunshine_hours' :['Sunshine Hours', 'BOOLEAN', 'NOT NULL'],\r\n 'wet_bulb_temp' :['Wet Bulb Temp', 'BOOLEAN', 'NOT NULL'],\r\n 'weather_phenonmonen':['Weather Phenomonen', 'BOOLEAN', 'NOT NULL'],\r\n 'min_temp' :['Min Temp', 'BOOLEAN', 'NOT NULL'],\r\n 'cloud_amount' :['Cloud Amount', 'BOOLEAN', 'NOT NULL'],\r\n 'dayl_daylight_area' :['dayl_daylight_area', 'INT', 'NOT NULL']}\r\n\r\nOTHER_DICT = {\r\n 'created_on': ['TIMESTAMP WITH TIME ZONE' , 'NOT NULL'],\r\n 'updated_on': ['TIMESTAMP WITH TIME ZONE' , 'NOT NULL']\r\n}\r\n\r\nSQL_COLS = COL_DICT.keys()\r\nSQL_PARAMS = list(zip(*list(COL_DICT.values())))[1]\r\nSQL_COND = list(zip(*list(COL_DICT.values())))[2]\r\nCLIFLO_COLS = list(zip(*list(COL_DICT.values())))[0]\r\n\r\ndef create_stations_table():\r\n # create table if does not already exist\r\n\r\n cliflo_create = [f\"{key} {value[1]} {value[2]}\" for key, value in COL_DICT.items()]\r\n other_create = [f\"{key} {value[0]} {value[1]}\" for key, value in OTHER_DICT.items()]\r\n sql_statement = f\"\"\"\r\n CREATE TABLE climate_obs (\r\n {', '.join(cliflo_create)},\r\n {', '.join(other_create)}\r\n );\r\n \"\"\"\r\n hika.exec_hika_sql(sql_statement)\r\n\r\ndef insert_station_data(df):\r\n conn = hika.connect_db_user()\r\n cursor = conn.cursor()\r\n try:\r\n insert_string = f\"{', '.join(COL_DICT.keys())}, created_on, updated_on\"\r\n values = tuple(df.values.tolist()[0] + [dt.now(timezone.utc), dt.now(timezone.utc)])\r\n sql_statement = f\"\"\"\r\n INSERT INTO stations (\r\n {insert_string})\r\n VALUES ({', '.join(['%s'] * len(values))})\r\n ON CONFLICT (agent_no)\r\n DO NOTHING;\r\n \"\"\"\r\n cursor.execute(sql_statement, values)\r\n time.sleep(0.1)\r\n print()\r\n except psycopg2.Error as err:\r\n print(err)\r\n cursor.close()\r\n conn.close()\r\n\r\ndef run_etl_first_load(username='NA', password='NA'):\r\n \"\"\"\r\n this function is run if we need to add a new data set. 
e.g. sunshine_hours daily\r\n    \"\"\"\r\n    create_stations_table() #if it already exists it will skip\r\n    existing_stations = []\r\n    cliflo = cf.Cliflo(\r\n        username=username,\r\n        password=password,\r\n        webdriver_path='/mnt/c/webdrivers/chromedriver.exe')\r\n    for key, values in cf.Cliflo.data_type_dict.items():\r\n        for freq in values[1]:\r\n            station_df = cliflo.get_stations(latitude=-37.9, longitude=175.8, radius=1000, data_type=key, freq=freq)\r\n            existing_stations = hika.exec_hika_sql('SELECT agent_no FROM stations;' , data=True)\r\n            existing_stations = existing_stations['agent_no'].to_list()\r\n            potential_stations = pd.to_numeric(station_df['AgentNumber']).to_list()\r\n            stations_list = list(set(potential_stations) - set(existing_stations))\r\n            print(stations_list)\r\n            for df in cliflo.get_station_full(stations_list):\r\n                df = df.loc[:, CLIFLO_COLS]\r\n                df['End Date'] = df['End Date'].replace({'-': None})\r\n                df = df.replace({np.nan: None})\r\n                try:\r\n                    df['Height above MSL in metres'] = df['Height above MSL in metres'].str.strip('m').astype(float)\r\n                except:\r\n                    df['Height above MSL in metres'] = None\r\n                insert_station_data(df)\r\n    cliflo.close()\r\n\r\nif __name__ == '__main__':\r\n    run_etl_first_load(username='mamirodata', password='&MaM!r0niwa&')\r\n    #TODO make a function that only does updates for current stations.","sub_path":"projects/etl_scripts/cliflo_stations_table_etl.py","file_name":"cliflo_stations_table_etl.py","file_ext":"py","file_size_in_byte":6350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"23859563","text":"from flask import Flask, request, jsonify\nfrom flask_sslify import SSLify\nfrom typing import Tuple, Dict\nimport requests, json, re, os\n\napp = Flask(__name__)\nsslify = SSLify(app)\n\n# INSERT YOUR ACCESS TOKEN INSTEAD OF MINE\nACCESS_TOKEN = os.environ.get('ACCESS_TOKEN')\nURL = f'https://api.telegram.org/bot{ACCESS_TOKEN}/'\n\n\ndef send_message_to_user(chat_id: int, text: str) -> str:\n    \"\"\" Sends a response to users on their requests.\n    For these purposes, the Telegram API gives us the sendMessage method,\n    which accepts POST requests as JSON objects.\n\n    :param chat_id: id of chat with a user\n    :param text: the message to send to a user\n    :returns: Telegram response in json-format\n    \"\"\"\n\n    url = f'{URL}sendMessage'\n    telegram_answer = {'chat_id': chat_id, 'text': text,}\n    response = requests.post(url, json=telegram_answer)\n    return response.json()\n\n\ndef parse_user_message_to_telegram(user_message: str) -> Tuple[str]:\n    \"\"\" Extracts the only /<cryptocurrency> text from user_message.\n    Users can send any messages, so we need to get the /<cryptocurrency>\n    text, to give them the response that they need.\n\n    :param user_message: User message to telegram bot to be handled\n    :returns: the name of cryptocurrency without \"/\" symbol\n    \"\"\"\n    cryptocurrency = re.search(r'/\\w+', user_message).group()\n    return cryptocurrency[1:]\n\n\ndef get_cryptocurrency_price(cryptocurrency: str) -> str:\n    \"\"\"Extracts cryptocurrency price from coinmarketcap.com via its API\n\n    :param cryptocurrency: the cryptocurrency whose price should be extracted\n    :returns: the price of the needed cryptocurrency in USD\n    \"\"\"\n    url = f'https://api.coinmarketcap.com/v1/ticker/{cryptocurrency}'\n    response = requests.get(url).json()\n    return response[-1]['price_usd']\n\n\ndef get_user_telegram_chat_info(telegram_response: Dict[str, str]) -> Tuple[int, str]:\n    \"\"\"Retrieves chat_id and message from user telegram chat with bot\n\n    :param telegram_response: Telegram chat 
data with a user\n :returns: user chat_id and user message to Telegram\n \"\"\"\n chat_id = telegram_response['message']['chat']['id']\n message = telegram_response['message']['text']\n return chat_id, message\n\n\n@app.route('/', methods=['POST', 'GET'])\ndef index() -> str:\n \"\"\" Handles user request to the bot and sends a response\n \"\"\"\n if request.method == 'POST':\n telegram_response = request.get_json()\n # Getting chat_id and message from telegram response\n chat_id, message = get_user_telegram_chat_info(telegram_response)\n pattern = r'/\\w+'\n if re.search(pattern, message):\n # Getting cryptocurrency price searched by a user\n price = get_cryptocurrency_price(parse_user_message_to_telegram(message))\n # Sending the price back to a user\n send_message_to_user(chat_id, text=price)\n return jsonify(telegram_response)\n return json.dumps({'Telegram bot is working': True}), 200, {'ContentType':'application/json'}\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"166136477","text":"from django_dicom.models.patient import Patient\nfrom django_dicom.models.series import Series\nfrom django_dicom.models.study import Study\nfrom rest_framework import serializers\n\n\nclass SeriesSerializer(serializers.HyperlinkedModelSerializer):\n \"\"\"\n A `serializer `_ class for the :class:`~django_dicom.models.series.Series` model.\n \n \"\"\"\n\n study = serializers.HyperlinkedRelatedField(\n view_name=\"dicom:study-detail\", queryset=Study.objects.all()\n )\n patient = serializers.HyperlinkedRelatedField(\n view_name=\"dicom:patient-detail\", queryset=Patient.objects.all()\n )\n\n class Meta:\n model = Series\n fields = (\n \"id\",\n \"study\",\n \"patient\",\n \"body_part_examined\",\n \"patient_position\",\n \"number\",\n \"description\",\n \"date\",\n \"time\",\n \"modality\",\n \"protocol_name\",\n \"scanning_sequence\",\n \"sequence_variant\",\n \"pixel_spacing\",\n \"echo_time\",\n \"inversion_time\",\n \"repetition_time\",\n \"flip_angle\",\n \"manufacturer\",\n \"manufacturer_model_name\",\n \"magnetic_field_strength\",\n \"device_serial_number\",\n \"institution_name\",\n \"uid\",\n )\n","sub_path":"django_dicom/serializers/series_serializer.py","file_name":"series_serializer.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"572307837","text":"import math\n\ndef sigmoid(x):\n a = []\n for item in x:\n a.append(1/(1+math.exp(-item)))\n return a\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.arange(-70, 70,1)\nsig = sigmoid(x)\nplt.plot(x,sig)\nplt.title('Sigmoid Weight 1')\nplt.show()\n\nx = np.arange(-70, 70,5)\nsig = sigmoid(x)\nplt.title('Sigmoid Weight 5')\nplt.plot(x,sig)\nplt.show()\n\nx = np.arange(-70,70,100)\nsig = sigmoid(x)\nplt.title('Sigmoid Weight 100')\nplt.plot(x,sig)\nplt.show()","sub_path":"A3/2_Theory_Sigmoid.py","file_name":"2_Theory_Sigmoid.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"522136449","text":"\"\"\"\nTkinter app for running the pa download, cleandata and PAplot scripts\nfrom a simple GUI\n\"\"\"\n\nfrom Tkinter import Tk, Text, BOTH, W, N, E, S, RAISED, StringVar, Menu, Radiobutton, IntVar, OptionMenu\nfrom ttk import Frame, Button, Label, Style, 
Entry\nimport tkFileDialog\n\nimport time\n\nfrom cntrun import download_data\nfrom cntrun import run_FET_series\n\n\n\n\n##HELPER FUNCTIONS##\n\n\n\ndef insertstring(instring,placeholder,value,pad=0):\n # assert type(value)==int\n if placeholder in instring:\n return instring[:instring.find(placeholder)]+str(value).zfill(pad)+instring[instring.find(placeholder)+len(placeholder):]\n else:\n return instring\n\ndef timeStampYMDH():\n # -> 'YYYY_MM_DD_HHMM' as a time stamp\n return time.strftime('%Y_%m_%d_%H%M')\n\ndef make_fname_final(instring,chipval,deviceval,runval):\n postchip=insertstring(instring,\"[chip]\",chipval,pad=3)\n postdev=insertstring(postchip,\"[device]\",deviceval,pad=2)\n postrun=insertstring(postdev, \"[run]\", runval,pad=3)\n posttime=insertstring(postrun,\"[time]\",timeStampYMDH(),pad=0)\n return posttime\n\n\n\n# MAIN PAGE CLASS\n\nclass PAGUI(Frame):\n\n def __init__(self, parent):\n Frame.__init__(self, parent)\n self.parent = parent\n self.initUI()\n\n def initUI(self):\n self.parent.title(\"Parameter Analyzer Control\")\n self.style = Style()\n self.style.theme_use(\"clam\")\n self.pack(fill=BOTH, expand=True)\n\n self.columnconfigure(1, weight=1)\n self.columnconfigure(3, pad=7)\n # self.rowconfigure(3, weight=1)\n # self.rowconfigure(5, pad=7)\n\n #set up instance variables\n self.directory = StringVar()\n self.fname = StringVar()\n self.fname_final=StringVar()\n self.device = StringVar()\n self.chip = StringVar()\n self.run = StringVar()\n\n\n self.toplabel = Label(self, text=\"Dummy label at the top of the widget\",justify=\"center\")\n self.bottomlabel = Label(self, text=\"for help/info see github.com/leobrowning92/pa-control\",justify=\"center\",font=\"arial 11 italic\")\n\n\n self.directory_btn = Button(self, text=\"Directory\", command=self.askdirectory)\n\n self.filename_btn = Button(self, text=\"Filename\", command=self.askfile)\n\n # self.exit_btn = Button(self, text=\"Exit\", command=self.quit)\n # self.exit_btn.grid(row=5, column=0, padx=5)\n\n self.update_btn = Button(self, text=\"Update\", command=self.runUpdate)\n\n self.iterdevice_btn = Button(self, text=\"Iterate [device]\", command=self.newDevice)\n\n self.iterchip_btn = Button(self, text=\"Iterate [chip]\", command=self.newChip)\n self.iterrun_btn = Button(self, text=\"Iterate [run]\", command=self.newRun)\n\n self.chip.set(\"001\")\n self.chipnum_entry = Entry(self, textvariable=self.chip,width=5)\n\n self.device.set(\"01\")\n self.devicenum_entry = Entry(self, textvariable=self.device,width=5)\n\n self.run.set(\"001\")\n self.runnum_entry = Entry(self, textvariable=self.run,width=5)\n\n self.directory_entry = Entry(self, textvariable=self.directory)\n\n self.fname.set('Chip[chip]_[device]_run[run]_somenotes_[INFO]_[time].csv')\n self.fname_final.set(make_fname_final(self.fname.get(),\n self.chip.get(),self.device.get(),self.run.get()))\n\n self.fname_entry = Entry(self, textvariable=self.fname)\n\n self.fname_final_label = Label(self, textvariable=self.fname_final,\n justify=\"center\", font=\"arial 11 italic\")\n\n\n # this button runs pulldata with parameter set by\n # self.datatype, which stores the value of self.radbtn\n self.pulldata_btn = Button(self, text=\"Pull Data\", command=self.pulldata)\n\n self.datarun_btn = Button(self, text=\"Run Data sweep\", command=self.datarun)\n\n\n #datatype=1 => diode, datatype=2 => FET\n self.datatype = IntVar()\n self.datatype1_radiobutton = Radiobutton(\n self, text='Diode (VF, IF)',\n variable=self.datatype, value=1)\n self.datatype2_radiobutton = 
Radiobutton(\n self, text='FET (VG, VDS, ID, IG)',\n variable=self.datatype, value=2)\n\n #grid alignments of all widgets\n self.toplabel.grid(column=0,columnspan=3, sticky=W, pady=4, padx=5)\n\n self.directory_btn.grid(row=1, column=0)\n self.directory_entry.grid(row=1, column=1, columnspan=2,\n padx=5, sticky=E + W)\n\n self.filename_btn.grid(row=2, column=0)\n self.fname_entry.grid(row=2, column=1, columnspan=2,\n padx=5, sticky=E + W)\n self.fname_final_label.grid(row=3,column=1, columnspan=2,\n sticky=N + E + W, pady=4, padx=5)\n\n self.iterchip_btn.grid(row=4, column=0, sticky=N)\n self.chipnum_entry.grid(row=4, column=1,padx=5, sticky=W)\n\n self.iterdevice_btn.grid(row=5, column=0, sticky=N)\n self.devicenum_entry.grid(row=5, column=1,padx=5, sticky=W)\n\n self.iterrun_btn.grid(row=6, column=0, sticky=N)\n self.runnum_entry.grid(row=6, column=1,padx=5, sticky=W)\n\n self.update_btn.grid(row=7, column=0, padx=5)\n self.pulldata_btn.grid(row=7, column=3, padx=5,sticky=E)\n self.datarun_btn.grid(row=6, column=3, padx=5,sticky=E)\n\n self.datatype1_radiobutton.grid(row=7,column=1,padx=5,sticky=N + E + S)\n self.datatype2_radiobutton.grid(row=7,column=2,padx=5,sticky=N + W + S)\n self.bottomlabel.grid(row=8,column=0,columnspan=3, sticky=W, pady=4, padx=5)\n\n\n#action funcctions for the various buttons\n def newChip(self):\n if self.fname.get() == '':\n self.askfile()\n else:\n try:\n self.chip.set(str((int(self.chip.get())+1)).zfill(3))\n self.device.set(str(1).zfill(2))\n except Exception as e:\n print(e)\n self.runUpdate()\n def newRun(self):\n if self.fname.get() == '':\n self.askfile()\n else:\n try:\n self.run.set(str((int(self.run.get())+1)).zfill(3))\n except Exception as e:\n print(e)\n self.runUpdate()\n\n\n\n def newDevice(self):\n if self.fname.get() == '':\n self.askfile()\n else:\n try:\n self.device.set(str((int(self.device.get())+1)).zfill(2))\n except Exception as e:\n print(e)\n self.runUpdate()\n\n def runUpdate(self):\n self.fname_final.set(make_fname_final(self.fname.get(),\n self.chip.get(),self.device.get(),self.run.get()))\n\n\n def askdirectory(self):\n \"\"\"Returns a selected directoryname.\"\"\"\n self.directory.set( tkFileDialog.askdirectory())\n\n def askfile(self):\n fullpath = tkFileDialog.askopenfilename()\n if \"/\" in fullpath:\n i = fullpath.rfind(\"/\")\n if \"\\\\\" in fullpath:\n i = fullpath.rfind(\"\\\\\")\n # self.fname_final.set(fullpath[i + 1:])\n self.fname.set(fullpath[i + 1:])\n\n def pulldata(self):\n self.runUpdate()\n if self.datatype.get() == 1:\n download_data(['VF', 'IF'], self.fname_final.get(), self.directory.get())\n elif self.datatype.get() == 2:\n download_data(['VG', 'VDS', 'ID', 'IG'], self.fname_final.get(), self.directory.get())\n def datarun(self):\n self.runUpdate()\n if self.datatype.get() ==1:\n print(\"oh no we havent written a script for that yet!\")\n elif self.datatype.get() == 2:\n run_FET_series(self.fname_final.get(),self.directory.get())\n\n\n\n\n\n##MAIN INSTANCE OF PAGE##\n\ndef main():\n root = Tk()\n root.geometry(\"1000x300+300+300\")\n app = PAGUI(root)\n root.mainloop()\n\nif __name__ == '__main__':\n main()\n","sub_path":"gui-tkinter.py","file_name":"gui-tkinter.py","file_ext":"py","file_size_in_byte":7836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"71522152","text":"# Advanced Python: Part I - Regular Expressions\n#Q1. Find how many different degrees there are, and their frequencies\n#Ex: PhD, ScD, MD, MPH, BSEd, MS, JD, etc.\n\n#Q2. 
Find how many different titles there are, and their frequencies.\n#Ex: Assistant Professor, Professor\n\n#Q3. Search for email addresses and put them in a list.\n#Print the list of email addresses.\n\n#Q4. Find how many different email domains there are\n#Ex: mail.med.upenn.edu, upenn.edu, email.chop.edu, etc.\n#Print the list of unique email domains.\n\nimport re\nfilename = 'faculty.csv'\nf = open(filename)\nf_lines = f.readlines() # list of strings\n\nf_lines = [x.strip() for x in f_lines] # getting rid of \\n at the end of each line\n\nf_list = [] # a list to store values of each line in the file\nf_degrees =[] # a list to store degrees\nf_cleanDegrees =[] # a list to store degrees without spaces and dots\nf_titles = [] # a list to store titles\nf_emails = [] # a list to store email addresses\nf_domains = [] # a list to store email domains\nfor i in range(1,len(f_lines)):\n f_list = list(f_lines[i].split(','))\n cleanDegree = re.sub('\\W+','',f_list[1])\n f_degrees.append(f_list[1])\n f_cleanDegrees.append(cleanDegree)\n f_titles.append(f_list[2])\n f_emails.append(f_list[3])\n domain = re.findall(r'@([\\w.]+)',f_list[3])\n if domain in f_domains:\n pass\n else:\n f_domains.append(domain)\n\nprint(\"-\"*10)\nprint(\"Q1. There are %d differently spelled degrees in the data file:\" % len(set(f_degrees)))\nprint(sorted(set(f_degrees)))\n\nprint(\"-\"*10)\nprint(\"Q1. There are %d unique degrees in the data file:\" % len(set(f_cleanDegrees)))\nprint(sorted(set(f_cleanDegrees)))\n\nprint(\"-\"*10)\nprint(\"Q2. There are %d differently spelled titles in data the file:\" % len(set(f_titles)))\ni=1\nfor title in sorted(set(f_titles)):\n print(\"%d. %s\" %(i, title))\n i = i + 1\n\nprint(\"-\"*10)\nprint(\"Q3. There are %d unique email addresses in the data file:\" % len(set(f_emails)))\ni=1\nfor email in sorted(set(f_emails)):\n print(\"%d. %s\" %(i, email))\n i = i + 1\n\nprint(\"-\"*10)\nprint(\"Q4. There are %d email domains in the file:\" % len(f_domains))\ni=1\nfor domain in sorted(f_domains):\n print(\"%d. 
%s\" %(i, domain))\n i = i + 1\n\nf.close()\n \n","sub_path":"python/advanced_python_regex.py","file_name":"advanced_python_regex.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"400801600","text":"n=int(input())\r\nm=n**2\r\ns=0\r\nwhile m!=0:\r\n r=m%10\r\n s+=r\r\n m=m//10\r\nif s==n:\r\n print(n,\"Neon Number\")\r\nelse:\r\n print(n,'not a Neon Number')\r\n","sub_path":"Neon Number.py","file_name":"Neon Number.py","file_ext":"py","file_size_in_byte":156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"590067609","text":"# 题目: 给定一个字符串 s,找到 s 中最长的回文子串。你可以假设 s 的最大长度为 1000。\n\n\nclass Solution(object):\n def longestPalindrome(self, s):\n if len(s) < 2 or s == s[::-1]:\n return s\n\n start, maxlength = 0, 1\n for i in range(len(s)):\n odd = s[i - maxlength - 1:i + 1] # 奇数\n even = s[i - maxlength:i + 1] # 偶数\n if i - maxlength - 1 >= 0 and odd == odd[::-1]:\n start = i - maxlength - 1\n maxlength += 2\n elif i - maxlength >= 0 and even == even[::-1]:\n start = i - maxlength\n maxlength += 1\n return s[start:start + maxlength]\n\n def longestPalindrome1(self, s):\n # 动态规划\n if not s or s == s[::-1]:\n return s\n\n res = ''\n max_len = 0\n dp = [[0] * len(s) for _ in range(len(s))]\n\n for i in range(len(s)):\n for j in range(i + 1):\n if s[i] == s[j]:\n if i - j < 2 or dp[j + 1][i - 1]:\n dp[j][i] = 1\n\n if dp[j][i] == 1:\n if max_len < i - j + 1:\n res = s[j: i + 1]\n max_len = i - j + 1\n return res\n\n\ns = Solution()\n# print(s.longestPalindrome(\"babad\"))\nprint(s.longestPalindrome1(\"babad\"))\n","sub_path":"leetcode005_最长回文子串.py","file_name":"leetcode005_最长回文子串.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"598799786","text":"\"\"\"\nCopyright 2020 The OneFlow Authors. 
All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nfrom collections import OrderedDict\n\nimport numpy as np\nimport oneflow as flow\nimport oneflow.typing as oft\n\nimport test_global_storage\nfrom test_util import GenArgList\n\nfunc_config = flow.FunctionConfig()\nfunc_config.default_data_type(flow.float)\nfunc_config.default_logical_view(flow.scope.consistent_view())\n\n\ndef _check(test_case, data, segment_ids, out_shape, out):\n test_case.assertEqual(out.shape, out_shape)\n ref = np.zeros_like(out)\n for idx, i in np.ndenumerate(segment_ids):\n out_idx = list(idx)\n out_idx[-1] = i\n out_idx = tuple(out_idx)\n ref[out_idx] += data[idx]\n test_case.assertTrue(np.allclose(ref, out, atol=1e-5, rtol=1e-5))\n\n\ndef _check_bw(test_case, params, indices, out_shape, out):\n ref = np.zeros_like(out)\n for idx, i in np.ndenumerate(indices):\n in_idx = list(idx)\n in_idx[-1] = i\n in_idx = tuple(in_idx)\n ref[idx] += params[in_idx]\n test_case.assertTrue(np.array_equal(ref, out))\n\n\ndef _gen_segment_ids(out_shape, num_segments, segment_ids_shape):\n axis = len(segment_ids_shape) - 1\n return np.random.randint(\n low=0, high=out_shape[axis], size=segment_ids_shape, dtype=np.int32\n )\n\n\ndef _gen_data(out_shape, num_segments, segment_ids_shape):\n axis = len(segment_ids_shape) - 1\n data_shape = out_shape[0:axis] + (segment_ids_shape[axis],) + out_shape[axis + 1 :]\n return np.random.rand(*data_shape).astype(np.float32)\n\n\ndef _make_unsoted_segment_sum_fn(device, data, segment_ids, num_segments):\n flow.clear_default_session()\n\n @flow.global_function(type=\"train\", function_config=func_config)\n def unsorted_batch_segment_sum_job(\n data: oft.Numpy.Placeholder(data.shape, dtype=flow.float),\n segment_ids: oft.Numpy.Placeholder(segment_ids.shape, dtype=flow.int32),\n ):\n with flow.scope.placement(device, \"0:0\"):\n x = flow.get_variable(\n \"data\",\n shape=data.shape,\n dtype=flow.float32,\n initializer=flow.constant_initializer(0),\n )\n data = x + data\n res = flow.math.unsorted_batch_segment_sum(\n data=data, segment_ids=segment_ids, num_segments=num_segments\n )\n flow.optimizer.SGD(\n flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0\n ).minimize(res)\n flow.watch_diff(x, test_global_storage.Setter(\"x_diff\"))\n flow.watch_diff(res, test_global_storage.Setter(\"loss_diff\"))\n return res\n\n return unsorted_batch_segment_sum_job(data, segment_ids)\n\n\ndef _run_test(test_case, device, out_shape, num_segments, segment_ids_shape):\n segment_ids = _gen_segment_ids(out_shape, num_segments, segment_ids_shape)\n data = _gen_data(out_shape, num_segments, segment_ids_shape)\n\n unsorted_batch_segment_sum_out = _make_unsoted_segment_sum_fn(\n device, data, segment_ids, num_segments\n ).get()\n out_ndarray = unsorted_batch_segment_sum_out.numpy()\n grad_in_ndarray = test_global_storage.Get(\"x_diff\")\n grad_out_ndarray = test_global_storage.Get(\"loss_diff\")\n check_point = flow.train.CheckPoint()\n check_point.init()\n\n _check(test_case, data, segment_ids, 
_check(test_case, data, segment_ids, out_shape, out_ndarray)\n _check_bw(\n test_case, grad_out_ndarray, segment_ids, grad_in_ndarray.shape, grad_in_ndarray\n )\n\n\ndef test_unsorted_batch_segment_sum(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"device_type\"] = [\"cpu\", \"gpu\"]\n arg_dict[\"out_shape\"] = [(2, 4, 7, 6)]\n arg_dict[\"num_segments\"] = [7]\n arg_dict[\"segment_ids_shape\"] = [(2, 4, 5)]\n for arg in GenArgList(arg_dict):\n _run_test(test_case, *arg)\n","sub_path":"oneflow/python/test/ops/test_unsorted_batch_segment_sum.py","file_name":"test_unsorted_batch_segment_sum.py","file_ext":"py","file_size_in_byte":4297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"189611379","text":"# May The Force Be With You\n\n# CMD Args\n\n# Laptop/Desktop (slower, more accurate):\n# python3 encode_faces.py --dataset dataset --encodings encodings.pickle --detection-method cnn\n\n# Raspberry Pi (faster, less accurate):\n# python3 encode_faces.py --dataset dataset --encodings encodings.pickle --detection-method hog\n#----------------------------------------------------------------------------\n\nfrom imutils import paths\nimport face_recognition\nimport argparse\nimport pickle\nimport cv2\nimport os\nimport time\n\n# Argument Parser and Arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--dataset\", required=True,\n\thelp=\"path to input directory of faces + images\")\nap.add_argument(\"-e\", \"--encodings\", required=True,\n\thelp=\"path to serialized db of facial encodings\")\nap.add_argument(\"-d\", \"--detection-method\", type=str, default=\"cnn\",\n\thelp=\"face detection model to use: either `hog` or `cnn`\")\nargs = vars(ap.parse_args())\n\n\n\n# Path to input images in dataset folder\nprint(\"quantifying faces\")\nimagePaths = list(paths.list_images(args[\"dataset\"]))\n\n# Initialize the list of known encodings and known names\nknownEncodings = []\nknownNames = []\nprint(\"begin counter\")\nT = time.perf_counter()\n# Loop over the image paths\nfor (i, imagePath) in enumerate(imagePaths):\n\t# Extract the person name from the image path\n\tprint(\"[INFO] processing image {}/{}\".format(i + 1,\n\t\tlen(imagePaths)))\n\tname = imagePath.split(os.path.sep)[-2]\n\n\t# Load the input image and convert it from BGR (OpenCV ordering) to RGB (dlib ordering)\n\timage = cv2.imread(imagePath)\n\trgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\tprint(\"{} loaded at {} . now detecting face\".format(i + 1, (time.perf_counter()-T)))\n\t# Detect the (x, y)-coordinates of the bounding boxes corresponding to each face in the input image\n\tboxes = face_recognition.face_locations(rgb, model=args[\"detection_method\"])\n\tprint(\"{} detected at {} . now encoding\".format(i+1, (time.perf_counter()-T)))\n\t# Compute the facial embedding for the face\n\tencodings = face_recognition.face_encodings(rgb, boxes)\n\tprint(\"encoding {} finished at {}\".format(i+1, (time.perf_counter()-T)))\n\t# Loop over the encodings\n\tfor encoding in encodings:\n\t\t# Add each [encoding + name] to our set of known names and encodings\n\t\tknownEncodings.append(encoding)\n\t\tknownNames.append(name)\n\tprint(\" done {}. 
time is {}\".format(i+1, (time.perf_counter()-T)))\n# Dump the facial encodings + names to disk as a pickle file\nprint(\"[INFO] serializing encodings...\")\ndata = {\"encodings\": knownEncodings, \"names\": knownNames}\n\nf = open(args[\"encodings\"], \"wb\")\nf.write(pickle.dumps(data))\nf.close()\n","sub_path":"encode_faces.py","file_name":"encode_faces.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"435699676","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2018 CERN.\n#\n# invenio-app-ils is free software; you can redistribute it and/or modify it\n# under the terms of the MIT License; see LICENSE file for more details.\n\n\"\"\"Ils Records Items record document resolver.\"\"\"\n\nimport jsonresolver\nfrom werkzeug.routing import Rule\n\nfrom invenio_app_ils.records.api import Document\n\n\n@jsonresolver.hookimpl\ndef jsonresolver_loader(url_map):\n \"\"\"Resolve the document for item record.\"\"\"\n from flask import current_app\n\n def _document_for_item_resolver(document_pid):\n \"\"\"Return the document for the given item.\"\"\"\n document = Document.get_record_by_pid(document_pid)\n # delete circulation field when document is dereferenced inside item\n del document[\"circulation\"]\n return document\n\n url_map.add(\n Rule(\n \"/api/resolver/items/document/\",\n endpoint=_document_for_item_resolver,\n host=current_app.config.get(\"JSONSCHEMAS_HOST\"),\n )\n )\n","sub_path":"invenio_app_ils/records/jsonresolver/document.py","file_name":"document.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"592307489","text":"import numpy as np\nimport http.client\nimport ast\nimport pandas as pd\nimport datetime\nimport os\nimport urllib\n#from . types import c19_dict, c19d, isc3d\n\n# C19 data files.\nurl_c19_files = \"https://raw.githubusercontent.com/datadista/datasets/master/COVID%2019\"\nc19_file_cases = \"ccaa_covid19_casos.csv\"\nc19_file_deaths = \"ccaa_covid19_fallecidos.csv\"\nc19_file_uci = \"ccaa_covid19_uci.csv\"\nc19_file_hosp = \"ccaa_covid19_hospitalizados.csv\"\nc19_file_recovered = \"ccaa_covid19_altas.csv\"\n\n# Define the dictionary associating a weather sensor to each region.\nsensor_dict = {\n \"Andalucia\" : \"5402\" , # CORDOBA/AEROPUERTO\n \"Aragon\" : \"9434\" , # ZARAGOZA/AEROPUERTO\n \"Asturias\" : \"1208H\", # GIJON, MUSEL\n \"Baleares\" : \"B278\" , # PALMA DE MALLORCA/SON SAN JUAN\n \"Canarias\" : \"C029O\", # LANZAROTE/AEROPUERTO\n \"Cantabria\" : \"1111\" , # SANTANDER I,CMT\n \"Castilla La Mancha\": \"4121\" , # CIUDAD REAL\n \"Castilla y Leon\" : \"2422\" , # VALLADOLID\n \"Cataluna\" : \"0016A\", # REUS/AEROPUERTO\n \"Ceuta\" : \"5000C\", # CEUTA\n \"C. 
Valenciana\" : \"8414A\", # VALENCIA/AEROPUERTO\n \"Extremadura\" : \"3469A\", # CACERES\n \"Galicia\" : \"1428\" , # SANTIAGO DE COMPOSTELA/LABACOLLA\n \"Madrid\" : \"3200\" , # MADRID/GETAFE\n \"Melilla\" : \"6000A\", # MELILLA\n \"Murcia\" : \"7178I\", # MURCIA\n \"Navarra\" : \"9263D\", # PAMPLONA/NOAIN\n \"Pais Vasco\" : \"1024E\", # SAN SEBASTIAN,IGUELDO\n \"La Rioja\" : \"9170\" # LOGRONO/AGONCILLO\n}\n\n# Dictionary of miscellaneous information.\n# Population data from: Cifras oficiales de población resultantes de la revisión del Padrón municipal a 1 de enero (year 2018)\nmisc_dict = {\n \"Andalucia\" : {\"geoId\": \"AN\", \"countryterritoryCode\": \"AND\", \"popData2018\": 8384408},\n \"Aragon\" : {\"geoId\": \"AR\", \"countryterritoryCode\": \"ARA\", \"popData2018\": 1308728},\n \"Asturias\" : {\"geoId\": \"AS\", \"countryterritoryCode\": \"AST\", \"popData2018\": 1028244},\n \"Baleares\" : {\"geoId\": \"BA\", \"countryterritoryCode\": \"BAL\", \"popData2018\": 1128908},\n \"Canarias\" : {\"geoId\": \"CN\", \"countryterritoryCode\": \"CAN\", \"popData2018\": 2127685},\n \"Cantabria\" : {\"geoId\": \"CT\", \"countryterritoryCode\": \"CAB\", \"popData2018\": 580229},\n \"Castilla La Mancha\": {\"geoId\": \"CM\", \"countryterritoryCode\": \"CLM\", \"popData2018\": 2026807},\n \"Castilla y Leon\" : {\"geoId\": \"CL\", \"countryterritoryCode\": \"CYL\", \"popData2018\": 2409164},\n \"Cataluna\" : {\"geoId\": \"CA\", \"countryterritoryCode\": \"CAT\", \"popData2018\": 7600065},\n \"Ceuta\" : {\"geoId\": \"CE\", \"countryterritoryCode\": \"CEU\", \"popData2018\": 85144},\n \"C. Valenciana\" : {\"geoId\": \"CV\", \"countryterritoryCode\": \"CVA\", \"popData2018\": 4963703},\n \"Extremadura\" : {\"geoId\": \"EX\", \"countryterritoryCode\": \"EXT\", \"popData2018\": 1072863},\n \"Galicia\" : {\"geoId\": \"GA\", \"countryterritoryCode\": \"GAL\", \"popData2018\": 2701743},\n \"Madrid\" : {\"geoId\": \"MA\", \"countryterritoryCode\": \"MAD\", \"popData2018\": 6578079},\n \"Melilla\" : {\"geoId\": \"ME\", \"countryterritoryCode\": \"MEL\", \"popData2018\": 86384},\n \"Murcia\" : {\"geoId\": \"MU\", \"countryterritoryCode\": \"MUR\", \"popData2018\": 1478509},\n \"Navarra\" : {\"geoId\": \"NA\", \"countryterritoryCode\": \"NAV\", \"popData2018\": 647554},\n \"Pais Vasco\" : {\"geoId\": \"PV\", \"countryterritoryCode\": \"PVA\", \"popData2018\": 2199088},\n \"La Rioja\" : {\"geoId\": \"LR\", \"countryterritoryCode\": \"RIO\", \"popData2018\": 315675}\n}\n\n# Get the weather dataframe for the specified station and date range.\ndef get_meteo_df(station,date_init,date_final,api_key):\n\n # Send the initial request.\n conn = http.client.HTTPSConnection(\"opendata.aemet.es\")\n request_str = \"https://opendata.aemet.es/opendata/api/valores/climatologicos/diarios/datos/fechaini/{}/fechafin/{}/estacion/{}/?api_key={}\".format(date_init,date_final,station,api_key)\n headers = {'cache-control': \"no-cache\"}\n conn.request(\"GET\", request_str, headers=headers)\n\n # Interpret the response.\n res_init = conn.getresponse()\n data_init = res_init.read()\n dict_init = ast.literal_eval(data_init.decode(\"utf-8\"))\n url_init = dict_init['datos']\n url_meta = dict_init['metadatos']\n\n # Send the request for the metadata.\n #print(\"Requesting metadata from:\",url_meta)\n conn.request(\"GET\", url_meta, headers=headers)\n\n res_meta = conn.getresponse()\n data_meta = res_meta.read()\n dict_meta = data_meta.decode(\"ISO-8859-1\")\n #print(dict_meta)\n\n # Send the request for the data.\n #print(\"Requesting 
data from:\",url_init)\n conn.request(\"GET\", url_init, headers=headers)\n\n # Interpret the response.\n res_final = conn.getresponse()\n data_final = res_final.read()\n dict_data = ast.literal_eval(data_final.decode(\"ISO-8859-1\"))\n\n return pd.DataFrame(dict_data)\n\ndef prepare_meteo_df(df):\n\n # Check that all required keys exist in the dataframe.\n required_keys = ['fecha', 'prec', 'sol', 'tmax', 'tmed', 'tmin']\n for rk in required_keys:\n if(not (rk in df)):\n print(\"Warning: dataframe missing\",rk)\n return None\n\n # Extract required elements.\n meteo = df[required_keys].copy()\n\n # Replace comma with dot.\n meteo[['prec', 'sol', 'tmax', 'tmed', 'tmin']] = meteo[['prec', 'sol', 'tmax', 'tmed', 'tmin']].apply(lambda x: x.str.replace(',','.'))\n\n # Replace Ip with 0.0.\n meteo[['prec']] = meteo[['prec']].apply(lambda x: x.str.replace('Ip','0.0'))\n\n # Convert to numerical values.\n meteo[['prec','sol','tmax','tmed','tmin']] = meteo[['prec','sol','tmax','tmed','tmin']].astype('float')\n\n # Convert dates to datetime objects.\n meteo['fecha'] = pd.to_datetime(meteo['fecha'], format=\"%Y-%m-%d\")\n\n return meteo\n\ndef get_data_communities(api_key, datapath=\"../data/data_communities.csv\", update=False):\n\n # If we're just reading (not updating) the data, just read it from the CSV.\n if(not update):\n if(not os.path.isfile(datapath)):\n print(\"File\",datapath,\"does not exist. Run this function with update=True to retrieve the data.\")\n return None\n cdf = pd.read_csv(datapath)\n cdf.drop(\"Unnamed: 0\", axis=1, inplace=True)\n cdf['dateRep'] = pd.to_datetime(cdf['dateRep'], format=\"%Y-%m-%d\")\n return cdf\n\n # Get the meteo data.\n meteo_regions = {}\n date_init = \"2020-02-27T00:00:00UTC\"\n date_final = \"{}T23:59:59UTC\".format(datetime.datetime.today().strftime('%Y-%m-%d'))\n print(\"Obtaining meteo data...\")\n for region,station in sensor_dict.items():\n print(region,station)\n df = get_meteo_df(station,date_init,date_final,api_key)\n meteo = prepare_meteo_df(df)\n meteo_regions[region] = meteo\n print(\"-- Done\")\n\n # Retrieve the C19 data.\n print(\"Downloading C19 data...\")\n urllib.request.urlretrieve (\"{}/{}\".format(url_c19_files,c19_file_cases), c19_file_cases)\n urllib.request.urlretrieve (\"{}/{}\".format(url_c19_files,c19_file_deaths), c19_file_deaths)\n urllib.request.urlretrieve (\"{}/{}\".format(url_c19_files,c19_file_uci), c19_file_uci)\n urllib.request.urlretrieve (\"{}/{}\".format(url_c19_files,c19_file_hosp), c19_file_hosp)\n urllib.request.urlretrieve (\"{}/{}\".format(url_c19_files,c19_file_recovered), c19_file_recovered)\n if(not (os.path.isfile(c19_file_cases) and os.path.isfile(c19_file_deaths) and os.path.isfile(c19_file_uci) and os.path.isfile(c19_file_hosp) and os.path.isfile(c19_file_recovered))):\n print(\"ERROR downloading C19 data.\")\n return None\n print(\"-- Done\")\n\n # Read in the C19 data.\n cases = pd.read_csv(c19_file_cases); os.remove(c19_file_cases)\n ucases = pd.read_csv(c19_file_uci); os.remove(c19_file_uci)\n fcases = pd.read_csv(c19_file_deaths); os.remove(c19_file_deaths)\n hcases = pd.read_csv(c19_file_hosp); os.remove(c19_file_hosp)\n rcases = pd.read_csv(c19_file_recovered); os.remove(c19_file_recovered)\n\n # Remove all accents from the region names.\n cases['CCAA'] = cases['CCAA'].str.normalize('NFKD').str.encode('ascii', errors='ignore').str.decode('utf-8')\n ucases['CCAA'] = ucases['CCAA'].str.normalize('NFKD').str.encode('ascii', errors='ignore').str.decode('utf-8')\n fcases['CCAA'] = 
fcases['CCAA'].str.normalize('NFKD').str.encode('ascii', errors='ignore').str.decode('utf-8')\n hcases['CCAA'] = hcases['CCAA'].str.normalize('NFKD').str.encode('ascii', errors='ignore').str.decode('utf-8')\n rcases['CCAA'] = rcases['CCAA'].str.normalize('NFKD').str.encode('ascii', errors='ignore').str.decode('utf-8')\n\n # Set the region name as index.\n cases = cases.set_index('CCAA')\n print(cases.index)\n ucases = ucases.set_index('CCAA')\n fcases = fcases.set_index('CCAA')\n hcases = hcases.set_index('CCAA')\n rcases = rcases.set_index('CCAA')\n\n # Add the C19 data to the meteo dataframes.\n df_regions = {}\n print(\"Combining C19 and meteo data...\")\n for region, df in meteo_regions.items():\n\n # Get a new dataframe of cases with the dates and # of cases as columns.\n cframe = pd.DataFrame({'ncases' : cases.loc[region][1:].values,\n 'fecha' : cases.loc[region].keys()[1:].values})\n uframe = pd.DataFrame({'uci' : ucases.loc[region][1:].values,\n 'fecha' : ucases.loc[region].keys()[1:].values})\n fframe = pd.DataFrame({'fallecidos' : fcases.loc[region][1:].values,\n 'fecha' : fcases.loc[region].keys()[1:].values})\n hframe = pd.DataFrame({'hospitalizados': hcases.loc[region][1:].values,\n 'fecha' : hcases.loc[region].keys()[1:].values})\n rframe = pd.DataFrame({'altas' : rcases.loc[region][1:].values,\n 'fecha' : rcases.loc[region].keys()[1:].values})\n\n # Change the dates to datetime objects.\n cframe['fecha'] = pd.to_datetime(cframe['fecha'], format=\"%Y-%m-%d\")\n uframe['fecha'] = pd.to_datetime(uframe['fecha'], format=\"%Y-%m-%d\")\n fframe['fecha'] = pd.to_datetime(fframe['fecha'], format=\"%Y-%m-%d\")\n hframe['fecha'] = pd.to_datetime(hframe['fecha'], format=\"%Y-%m-%d\")\n rframe['fecha'] = pd.to_datetime(rframe['fecha'], format=\"%Y-%m-%d\")\n\n # Merge the dataframes.\n mdf = pd.merge(df, cframe, on = 'fecha', how='outer')\n mdf = pd.merge(mdf, uframe, on = 'fecha', how='outer')\n mdf = pd.merge(mdf, fframe, on = 'fecha', how='outer')\n mdf = pd.merge(mdf, hframe, on = 'fecha', how='outer')\n mdf = pd.merge(mdf, rframe, on = 'fecha', how='outer')\n df_regions[region] = mdf\n print(\"-- Done\")\n\n # Merge all the community dataframes.\n cdf = None\n print(\"Merging into a single dataframe...\")\n for key in df_regions.keys():\n\n # Add the misc information to this dataframe.\n cframe = df_regions[key]\n cframe['countriesAndTerritories'] = key\n cframe['geoId'] = misc_dict[key]['geoId']\n cframe['countryterritoryCode'] = misc_dict[key]['countryterritoryCode']\n cframe['popData2018'] = misc_dict[key]['popData2018']\n\n if(cdf is None):\n cdf = cframe\n else:\n cdf = cdf.append(cframe)\n\n # Reset the index count.\n cdf = cdf.reset_index()\n\n # Change column names.\n cdf = cdf.rename(columns={\"fecha\": \"dateRep\", \"ncases\": \"cases\", \"fallecidos\": \"deaths\", \"hospitalizados\": \"hospitalized\", \"altas\": \"recovered\"})\n\n # Add columns for day, month, and year.\n cdf['day'] = cdf.apply(lambda row: row['dateRep'].date().day, axis=1)\n cdf['month'] = cdf.apply(lambda row: row['dateRep'].date().month, axis=1)\n cdf['year'] = cdf.apply(lambda row: row['dateRep'].date().year, axis=1)\n print(\"-- Done\")\n\n # Save the final dataframe to file.\n print(\"Saving the final dataframe to\",datapath,\"...\")\n cdf.to_csv(datapath)\n print(\"-- Done\")\n\n return cdf\n\n\ndef c19_select_ca_and_date_xdead(dm, ca_code='MA', datei='2020-03-10', datef='2020-06-10'):\n dfs = dm.loc[dm['geoId'] == ca_code]\n dfts = dfs.loc[dfs['dateRep'] >= np.datetime64(datei)]\n dfts2 = 
dfts.loc[dfts['dateRep'] < np.datetime64(datef)]\n return dfts2\n\n\ndef c19_get_ccaa_data(dm, datei='2020-03-10', datef='2020-06-10', dic=c19d):\n return {ccaa: c19_select_ca_and_date_xdead(dm, dic[ccaa], datei, datef) for ccaa in dic.keys()}\n","sub_path":"c19/data_functions.py","file_name":"data_functions.py","file_ext":"py","file_size_in_byte":12663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"609591025","text":"#!/usr/bin/env python\n# utf-8\n\n\"\"\"\nAssignment 6\nTask 5 Program: Recursive function to calculate e with argparse user input\nJessie Salter\n2 February 2016\n\"\"\"\n\nimport sys\n\nimport argparse\n\nfrom math import factorial\n\nsys.setrecursionlimit(1500)\n\n\ndef user_input():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"range_max\", help=\"The maximum number of the range over \\\n which you wish to compute e\", type=int)\n args = parser.parse_args()\n return args.range_max\n\n\ndef e_recursive(x=0):\n \"\"\"This function calculates the value of e recursively\"\"\"\n arg = user_input()\n if x < arg:\n return (1/(factorial(x))) + e_recursive(x+1)\n # This returns e\n else:\n return 0\n # This must be 0 because it will be added to the return value above\n\n\ndef main():\n user_input()\n e = e_recursive()\n print(e)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"answers/jsalt/jsalt6_task5.py","file_name":"jsalt6_task5.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"35112439","text":"# -*- coding:utf-8 -*-\nfrom django.template import RequestContext\nfrom django.shortcuts import render_to_response\nfrom django.core.mail import send_mail\nfrom django.http import HttpResponse\n\nfrom jiafujin.misc.forms import RecommendForm\nfrom jiafujin.ipmgr.utility import get_cities, get_towns\nfrom jiafujin.settings import VERIFY_CODE, CNZZ, DEBUG, ALPHA\n\ndef Recommend(request):\n the_ranks = {}\n the_ranks['cnzz'] = CNZZ\n if \"POST\" == request.method:\n if VERIFY_CODE:\n ses_code = request.session.get('verify', \"no code\")\n pos_code = request.POST['verifycode']\n if ses_code.strip().upper() != pos_code.strip().upper():\n return render_to_response('verify_code_error.html')\n form = RecommendForm(request.POST)\n else:\n form = RecommendForm()\n if form.is_valid():\n cmt = form.save(commit=False)\n cmt.ip_addr = request.META['REMOTE_ADDR']\n cmt.save()\n if not DEBUG:\n send_mail(u\"用户建议\", cmt.content, 'admin@jfj7788.com',['aceway@jfj7788.com', 'zj@jfj7788.com'], fail_silently=True)\n the_data = the_ranks\n the_data['form'] = form\n c = RequestContext(request, the_data)\n return render_to_response('misc/recommend_ok.html', context_instance=c)\n else:\n the_data = the_ranks\n the_data['verify_code'] = VERIFY_CODE\n the_data['form'] = form\n c = RequestContext(request, the_data)\n return render_to_response('misc/recommend.html', context_instance=c)\n\ndef GetCities(request):\n select = \"\"\n prv_code = request.GET['prv_code']\n ct_code = request.GET['ct_select']\n if prv_code is None or len(prv_code.strip()) < 4:\n return HttpResponse(select)\n cities = get_cities(int(prv_code))\n 
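# Build the option list for the matching cities, marking the currently selected one.\n 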
if ct_code is None or len(ct_code) != 6:\n select = \"\"\n else:\n select = \"\"\n for opt in cities:\n if len(ct_code)==6 and int(opt['city_code']) == int(ct_code):\n select += \"<option value='%s' selected>%s</option>\" %(opt['city_code'], opt['show_name'])\n else:\n select += \"<option value='%s'>%s</option>\" %(opt['city_code'], opt['show_name'])\n return HttpResponse(select)\n\n################################################\ndef GetTowns(request):\n select = \"\"\n city_code = request.GET['city_code']\n town_code = request.GET['tn_select']\n if city_code is None or len(city_code.strip()) < 4:\n return HttpResponse(select)\n towns = get_towns(int(city_code))\n if town_code is None or len(town_code) != 12:\n select = \"\"\n else:\n select = \"\"\n for opt in towns:\n if len(town_code)==12 and int(opt['town_code']) == int(town_code):\n select += \"<option value='%s' selected>%s</option>\" %(opt['town_code'], opt['show_name'])\n else:\n select += \"<option value='%s'>%s</option>\" %(opt['town_code'], opt['show_name'])\n return HttpResponse(select)\n","sub_path":"misc/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"364573220","text":"\"\"\"\n10. Regular Expression Matching\n\nGiven an input string (s) and a pattern (p), implement regular expression matching with support for '.' and '*'.\n\n'.' Matches any single character.\n'*' Matches zero or more of the preceding element.\nThe matching should cover the entire input string (not partial).\n\nNote:\n\ns could be empty and contains only lowercase letters a-z.\np could be empty and contains only lowercase letters a-z, and characters like . or *.\nExample 1:\n\nInput:\ns = \"aa\"\np = \"a\"\nOutput: false\nExplanation: \"a\" does not match the entire string \"aa\".\nExample 2:\n\nInput:\ns = \"aa\"\np = \"a*\"\nOutput: true\nExplanation: '*' means zero or more of the preceding element, 'a'. Therefore, by repeating 'a' once, it becomes \"aa\".\nExample 3:\n\nInput:\ns = \"ab\"\np = \".*\"\nOutput: true\nExplanation: \".*\" means \"zero or more (*) of any character (.)\".\nExample 4:\n\nInput:\ns = \"aab\"\np = \"c*a*b\"\nOutput: true\nExplanation: c can be repeated 0 times, a can be repeated 1 time. Therefore, it matches \"aab\".\nExample 5:\n\nInput:\ns = \"mississippi\"\np = \"mis*is*p*.\"\nOutput: false\n\n# https://leetcode.com/problems/regular-expression-matching/\n# Implement regular expression matching with support for '.' and '*'.\n# '.' Matches any single character.\n# '*' Matches zero or more of the preceding element.\n# The matching should cover the entire input string (not partial).\n\n# Dynamic programming calculation of matrix of whether any prefix of s matches any prefix of p.\n# Time - O(m*n) when m = len(p) and n = len(s)\n# Space - O(m*n)\n\nhttps://github.com/jakehoare/leetcode/blob/master/python_1_to_1000/010_Regular_Expression_Matching.py\n\n\"\"\"\n\n
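# matched[i][j] is True iff s[:i] matches p[:j].\n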
class Solution:\n\n def is_match(self, s, p):\n\n matched = [[False for _ in range(len(p)+1)] for _ in range(len(s)+1)]\n print(matched)\n matched[0][0] = True\n print(matched)\n\n for i in range(len(s) + 1):\n for j in range(1, len(p) + 1):\n pattern = p[j-1]\n\n if pattern == '.': # dot matches any single character of s\n matched[i][j] = (i != 0 and matched[i-1][j-1])\n\n elif pattern == '*': # either ignore last 2 chars of p, or ignore last char of s provided it\n start = p[j-2] # matches the star char\n matched[i][j] = matched[i][j-2] or (i > 0 and matched[i-1][j] and (start == s[i-1] or start == '.'))\n\n else: # pattern must match the last character of s\n matched[i][j] = (i != 0 and matched[i-1][j-1] and s[i-1] == pattern)\n print(matched)\n return matched[-1][-1]\n\n\n\n\nif __name__ == '__main__':\n\n t = Solution()\n print(t.is_match(\"aa\", \"a\"))\n print(t.is_match(\"aa\", \"a*\"))\n print(t.is_match(\"aab\", \"c*a*b\"))\n print(t.is_match(\"mississippi\", \"mis*is*p*.\"))\n","sub_path":"leetcode/010_Regular_Expression_Matching.py","file_name":"010_Regular_Expression_Matching.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"232727813","text":"import logging\n\nfrom jackhammer.cloud.cloud import Cloud\n\n# libcloud\nfrom libcloud.compute.types import Provider\nfrom libcloud.compute.providers import get_driver\n\nlogger = logging.getLogger(\"jackhammer.cloud\")\n\n\nclass GCP(Cloud):\n \"\"\"\n Wrapper around libcloud's GCP NodeDriver. 
Creates preemptible\n machines, to keep costs low.\n \"\"\"\n\n def __init__(self, uuid, config):\n super().__init__(uuid, config)\n logger.debug(\"Creating GCP node driver\")\n self.compute = get_driver(Provider.GCE)\n self.driver = self.compute(\n self.config['email'],\n self.config['keyPath'],\n project=self.config['project'])\n\n def create_machine(self, name, key):\n logger.debug(\"Creating GCP node\")\n node = self.config['node']\n metadata = node['metadata'] if 'metadata' in node else {}\n metadata['ssh-keys'] = key\n\n storage = self.driver.create_volume(\n node['diskSize'], \n name,\n location=node['zone'],\n image=node['image'])\n\n return self.driver.create_node(\n name,\n node['size'],\n node['image'],\n node['zone'],\n ex_boot_disk=storage,\n ex_tags=node['tags'],\n ex_metadata=metadata,\n ex_preemptible=True)\n\n def list_machines(self):\n return [m for m in self.driver.list_nodes() if self.uuid in m.name]\n\n def list_volumes(self):\n return [v for v in self.driver.list_volumes() if self.uuid in v.name]\n","sub_path":"jackhammer/cloud/gcp.py","file_name":"gcp.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"149505929","text":"import logging, os\nimport json\nfrom kiteconnect import KiteTicker\n\n# from db import insert_ticks\n\nimport time\nimport sqlite3\nfrom datetime import datetime\nfrom dateutil import tz\n\n\nAPI_KEY = os.getenv(\"API_KEY\")\nAPI_SECRET = os.getenv(\"API_SECRET\")\nACCESS_TOKEN = os.getenv(\"ACCESS_TOKEN\")\nDB_FILE = f\"{os.getenv('DB_FILE')}_{datetime.now().strftime('%Y-%m-%d')}.db\"\n\nINSTRUMENT_CSV = os.getenv('INSTRUMENT_CSV')\n\n# CREATE_TICK_TABLE_SCRIPT = f\"\"\"\n# CREATE TABLE `ticks` ( \n# `tick_date`\tDATETIME, \n# `token`\tINTEGER, \n# `price`\tREAL \n# );\n# \"\"\"\n\nCREATE_TICK_TABLE_SCRIPT = f\"\"\"\n CREATE TABLE IF NOT EXISTS ticks ( \n tick_date\tDATETIME, \n token\tINTEGER, \n price\tREAL \n );\n\"\"\"\n\ndef get_instruments():\n import csv\n\n data = []\n with open(INSTRUMENT_CSV, newline='') as f:\n reader = csv.DictReader(f)\n # header = next(reader) # ignore header\n data = [int(row['instrument_token']) for row in reader]\n\n return data \n\n# instruments = [12219650, 12219906]\n# instruments = [ 256265, 260105, 10799362, 10799618, 10799874, 10800130, 10800386, 10800642, 10800898, 10801154, 10801410, 10801666, 10801922, 10802178, 10802434, 10802690, 10802946, 10803714, 10803970, 10804738, 10806786, 10807042, 10809858, 10810114, 10810882, 10811138, 10811394, 10811650, 10811906, 10812162, 10812418, 10812674, 10812930, 10813186, 10813442, 10813698, 10813954, 10814210, 10754562, 10754818, 10756098, 10756354, 11052546, 11052802, 10576898, 10577154, 10577410, 10577666, 10577922, 10578178, 10761986, 10762242, 10936322, 10936578, 10936834, 10937090, 9080834, 9081090, 9081858, 9082114, 9086466, 9086722, 9086978, 9087234, 9087490, 9088258, 9094658, 9094914, 11441154, 11441410, 10420738, 10420994, 10421250, 10421506, 10421762, 10422018, 10423298, 10423554, 10774786, 10775042, 10777602, 10777858 ]\ninstruments = get_instruments()\n\nlogging.basicConfig(level=logging.DEBUG)\n\ndb = sqlite3.connect(DB_FILE)\ncur = db.cursor()\ncur.execute(CREATE_TICK_TABLE_SCRIPT)\n\n# Timezone information for timezone conversion\nutc_tz= tz.gettz('UTC')\nindia_tz= tz.gettz('Asia/Kolkata')\n\ndef get_time_in_timezone(timezone):\n \"\"\"\n Get time in given timezone, assuming starting with UTC.\n source: https://stackoverflow.com/a/53914639\n \"\"\"\n utc = 
datetime.now()\n # utc = utc.replace(tzinfo=utc_tz)\n time_with_offset = utc.astimezone(timezone)\n time_without_offset = time_with_offset.replace(tzinfo=None)\n return time_without_offset\n\ndef get_IST_time():\n return get_time_in_timezone(india_tz)\n\n# Task to insert to SQLite db\ndef insert_ticks(ticks):\n c = db.cursor()\n qry = \"insert into ticks values \"\n count = 0\n time = get_IST_time()\n\n for tick in ticks:\n # c.execute(f\"insert into ticks values ('{datetime.now()}', {tick['instrument_token']}, {tick['last_price']})\")\n # c.execute(insert_tick_statement, {\n # \"date\": datetime.now(),\n # \"token\": tick[\"instrument_token\"],\n # \"price\": tick[\"last_price\"]})\n if count > 0:\n qry += \", \"\n qry += f\"('{time}', {tick['instrument_token']}, {tick['last_price']})\"\n count += 1\n\n c.execute(qry)\n # logging.debug(\"Inserting ticks to db : {}\".format(json.dumps(ticks)))\n logging.debug(f\"Inserting ticks to db {time}.\")\n\n try:\n db.commit()\n except Exception:\n db.rollback()\n logging.exception(\"Couldn't write ticks to db: \")\n\n# Initialise\n# kws = KiteTicker(\"your_api_key\", \"your_access_token\")\nkws = KiteTicker(API_KEY, ACCESS_TOKEN)\n\ndef on_ticks(ws, ticks):\n # Callback to receive ticks.\n if len(ticks) > 0:\n # on json.dumps\n # TypeError: Object of type datetime is not JSON serializable formatting issue on datetime object \n # http://127.0.0.1:7777/?token=473bbc2c11c1b9b0865b35b31c0ba704c151a06b833abce7\n logging.debug(\"Ticks: {}\".format(json.dumps(ticks, indent=4, default=str)))\n # insert_ticks(ticks)\n\ndef subscribe(ws, instruments):\n ws.subscribe(instruments)\n ws.set_mode(ws.MODE_LTP, instruments)\n\ndef on_connect(ws, response):\n # Callback on successful connect.\n # Subscribe to a list of instrument_tokens (RELIANCE and ACC here).\n # ws.subscribe([738561, 5633])\n logging.debug(f\"ws type: {type(ws)}\")\n # ws.subscribe(instruments[0:1])\n # ws.set_mode(ws.MODE_LTP, instruments[0:1])\n subscribe(ws, instruments[0:1])\n\ndef on_close(ws, code, reason):\n # On connection close stop the main loop\n # Reconnection will not happen after executing `ws.stop()`\n ws.stop()\n\n# Assign the callbacks.\nkws.on_ticks = on_ticks\nkws.on_connect = on_connect\nkws.on_close = on_close\n\nlogging.info(f\"kws type: {type(kws)}\")\n# Infinite loop on the main thread. Nothing after this will run.\n# You have to use the pre-defined callbacks to manage subscriptions.\nkws.connect(threaded=True)\n\nlogging.info(\"This is main thread. 
Will change websocket mode every 5 seconds.\")\n\ncount = 1\nwhile True:\n count += 1\n \n if kws.is_connected():\n subscribe(kws, instruments[count:(count+2)])\n\n # if count % 2 == 0:\n # if kws.is_connected():\n # logging.info(\"### Set mode to LTP for all tokens\")\n # kws.set_mode(kws.MODE_LTP, tokens)\n # else:\n # if kws.is_connected():\n # logging.info(\"### Set mode to quote for all tokens\")\n # kws.set_mode(kws.MODE_QUOTE, tokens)\n\n time.sleep(3)","sub_path":"src/options_charts/ticker_dryrun.py","file_name":"ticker_dryrun.py","file_ext":"py","file_size_in_byte":5468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"111092923","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport random\n\ndef is_even_len(string):\n\treturn len(string)%2 == 0\n\n\ndef get_num_char(string, char):\n\tcompteurChar=0\n\tfor chars in string:\n\t\tif char==chars:\n\t\t\tcompteurChar+=1\n\n\treturn compteurChar\n\n\ndef get_first_part_of_name(name):\n\tpremierNom=''\n\n\tfor char in name:\n\t\tif char != '-':\n\t\t\tpremierNom+=char\n\t\telse:\n\t\t\tbreak\n\n\tcapitalized = 'Bonjour, '+premierNom.capitalize()\n\n\treturn capitalized\n\n\ndef get_random_sentence(animals, adjectives, fruits):\n\twords = []\n\n\tfor word_set in (animals, adjectives, fruits):\n\t\twords += [word_set[random.randrange(len(word_set))]]\n\n\tphrase = 'Aujourd’hui, j’ai vu un '+ words[0]+' s’emparer d’un panier '+ words[1] +' plein de '+words[2]+'.'\n\n\treturn phrase\n\n\nif __name__ == \"__main__\":\n\tspam = \"Bonjour\"\n\tparity = \"pair\" if is_even_len(spam) else \"impair\"\n\tprint(f\"Le nombre de caractère dans la chaine '{spam}' est {parity}.\")\n\n\teggs = \"Hello, world!\"\n\tprint(f\"Le nombre d'occurrence de l dans '{eggs}' est : {get_num_char(eggs, 'l')}.\")\n\n\tparrot = \"jean-marc\"\n\tprint(f\"Pour {parrot}, on a '{get_first_part_of_name(parrot)}'.\")\n\n\tanimals = (\"chevreuil\", \"chien\", \"pigeon\")\n\tadjectives = (\"rouge\", \"officiel\", \"lourd\")\n\tfruits = (\"pommes\", \"kiwis\", \"bananes\")\n\tprint(get_random_sentence(animals, adjectives, fruits))\n","sub_path":"exercice.py","file_name":"exercice.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
+{"seq_id":"526195768","text":"import numpy as np\nimport pandas as pd\nfrom pathlib import Path\nfrom lifelines.utils import concordance_index\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom datasets.mgh.summarize_pred import get_statistics\n\nsns.set()\n\n\ndef violin_plot(value, wb_ord, name='value', subsamplings=None, name_suffix='', ax=None):\n num_bootstrap = len(wb_ord)\n if subsamplings is None:\n labels = ['N={}'.format(num_bootstrap) for _ in range(num_bootstrap)]\n values = value - np.array(wb_ord)\n data = pd.DataFrame({'bootstrap{}'.format(name_suffix): labels, name: values})\n else:\n df_list = list()\n for subsampling in subsamplings:\n if subsampling > num_bootstrap:\n continue\n labels = ['N={}'.format(subsampling) for _ in range(subsampling)]\n values = value - np.array(wb_ord[:subsampling])\n df_list.append(pd.DataFrame({'bootstrap{}'.format(name_suffix): labels, name: values}))\n data = pd.concat(df_list)\n\n sns.violinplot(x='bootstrap{}'.format(name_suffix), y=name, data=data, ax=ax)\n\n\ndef ci_rae(checkpoints_dir, suffix='', summary='p_med', subsamplings=(50, 100, 300)):\n bootstrap_dirs = checkpoints_dir.glob('bootstrap_*[0-9]')\n 
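# Keep only the numbered bootstrap run directories, in sorted order.\n 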
bootstrap_dirs = [path_ for path_ in sorted(bootstrap_dirs) if path_.is_dir()]\n\n w_ord_cis = []\n consistency_cis, test_cis = [], []\n\n w_ord_raes = []\n consistency_raes, test_raes = [], []\n\n stat_list = list()\n for bootstrap_dir in bootstrap_dirs:\n consistency_df = pd.read_csv(bootstrap_dir.joinpath('consistency{}.csv'.format(suffix)), index_col=0)\n consistency_stat = get_statistics(consistency_df)\n consistency_ci = consistency_stat['ci_' + summary]\n consistency_rae = consistency_stat['rae_' + summary]\n\n test_df = pd.read_csv(bootstrap_dir.joinpath('test{}.csv'.format(suffix)), index_col=0)\n test_stat = get_statistics(test_df)\n test_ci = test_stat['ci_' + summary]\n test_rae = test_stat['rae_' + summary]\n\n w_ord_cis.append(consistency_ci - test_ci)\n consistency_cis.append(consistency_ci)\n test_cis.append(test_ci)\n\n w_ord_raes.append(consistency_rae - test_rae)\n consistency_raes.append(consistency_rae)\n test_raes.append(test_rae)\n\n consistency_df = pd.read_csv(checkpoints_dir.joinpath('bootstrap_origin', 'consistency{}.csv'.format(suffix)),\n index_col=0)\n origin_stat = get_statistics(consistency_df)\n origin_ci = origin_stat['ci_' + summary]\n origin_rae = origin_stat['rae_' + summary]\n\n print('CI')\n print(origin_ci - np.median(w_ord_cis))\n print(origin_ci, np.median(w_ord_cis), np.median(consistency_cis), np.median(test_cis))\n\n print('RAE')\n print(origin_rae - np.median(w_ord_raes))\n print(origin_rae, np.median(w_ord_raes), np.median(consistency_raes), np.median(test_raes))\n\n fig, ax = plt.subplots(1, 2, figsize=(15, 5))\n violin_plot(origin_ci, w_ord_cis, name='CI', subsamplings=subsamplings, ax=ax[0])\n violin_plot(origin_rae, w_ord_raes, name='RAE', subsamplings=subsamplings, ax=ax[1])\n\n fig, ax = plt.subplots(1, 2, figsize=(15, 7.5))\n # w, h = plt.figaspect(1.) 
* 1.5\n # plt.figure(figsize=(w, h))\n for subplot in ax:\n subplot.set_xlim(0, 4500)\n subplot.set_ylim(0, 4500)\n subplot.plot((0, 4500), (0, 4500), linestyle=':')\n sns.scatterplot(x='event_time', y='p_ave', hue='observed', data=consistency_df, ax=ax[0])\n sns.scatterplot(x='event_time', y='p_med', hue='observed', data=consistency_df, ax=ax[1])\n\n\nif __name__ == \"__main__\":\n checkpoints_dir = Path('/media/data02/uemura/git-workspace/cyclegan-and-pix2pix/checkpoints/pix2surv_bootstrap_v20')\n # checkpoints_dir = Path('/media/data01/tuemura/PyCharm_remote/cyclegan-and-pix2pix/checkpoints/pix2surv_bootstrap_v8')\n\n ci_rae(checkpoints_dir)\n plt.show()\n","sub_path":"util/bootstrap.py","file_name":"bootstrap.py","file_ext":"py","file_size_in_byte":3923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"181206005","text":"import timeit\n\nimport pyarrow.parquet\n\ndef f(column):\n pyarrow.parquet.read_table(\"fixtures/pyarrow3/basic_nullable_100000.parquet\", columns=[column])\n\nseconds = timeit.Timer(lambda: f(\"int64\")).timeit(number=512) / 512\nmicroseconds = seconds * 1000 * 1000\nprint(\"read u64 100000 time: {:.2f} us\".format(microseconds))\n\nseconds = timeit.Timer(lambda: f(\"string\")).timeit(number=512) / 512\nmicroseconds = seconds * 1000 * 1000\nprint(\"read utf8 100000 time: {:.2f} us\".format(microseconds))\n","sub_path":"parquet_integration/bench_pyarrow.py","file_name":"bench_pyarrow.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"177458555","text":"# Copyright (c) 2020 fortiss GmbH\n#\n# Authors: Patrick Hart\n#\n# This work is licensed under the terms of the MIT license.\n# For a copy, see .\n\nimport unittest\nimport numpy as np\nimport os\nimport gym\nimport matplotlib\nimport time\n\n# BARK imports\nfrom bark.runtime.commons.parameters import ParameterServer\n\n# BARK-ML imports\nfrom bark_ml.environments.blueprints import \\\n DiscreteHighwayBlueprint\nfrom bark_ml.environments.single_agent_runtime import SingleAgentRuntime\nimport bark_ml.environments.gym # pylint: disable=unused-import\nfrom bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.model_wrapper \\\n import pytorch_script_wrapper\nfrom bark_ml.library_wrappers.lib_fqf_iqn_qrdqn.agent import IQNAgent, FQFAgent, QRDQNAgent\n\n\nclass PyLibraryWrappersPyTorchAgentTests(unittest.TestCase):\n \"\"\"TorchAgentTests tests.\"\"\"\n\n # make sure the agent works\n def test_agent_wrapping(self):\n params = ParameterServer()\n env = gym.make(\"highway-v1\", params=params)\n env.reset()\n agent = IQNAgent(env=env, test_env=env, params=params)\n agent = FQFAgent(env=env, test_env=env, params=params)\n agent = QRDQNAgent(env=env, test_env=env, params=params)\n\n # assign as behavior model (to check if trained agent can be used)\n def test_behavior_wrapping(self):\n # create scenario\n params = ParameterServer()\n bp = DiscreteHighwayBlueprint(params, num_scenarios=10, random_seed=0)\n env = SingleAgentRuntime(blueprint=bp, render=False)\n #env = gym.make(\"highway-v1\", params=params)\n ml_behaviors = []\n ml_behaviors.append(IQNAgent(env=env, test_env=env, params=params))\n ml_behaviors.append(FQFAgent(env=env, test_env=env, params=params))\n ml_behaviors.append(QRDQNAgent(env=env, test_env=env, params=params))\n\n for ml_behavior in ml_behaviors:\n # set agent\n env.ml_behavior = ml_behavior\n env.reset()\n done = False\n while done is False:\n action = 
np.random.randint(low=0, high=env.action_space.n)\n observed_next_state, reward, done, info = env.step(action)\n print(\n f\"Observed state: {observed_next_state}, Reward: {reward}, Done: {done}\"\n )\n\n # action is set externally\n ml_behavior._set_action_externally = True\n agent_id = list(env._world.agents.keys())[0]\n observed_world = env._world.Observe([agent_id])[0]\n\n # do a random action and plan trajectory\n action = np.random.randint(low=1, high=env.action_space.n)\n ml_behavior.ActionToBehavior(action)\n a = ml_behavior.Plan(0.2, observed_world)\n\n # sample another different random action\n another_action = action\n while another_action == action:\n another_action = np.random.randint(low=1, high=env.action_space.n)\n\n # plan trajectory for the other action\n ml_behavior.ActionToBehavior(another_action)\n b = ml_behavior.Plan(0.2, observed_world)\n\n # the trajectory generated by two different actions should be different\n self.assertEqual(np.any(np.not_equal(a, b)), True)\n\n # action will be calculated within the Plan(..) fct.\n ml_behavior._set_action_externally = False\n a = ml_behavior.Plan(0.2, observed_world)\n b = ml_behavior.Plan(0.2, observed_world)\n\n # same trajectory for same state\n np.testing.assert_array_equal(a, b)\n\n def test_agents(self):\n params = ParameterServer()\n params[\"ML\"][\"BaseAgent\"][\"NumSteps\"] = 2\n params[\"ML\"][\"BaseAgent\"][\"MaxEpisodeSteps\"] = 2\n\n bp = DiscreteHighwayBlueprint(params, num_scenarios=10, random_seed=0)\n env = SingleAgentRuntime(blueprint=bp, render=False)\n\n # IQN Agent\n iqn_agent = IQNAgent(env=env, test_env=env, params=params)\n env.ml_behavior = iqn_agent\n self.assertEqual(env.ml_behavior.set_action_externally, False)\n iqn_agent.run()\n self.assertEqual(env.ml_behavior.set_action_externally, True)\n\n # FQF Agent\n fqf_agent = FQFAgent(env=env, test_env=env, params=params)\n env.ml_behavior = fqf_agent\n self.assertEqual(env.ml_behavior.set_action_externally, False)\n fqf_agent.run()\n self.assertEqual(env.ml_behavior.set_action_externally, True)\n\n # QRDQN Agent\n qrdqn_agent = QRDQNAgent(env=env, test_env=env, params=params)\n env.ml_behavior = qrdqn_agent\n self.assertEqual(env.ml_behavior.set_action_externally, False)\n qrdqn_agent.run()\n self.assertEqual(env.ml_behavior.set_action_externally, True)\n\n def test_model_loader(self):\n # env using default params\n env = gym.make(\"highway-v1\")\n\n networks = [\"iqn\", \"fqf\", \"qrdqn\"]\n\n action_space_size = env.action_space.n\n state_space_size = env.observation_space.shape[0]\n\n # a sample random state [0-1] to evaluate actions\n random_state = np.random.rand(state_space_size).tolist()\n\n # test all networks\n for network in networks:\n # Do inference using C++ wrapped model\n model = pytorch_script_wrapper.ModelLoader(\n os.path.join(\n os.path.dirname(__file__),\n \"lib_fqf_iqn_qrdqn_test_data/{}/online_net_script.pt\"\n .format(network)), action_space_size, state_space_size)\n model.LoadModel()\n\n num_iters = 1000 # Number of times to repeat experiment to calculate runtime\n\n # Time num_iters iterations for inference using C++ model\n start = time.time()\n for _ in range(num_iters):\n actions_cpp = model.Inference(random_state)\n end = time.time()\n time_cpp = end - start # todo - how to analyze python vs cpp test time in tests?\n\n # Load and perform inference using python model\n if network == \"iqn\":\n agent = IQNAgent(env=env, test_env=env, params=ParameterServer())\n\n elif network == \"fqf\":\n agent = FQFAgent(env=env, 
test_env=env, params=ParameterServer())\n\n elif network == \"qrdqn\":\n agent = QRDQNAgent(env=env, test_env=env, params=ParameterServer())\n\n agent.load_models(\n os.path.join(\n os.path.dirname(__file__),\n \"lib_fqf_iqn_qrdqn_test_data\",\n network))\n\n # Time num_iters iterations for inference using python model\n start = time.time()\n for _ in range(num_iters):\n actions_py = agent.calculate_actions(random_state)\n\n end = time.time()\n time_py = end - start\n\n # assert that Python and Cpp models are close enough to 6 decimal places\n np.testing.assert_array_almost_equal(\n actions_py.flatten().numpy(),\n np.asarray(actions_cpp),\n decimal=6,\n err_msg=\"C++ and python models don't match\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"bark_ml/tests/py_library_torch_agents_tests.py","file_name":"py_library_torch_agents_tests.py","file_ext":"py","file_size_in_byte":6609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"454385913","text":"import sys\nimport os\n\nimport cv2\nimport sklearn\nimport sklearn.model_selection\nimport numpy as np\n\nprint('[LOG] Loading labels')\nf = open('pylabels.txt', 'r')\nlns = f.readlines()\nf.close()\n\nsim_labels = eval(lns[0])\n# print('Sim labels', sim_labels)\n\nf = None\nlns = None\n\nprint('[LOG] Loading all images into memory')\n\ndef naive_middle_split(img):\n vertsum = img.sum(axis=0)\n return (sum(vertsum[0:25]), sum(vertsum[25:50]))\n\ndef highest_lr_calc(img):\n left = np.asarray([(img[i] < 50).sum() for i in range(0,25)])\n right = np.asarray([(img[i] < 50).sum() for i in range(25,50)])\n return (49 - np.argmax(left > 1), 49 - np.argmax(right > 1))\n\navg_row_weights = np.arange(1,51)\ndef avg_row_dark(img):\n def calc_row(row):\n return (row*avg_row_weights).sum() / row.sum()\n tf = np.max(img) - img\n return np.asarray([calc_row(r) for r in tf])\n\ndef label_to_cat(lbl):\n if lbl == [1,0]:\n return 0\n if lbl == [0,1]:\n return 1\n else:\n return 2 # neither exclusively left nor right\n\nmiddle_split_data = []\nhighest_lr_data = []\navg_dark_data = []\nlabels = []\n\nfor sim_no in range(1,500):\n print('Loading: sim', sim_no)\n category = label_to_cat(sim_labels[sim_no-1])\n if category == 2:\n continue\n\n for step in range(1,200):\n img = cv2.imread(\n 'processed/{}/{}.jpg'.format(sim_no,step),\n cv2.IMREAD_GRAYSCALE)\n img = img.reshape(50,50)\n\n middle_split_data.append(naive_middle_split(img))\n highest_lr_data.append(highest_lr_calc(img))\n avg_dark_data.append(avg_row_dark(img))\n labels.append(category)\n\n# convert everything into numpy-land\nmiddle_split_data = np.asarray(middle_split_data)\nhighest_lr_data = np.asarray(highest_lr_data)\navg_dark_data = np.asarray(avg_dark_data)\nlabels = np.asarray(labels)\n\n(train_x1, test_x1, train_y1, test_y1) = sklearn.model_selection.train_test_split(\n middle_split_data, labels, test_size=0.2, random_state=42)\n(train_x2, test_x2, train_y2, test_y2) = sklearn.model_selection.train_test_split(\n highest_lr_data, labels, test_size=0.2, random_state=42)\n(train_x3, test_x3, train_y3, test_y3) = sklearn.model_selection.train_test_split(\n avg_dark_data, labels, test_size=0.2, random_state=42)\n\nimport sklearn.naive_bayes\nimport sklearn.tree\nmiddle_split_model = sklearn.naive_bayes.GaussianNB()\nhighest_lr_model = sklearn.naive_bayes.GaussianNB()\navg_dark_model = sklearn.tree.DecisionTreeClassifier()\n\nmiddle_split_model = middle_split_model.fit(train_x1, train_y1)\nhighest_lr_model = 
highest_lr_model.fit(train_x2, train_y2)\navg_dark_model = avg_dark_model.fit(train_x3, train_y3)\n\n# eval_result = classifier.evaluate(\n# test_x, test_y,\n# batch_size = BATCH_SIZE)\n\n# print('Evaluation result:', eval_result)\n","sub_path":"modelpy/bayes.py","file_name":"bayes.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"328132882","text":"import django_filters\nfrom django_filters import DateFilter, CharFilter, TimeFilter, ChoiceFilter\n\nfrom .models import *\nSTATUS_CHOICES = (\n ('Female', 'Female'),\n ('Male', 'Male'),\n ('Either', 'Either'),\n)\nclass AdvertisementsOwnFilter(django_filters.FilterSet):\n\n gender = ChoiceFilter(choices=STATUS_CHOICES)\n\n class Meta:\n model = Advertisements\n fields = {\n 'sport':['icontains'],\n 'days':['icontains'],\n 'LatestTime':['icontains'],\n 'notes':['icontains'],\n\n }\n \n","sub_path":"accounts/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"233867773","text":"from django.db import models\nfrom redactor.fields import RedactorField\n\nclass Program(models.Model):\n PROGRAMS = (\n ('BA', 'Business Administration'),\n ('LNG', 'Linguistics'),\n ('IT', 'Information Technologies'),\n ('CHN', 'Chinese'),\n ('IR', 'International Relations'),\n ('ECO', 'Economics'),\n ('PED', 'Pedagogics'),\n ('LAW', 'Law'),\n ('GEN', 'General Education'))\n program = models.CharField(max_length=3, choices=PROGRAMS)\n description = RedactorField(verbose_name=\"Program's description\")\n\n def __unicode__(self):\n return self.get_program_display()","sub_path":"apps/programs/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"27417482","text":"import smach\nimport rospy\n\nfrom butia_behavior.states import PublisherBoolState, WaitTopicBoolState, WaitTimeState\n\ndef getCloseGripperMachine():\n sm = smach.StateMachine(outcomes=['succeeded', 'error'])\n with sm:\n smach.StateMachine.add(\n 'CLOSE_GRIPPER',\n PublisherBoolState('/butia_manipulation_arm_gripper/close', True),\n transitions={\n 'succeeded': 'WAIT_CLOSE_GRIPPER',\n 'error': 'error'\n }\n )\n smach.StateMachine.add(\n 'WAIT_CLOSE_GRIPPER',\n WaitTopicBoolState('butia_manipulation_arm_gripper/close/finished'),\n transitions={\n 'succeeded': 'succeeded',\n 'error': 'error'\n }\n )\n return sm\n","sub_path":"src/butia_behavior/machines/close_gripper.py","file_name":"close_gripper.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"517945786","text":"import xml.etree.ElementTree as et\n\ndef ReturnExtantOrCreateElement(root, child):\n \"\"\"\n Returns a 2-tuple (nodeList, created) where nodeList is either a list of\n extant nodes or a single element list containing the newly created node\n and created is a bool which is True if the node was created. 
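Calling it again with the same child tag will then return the existing nodes with created set to False.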
\n \"\"\"\n extant = root.findall(child)\n if len(extant) == 0:\n newNode = et.SubElement(root, child)\n return ([newNode], True)\n else:\n return (extant, False)","sub_path":"xml_funcs.py","file_name":"xml_funcs.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"513039751","text":"from django.contrib import messages\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.shortcuts import render, redirect, get_object_or_404\r\n\r\nfrom .forms import MovimentacaoForm\r\nfrom .models import Movimentacao\r\n\r\n# Create your views here.\r\n\r\n\r\n@login_required\r\ndef lista_movimentacoes(request):\r\n template_name = 'movimentacoes/lista_movimentacoes.html'\r\n movimentacoes = Movimentacao.objects.filter(usuario=request.user)\r\n context = {\r\n 'movimentacoes': movimentacoes,\r\n }\r\n return render(request, template_name, context)\r\n\r\n\r\n@login_required\r\ndef nova_movimentacao(request):\r\n template_name = 'movimentacoes/nova_movimentacao.html'\r\n context = {}\r\n if request.method == 'POST':\r\n form = MovimentacaoForm(data=request.POST, usuario=request.user)\r\n if form.is_valid():\r\n f = form.save(commit=False)\r\n f.usuario = request.user\r\n f.save()\r\n messages.success(request, 'Movimentação salva com sucesso')\r\n return redirect('movimentacoes:lista_movimentacoes')\r\n else:\r\n form = MovimentacaoForm(request.POST)\r\n context['form'] = form\r\n else:\r\n form = MovimentacaoForm(usuario=request.user)\r\n context['form'] = form\r\n return render(request, template_name, context)\r\n\r\n\r\n@login_required\r\ndef editar_movimentacao(request, pk):\r\n template_name = 'movimentacoes/nova_movimentacao.html'\r\n context = {}\r\n movimentacao = get_object_or_404(Movimentacao, pk=pk) # Movimentacao.objects.get(pk=pk)\r\n if request.method == 'POST':\r\n form = MovimentacaoForm(data=request.POST, instance=movimentacao, usuario=request.user)\r\n if form.is_valid():\r\n form.save()\r\n messages.success(request, 'Movimentação alterada com sucesso.')\r\n return redirect('movimentacoes:lista_movimentacoes')\r\n else:\r\n form = MovimentacaoForm(instance=movimentacao, usuario=request.user)\r\n context['form'] = form\r\n else:\r\n form = MovimentacaoForm(instance=movimentacao, usuario=request.user)\r\n context['form'] = form\r\n return render(request, template_name, context)\r\n\r\n\r\n@login_required\r\ndef apagar_movimentacao(request, pk):\r\n movimentacao = get_object_or_404(Movimentacao, pk=pk)\r\n movimentacao.delete()\r\n return redirect('movimentacoes:lista_movimentacoes')\r\n","sub_path":"apps/movimentacoes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"299387244","text":"from Edit_Distance_Formula import editDistDP\r\nimport time, sys\r\nimport random\r\n\r\nlst_of_str = ['The Ansi escape codes let you set the color of the text-background the same way it lets you set the color of the foreground.\\n',\r\n 'Matt Haig singles out LOL as one of the three most popular initialisms in Internet slang, alongside BFN (\"bye for now\") and IMHO (\"in my honest/humble opinion\"). 
\\nHe describes the various initialisms of Internet slang as convenient, but warns that \"as ever more obscure acronyms emerge they can also be rather confusing\".\\n'\r\n ]\r\n\r\n#type_str = 'The Ansi escape codes let you set the color of the text-background the same way it lets you set the color of the foreground.\\n'\r\ntype_str = random.choice(lst_of_str)\r\nstr_check = 0\r\na = ''\r\nchar_count = 0\r\nmistake_count = 0\r\n\r\nfor i in type_str:\r\n if i != '\\n':\r\n a += i\r\n char_count += 1\r\n\r\nscore_set = 100/char_count\r\n\r\nsys.stdout.write('\\r' + format('This is a typing game/test. It will test how accurately and fast you type.'))\r\ntime.sleep(5)\r\nsys.stdout.flush()\r\nsys.stdout.write('\\r' + 'Once the game shows you the paragraph, you will have 3 seconds to read the paragraph. Get ready!')\r\ntime.sleep(5)\r\nsys.stdout.flush()\r\nsys.stdout.write('\\r')\r\n\r\nsys.stdout.write('\\r' + format(type_str))\r\nfor i in range(1, 4):\r\n sys.stdout.write('\\r' + str(i))\r\n time.sleep(1)\r\n sys.stdout.flush()\r\nsys.stdout.flush()\r\nsys.stdout.write('\\r')\r\n\r\nkl = time.time()\r\nuser_ = input('')\r\nkp = time.time()\r\nsys.stdout.flush()\r\nsys.stdout.flush()\r\nsys.stdout.write('\\r')\r\nsys.stdout.write('\\rProcessing')\r\nsys.stdout.flush()\r\ntime.sleep(0.8)\r\nsys.stdout.write('\\rProcessing.')\r\nsys.stdout.flush()\r\ntime.sleep(0.8)\r\nsys.stdout.write('\\rProcessing..')\r\nsys.stdout.flush()\r\ntime.sleep(0.8)\r\nsys.stdout.write('\\rProcessing...')\r\nsys.stdout.flush()\r\ntime.sleep(1)\r\nsys.stdout.write('\\r')\r\n\r\nmistake_count += editDistDP(user_, a, len(user_), len(a))\r\n\r\ntimel = 'Time Taken: ' + str(round(kp - kl)) + ' seconds'\r\nif round(100 - score_set * mistake_count) >= 0:\r\n acc = 'Accuracy: ' + str(round(100 - score_set * mistake_count, 2)) + '%'\r\nelse:\r\n acc = 'Accuracy: 0.0%'\r\nprint(timel)\r\nprint(acc)","sub_path":"TypingGame.py","file_name":"TypingGame.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"576793329","text":"\"\"\"Module to train the classifiers for RSSI Localization\"\"\"\n# pylint: disable = C0103, W0201, C0111, R0913\nfrom __future__ import print_function\n\nimport argparse\nimport datetime\nimport time\nimport os\nimport pickle\nimport tensorflow as tf\nfrom sklearn.metrics import accuracy_score\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.optimizers import Adam\nimport numpy as np\nfrom scipy.sparse import csr_matrix\nfrom process_data import load_data, get_presence_matrix, get_labels, \\\n column_normalize_values, global_normalize_values\n\nclass Localizer(object):\n \"\"\"\n Localizer Network.\n \"\"\"\n def __init__(self, restore, y_dim, x_shape=(520,), arch_type=1, load_weight=False):\n if arch_type == 1:\n self.hidden_units = [256, 128, 32]\n elif arch_type == 2:\n self.hidden_units = [1024, 512]\n elif arch_type == 3:\n self.hidden_units = [256, 256]\n else:\n raise ValueError('arch_type (%d) should be 1, 2, or 3.' 
% arch_type)\n\n self.model_path = restore\n self.x_shape = x_shape\n self.y_dim = y_dim\n\n if y_dim > 16 and self.hidden_units[-1] <= 32:\n self.hidden_units[-1] = 64\n\n model = Sequential()\n for layer_idx, layer_size in enumerate(self.hidden_units):\n if layer_idx == 0:\n model.add(Dense(layer_size, activation='relu', input_shape=x_shape))\n else:\n model.add(Dense(layer_size, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(y_dim))\n\n self.model = model\n if load_weight:\n self.model.load_weights(self.model_path)\n\n def predict(self, data):\n \"\"\" wrapper function for prediction \"\"\"\n return self.model(data)\n\ndef compile_test_model(model):\n \"\"\"\n Compiling a pre-trained classifier for testing purposes.\n Do not use for training\n \"\"\"\n def loss_fn(y_true, y_pred):\n \"\"\"Loss function to train the classifier\"\"\"\n return tf.nn.softmax_cross_entropy_with_logits(\n labels=y_true, logits=y_pred)\n model.compile(\n loss=loss_fn,\n optimizer=Adam(),\n metrics=['accuracy'])\n return model\n\ndef train(\n model_path,\n train_data,\n train_label,\n test_data,\n test_label,\n num_epochs=100,\n batch_size=128,\n learning_rate=1e-3,\n model_type=1,\n verbose=1):\n \"\"\"Trains a keras model. Saves it if path is given. Returns the trained model\"\"\"\n\n if len(train_label.shape) == 1:\n num_classes = np.unique(train_label).shape[0]\n train_label = np.eye(num_classes)[train_label]\n test_label = np.eye(num_classes)[test_label]\n else:\n num_classes = np.unique(train_label.argmax(axis=1)).shape[0]\n\n model = Localizer(model_path, y_dim=num_classes, arch_type=model_type).model\n\n def loss_fn(y_true, y_pred):\n \"\"\"Loss function to train the classifier\"\"\"\n return tf.nn.softmax_cross_entropy_with_logits(\n labels=y_true, logits=y_pred)\n\n model.compile(\n loss=loss_fn,\n optimizer=Adam(lr=learning_rate),\n metrics=['accuracy'])\n\n if test_data is not None:\n model.fit(\n train_data, train_label,\n batch_size=batch_size,\n validation_data=(test_data, test_label),\n epochs=num_epochs,\n shuffle=True,\n verbose=verbose)\n else:\n model.fit(\n train_data, train_label,\n batch_size=batch_size,\n epochs=num_epochs,\n shuffle=True,\n verbose=verbose)\n\n if model_path is not None:\n if model_type > 1:\n print('Saving with model type: %d and name: %s' % \\\n (model_type, model_path + ('_%d' % model_type)))\n model.save(model_path + ('_%d' % model_type))\n else:\n print('Saving with model type: %d and name: %s' % (model_type, model_path))\n model.save(model_path)\n\n return model\n\ndef normalize_data(train_data, test_data, normalize_type):\n # Normalizing the data\n if normalize_type == 'seen':\n train_data = get_presence_matrix(train_data, sparse=False)\n test_data = get_presence_matrix(test_data, sparse=False)\n elif normalize_type == 'column':\n train_data = column_normalize_values(train_data, sparse=False)\n test_data = column_normalize_values(test_data, sparse=False)\n elif normalize_type == 'global':\n train_data = global_normalize_values(train_data, sparse=False)\n test_data = global_normalize_values(test_data, sparse=False)\n elif normalize_type != 'none':\n raise ValueError('Unrecognized normalize value %s. It should be either none/seen/global/column.' % normalize_type)\n\n return train_data, test_data\n\ndef main():\n # Parsing arguments\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--path', type=str, default=\"models/A_building\",\n help=\"path to save trained model. 
    if len(train_label.shape) == 1:\n        num_classes = np.unique(train_label).shape[0]\n        train_label = np.eye(num_classes)[train_label]\n        test_label = np.eye(num_classes)[test_label]\n    else:\n        num_classes = np.unique(train_label.argmax(axis=1)).shape[0]\n\n    model = Localizer(model_path, y_dim=num_classes, arch_type=model_type).model\n\n    def loss_fn(y_true, y_pred):\n        \"\"\"Loss function to train the classifier\"\"\"\n        return tf.nn.softmax_cross_entropy_with_logits(\n            labels=y_true, logits=y_pred)\n\n    model.compile(\n        loss=loss_fn,\n        optimizer=Adam(lr=learning_rate),\n        metrics=['accuracy'])\n\n    if test_data is not None:\n        model.fit(\n            train_data, train_label,\n            batch_size=batch_size,\n            validation_data=(test_data, test_label),\n            epochs=num_epochs,\n            shuffle=True,\n            verbose=verbose)\n    else:\n        model.fit(\n            train_data, train_label,\n            batch_size=batch_size,\n            epochs=num_epochs,\n            shuffle=True,\n            verbose=verbose)\n\n    if model_path is not None:\n        if model_type > 1:\n            print('Saving with model type: %d and name: %s' % \\\n                (model_type, model_path + ('_%d' % model_type)))\n            model.save(model_path + ('_%d' % model_type))\n        else:\n            print('Saving with model type: %d and name: %s' % (model_type, model_path))\n            model.save(model_path)\n\n    return model\n\ndef normalize_data(train_data, test_data, normalize_type):\n    # Normalizing the data\n    if normalize_type == 'seen':\n        train_data = get_presence_matrix(train_data, sparse=False)\n        test_data = get_presence_matrix(test_data, sparse=False)\n    elif normalize_type == 'column':\n        train_data = column_normalize_values(train_data, sparse=False)\n        test_data = column_normalize_values(test_data, sparse=False)\n    elif normalize_type == 'global':\n        train_data = global_normalize_values(train_data, sparse=False)\n        test_data = global_normalize_values(test_data, sparse=False)\n    elif normalize_type != 'none':\n        raise ValueError('Unrecognized normalize value %s. '\n                         'It should be either none/seen/global/column.' % normalize_type)\n\n    return train_data, test_data\n\ndef main():\n    # Parsing arguments\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument('--path', type=str, default=\"models/A_building\",\n                        help=\"path to save trained model. e.g.: 'models/A_building'\")\n    parser.add_argument('--type', type=str, default='building',\n                        help=\"Type of class (building/floor/cluster_x (2 <= x <= 8)/all).\")\n    parser.add_argument('--data_path', type=str, default='data/A.npz',\n                        help='Dataset path.')\n    parser.add_argument('--normalize', type=str, default='seen',\n                        help='Type of normalization (none/seen/column/global)')\n    parser.add_argument('--learning_rate', type=float, default=1e-3,\n                        help='Learning rate')\n    parser.add_argument('--num_epochs', type=int, default=100,\n                        help='Number of training epochs.')\n    # NOTE: --dropout is parsed but not yet wired through; Localizer fixes Dropout at 0.5.\n    parser.add_argument('--dropout', type=float, default=0.5,\n                        help='Dropout probability.')\n    parser.add_argument('--arch_type', type=int, default=1, help='Architecture to use.')\n\n    options = parser.parse_known_args()[0]\n\n    if options.type == 'all':\n        data = dict()\n        data_root = 'data/'\n        model_root = 'models/'\n        cols = dict()\n        # Reading all data\n        for name in ['A', 'B', 'C']:\n            data[name] = dict()\n            data[name]['train_data'], data[name]['test_data'], \\\n                data[name]['train_label'], data[name]['test_label'], cols = \\\n                load_data(data_root + name + '.npz')\n            data[name]['train_data'], data[name]['test_data'] = \\\n                normalize_data(\n                    data[name]['train_data'],\n                    data[name]['test_data'],\n                    options.normalize)\n\n        acc = dict()\n        for model_type in ['floor'] + [('cluster_%d' % c) for c in range(2, 9)]:\n            acc[model_type] = dict()\n            for data_name in ['A', 'B', 'C']:\n                acc[model_type][data_name] = dict()\n                model_path = model_root + data_name + '_' + model_type\n\n                cur_train_label = get_labels(data[data_name]['train_label'], cols, model_type)\n                cur_test_label = get_labels(data[data_name]['test_label'], cols, model_type)\n                model = train(\n                    model_path,\n                    data[data_name]['train_data'], cur_train_label,\n                    data[data_name]['test_data'], cur_test_label,\n                    learning_rate=options.learning_rate,\n                    num_epochs=options.num_epochs)\n\n                acc[model_type][data_name]['train'] = model.evaluate(\n                    data[data_name]['train_data'], cur_train_label, verbose=0)[1]\n\n                acc[model_type][data_name]['test'] = model.evaluate(\n                    data[data_name]['test_data'], cur_test_label, verbose=0)[1]\n
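\n                # Cross-building check: evaluate the model trained on data_name\n                # against each other dataset's combined train + test data.\n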
                for other_name in ['A', 'B', 'C']:\n                    if data_name == other_name:\n                        continue\n\n                    other_data = np.concatenate((\n                        data[other_name]['train_data'],\n                        data[other_name]['test_data']))\n\n                    other_label = np.concatenate((\n                        data[other_name]['train_label'],\n                        data[other_name]['test_label']))\n\n                    other_label = get_labels(other_label, cols, model_type)\n\n                    acc[model_type][data_name][other_name] = model.evaluate(\n                        other_data, other_label, verbose=0)[1]\n\n        for model_type, model_acc in acc.items():\n            print(model_type)\n            for data_name, model_data_acc in model_acc.items():\n                print('\\t', data_name, model_data_acc)\n\n        time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')\n        # Pickle files must be opened in binary mode.\n        with open('acc_%s.pkl' % time_stamp, 'wb') as acc_file:\n            pickle.dump(acc, acc_file)\n    else:\n        train_data, test_data, train_label, test_label, cols = load_data(options.data_path)\n        train_data, test_data = normalize_data(train_data, test_data, options.normalize)\n        train_label = get_labels(train_label, cols, options.type)\n        test_label = get_labels(test_label, cols, options.type)\n\n        model = train(\n            options.path, train_data, train_label, test_data, test_label,\n            learning_rate=options.learning_rate,\n            num_epochs=options.num_epochs,\n            model_type=options.arch_type)\n\n        for other in ['A', 'B', 'C']:\n            if other in options.data_path:\n                continue\n\n            other_data_tr, other_data_ts, other_label_tr, other_label_ts, _ = \\\n                load_data('data/%s.npz' % other)\n            other_data = np.concatenate(\n                normalize_data(other_data_tr, other_data_ts, options.normalize))\n            other_label = np.concatenate((\n                get_labels(other_label_tr, cols, options.type),\n                get_labels(other_label_ts, cols, options.type)))\n\n            print(other, model.evaluate(other_data, other_label, verbose=0)[1])\n\nif __name__ == '__main__':\n    main()\n","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":10076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"151468344","text":"from django import forms\nfrom .models import Blog\n\nclass BlogForm(forms.ModelForm):\n    class Meta:\n        model = Blog\n\n        fields = '__all__'\n        exclude = ['pub_date']\n\n        # Override the default widgets: TextInput/Textarea tags styled with the\n        # 'form-control' CSS class.\n        widgets = {\n            'title' : forms.TextInput(attrs={'type' : 'title','class' : 'form-control'}),\n            'body' : forms.Textarea(attrs={'class' : 'form-control'}),\n        }","sub_path":"blog/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"109299039","text":"#!/usr/bin/env python\n# Filename: getdata.py\n\n\ndef main():\n\n    # Partition genotype IDs by phenotype; PATO:0000718 marks lethality.\n    lethal = set()\n    viable = set()\n    with open('data/phenotype.txt', 'r') as f:\n        for line in f:\n            line = line.strip().split('\\t')\n            phenotype_id = line[12]\n            genotype_id = line[0]\n            if phenotype_id == 'PATO:0000718':\n                lethal.add(genotype_id)\n            else:\n                viable.add(genotype_id)\n\n    viable -= lethal\n\n    # Reading phenotypes of zebrafish genes\n    lethal_genes = set()\n    viable_genes = set()\n    with open('data/phenoGeneCleanData.txt', 'r') as f:\n        for line in f:\n            line = line.strip().split('\\t')\n            gene_id = line[2] + ' ' + line[1]\n            genotype_id = line[18]\n            if genotype_id in lethal:\n                lethal_genes.add(gene_id)\n            elif genotype_id in viable:\n                viable_genes.add(gene_id)\n    # Genes seen as both lethal and viable are ambiguous; drop them from both sets.\n    lv_genes = set.intersection(lethal_genes, viable_genes)\n    lethal_genes -= lv_genes\n    viable_genes -= lv_genes\n    with open('data/obtained/lethal.txt', 'w') as f:\n        for gene_id in lethal_genes:\n            f.write(gene_id + '\\n')\n\n    with open('data/obtained/viable.txt', 'w') as f:\n        for gene_id in viable_genes:\n            f.write(gene_id + '\\n')\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"get_gene_data.py","file_name":"get_gene_data.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"135662372","text":"'''\nGiven two integer sequences, where the first gives the order in which values are\npushed onto a stack, determine whether the second could be a pop order of that\nstack. All pushed numbers are assumed to be distinct. For example, with push order\n1,2,3,4,5: the sequence 4,5,3,2,1 is a possible pop order, but 4,3,5,1,2 is not.\n(Note: the two sequences always have the same length.)\n'''\nclass Solution:\n    def IsPopOrder(self, pushV, popV):\n        stack = []\n        if len(pushV)==0 or len(popV)==0:\n            return False\n\n        # Greedy simulation: consume matching heads directly, pop when the stack\n        # top matches the next expected value, otherwise keep pushing.\n        while popV:\n            if pushV and pushV[0] == popV[0]:\n                pushV.pop(0)\n                popV.pop(0)\n            elif stack and stack[-1] == popV[0]:\n                stack.pop(-1)\n                popV.pop(0)\n            elif pushV:\n                stack.append(pushV.pop(0))\n            else:\n                return False\n        return True\n\nprint(Solution().IsPopOrder([1,2,3,4,5],[4,3,5,1,2]))\n","sub_path":"jianzhi_Offer/17. 是否为栈的弹出序列.py","file_name":"17. 
是否为栈的弹出序列.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"224695588","text":"import json\nimport math\nimport sys\n\n\n# top, right, bottom, left\ntest1 = '0,[0, 12, 2, 18]; 1,[0, 7, 6, 19]; 2,[5, 0, 0, 19]; 3,[6, 2, 9, 10]; 4,[14, 0, 5, 10]; 5,[7, 12, 0, 0]; 6,[0, 0, 18, 7]; 7,[0, 17, 9, 7]; 8,[0, 0, 14, 17]'\nsolution1 = '2,2; 1,0; 6,0; 4,2; 3,0; 0,1; 8,2; 7,2; 5,3'\n\n\nclass Cube(object):\n    def __init__(self, s):\n        o = json.loads('[%s]' % s)\n        self._id = o[0]\n        self._values = o[1]\n        self._orig_values = self._values[:]\n        self._rotate = 0\n        self._used = False\n\n    def __str__(self):\n        return '%d,[%s]' % (self._id, ','.join([str(x) for x in self._values]))\n\n    def show(self):\n        return '%d,%d' % (self._id, self._rotate)\n\n    def show1(self):\n        return '%d' % self._id\n\n    def zeros(self):\n        return len([x for x in self._values if x == 0])\n\n    def value(self, direction):\n        try:\n            return self._values[direction]\n        except IndexError:\n            pass\n        return None\n\n    def opposite(self, direction):\n        return self._values[(direction + 2) % 4]\n\n    def use(self):\n        if self._used:\n            return False\n        self._used = True\n        return True\n\n    def release(self):\n        self._used = False\n        self._values = self._orig_values[:]\n        self._rotate = 0\n\n    def rotate(self):\n        if self._rotate == 3:\n            return False\n        self._values = self._values[3:] + self._values[:3]\n        self._rotate = (self._rotate + 1) % 4\n        return True\n\n\nclass Cell(object):\n    def __init__(self, neighbors=None):\n        self._neighbors = [None] * 4\n        self.place(neighbors)\n        self._x = -1\n        self._y = -1\n        self._cubes = None\n        self._tested = []\n        self._cube = -1\n\n    def indices(self, x, y):\n        self._x = x\n        self._y = y\n\n    def place(self, neighbors):\n        if neighbors:\n            self._neighbors = neighbors # top, right, bottom, left\n\n    def zeros(self):\n        return len([x for x in self._neighbors if x is None])\n\n    def set_options(self, cubes):\n        self._cubes = cubes\n        self._tested = [False for c in self._cubes]\n\n    def __len__(self):\n        return len(self._cubes)\n\n    def get_cube(self):\n        if self._cube in range(len(self._cubes)):\n            return self._cubes[self._cube]\n        return None\n\n    def get_connected(self, direction):\n        # 0 = open board edge, -1 = neighbor cell not yet filled,\n        # otherwise the value facing us on the neighboring cube.\n        if self._neighbors[direction] is None:\n            return 0\n        if self._neighbors[direction].get_cube() is None:\n            return -1\n        return self._neighbors[direction].get_cube().opposite(direction)\n\n    def clear(self):\n        if self._cube >= 0:\n            self.get_cube().release()\n        self._cube = -1\n\n    def check_match(self):\n        for d in range(4):\n            op = self.get_connected(d)\n            if self.get_cube().value(d) != op:\n                return False\n        return True\n\n    def check_match_existing(self):\n        for d in range(4):\n            op = self.get_connected(d)\n            if op < 0:\n                continue\n            if self.get_cube().value(d) != op:\n                return False\n        return True\n\n    def check_match_frame(self, include_neighbors):\n        for d in range(4):\n            if self._neighbors[d] is None:\n                if self.get_cube().value(d) != 0:\n                    return False\n            elif include_neighbors:\n                if self._neighbors[d].zeros() > 0:\n                    if self.get_cube().value(d) != self.get_connected(d):\n                        return False\n        return True\n\n    def try_next(self):\n        if self.get_cube():\n            if self.zeros() == 0:\n                if self.get_cube().rotate():\n                    return True\n            self.clear()\n        for self._cube in range(len(self._cubes)):\n            if self._tested[self._cube]:\n                continue\n            if self.get_cube().use():\n                self._tested[self._cube] = True\n                if self.turn_to_match():\n                    return True\n
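                # Backtracking: this cube could not be rotated into a matching\n                # orientation, so release it and move on to the next candidate.\n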
                self.get_cube().release()\n        self._cube = -1\n        self._tested = [False for c in self._cubes]\n        return False\n\n    def turn_to_match_frame(self):\n        while not self.check_match_frame(False):\n            if not self.get_cube().rotate():\n                break\n        return self.check_match_frame(False)\n\n    def turn_to_match(self):\n        while not self.check_match_existing():\n            if not self.get_cube().rotate():\n                break\n        return self.check_match_existing()\n\n\nclass Board(object):\n    def __init__(self, cubes):\n        self._size = int(math.sqrt(len(cubes)))\n        self._board = [[None]] * self._size\n        self._sorted_cells = []\n        self._checks = 0\n        for y in range(self._size):\n            self._board[y] = [None] * self._size\n            for x in range(self._size):\n                cell = Cell()\n                cell.indices(x, y)\n                self._board[y][x] = cell\n                self._sorted_cells.append(cell)\n\n        for y in range(self._size):\n            for x in range(self._size):\n                neighbors = [self._board[y-1][x] if y-1 >= 0 else None,\n                             self._board[y][x+1] if x+1 < self._size else None,\n                             self._board[y+1][x] if y+1 < self._size else None,\n                             self._board[y][x-1] if x-1 >= 0 else None]\n                self._board[y][x].place(neighbors)\n        self.set_options(cubes)\n        #self._cells = sorted(self._sorted_cells, key=lambda c: len(c))\n        self._cells = self._sorted_cells[::]\n\n    def set_options(self, cubes):\n        # Corner cells can only take cubes with two zero edges, frame cells\n        # cubes with one, and inner cells cubes with none.\n        corners = [c for c in cubes if c.zeros() == 2]\n        frame = [c for c in cubes if c.zeros() == 1]\n        inner = [c for c in cubes if c.zeros() == 0]\n\n        for y in range(self._size):\n            for x in range(self._size):\n                if (y == 0 or y == self._size-1) and (x == 0 or x == self._size-1):\n                    self._board[y][x].set_options(corners)\n                elif (y == 0 or y == self._size-1) or (x == 0 or x == self._size-1):\n                    self._board[y][x].set_options(frame)\n                else:\n                    self._board[y][x].set_options(inner)\n\n    def __str__(self):\n        s = ''\n        for l in self._board:\n            for cell in l:\n                s += '[%s], ' % str(cell.get_cube())\n            s += '\\n'\n        return s\n\n    def show(self):\n        s = '[%d] ' % self._checks\n        for c in self._sorted_cells:\n            s += '%s; ' % (c.get_cube().show() if c.get_cube() else 'N')\n        return s\n\n    def show1(self):\n        s = ''\n        for c in self._sorted_cells:\n            s += '%s ' % (c.get_cube().show1() if c.get_cube() else 'N')\n        return s\n\n    def show2(self):\n        s = ''\n        for c in self._sorted_cells:\n            s += '%s ' % (str(c.get_cube()) if c.get_cube() else 'N')\n        return s\n\n    def check(self):\n        self._checks += 1\n        sys.stdout.write('\\n' + self.show1())\n        sys.stdout.flush()\n        for c in self._cells:\n            if c.get_cube() is None:\n                return False\n        for c in range(len(self._cells)):\n            r = self._cells[c].check_match()\n            if not r:\n                return False\n        return True\n\n    def solve(self, i):\n        if i in range(len(self._cells)):\n            while self._cells[i].try_next():\n                if self.check():\n                    return True\n                if self.solve(i+1):\n                    return True\n        return False\n\n\ndef main():\n    cubes = []\n    if len(sys.argv) > 1:\n        with open(sys.argv[1]) as f:\n            data = f.read().replace('\\n', '')\n    else:\n        data = test1\n\n    for cube in data.rstrip().split(';'):\n        if cube:\n            cubes.append(Cube(cube))\n\n    board = Board(cubes)\n    if board.solve(0):\n        print('DONE!')\n        print(board.show()[:-2])\n    else:\n        print('Failed')\n\n\nif __name__ == '__main__':\n    main()","sub_path":"2018/Puzzle/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":8183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"64597857","text":"import optparse\r\n# Inspect the metadata embedded in a PDF file.\r\nfrom PyPDF2 import PdfFileReader\r\n\r\ndef printMeta(filename):\r\n
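    # PdfFileReader accepts either an open file object or a path string.\r\n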
    pdfFile = PdfFileReader(filename)\r\n    # getDocumentInfo() returns the document information entries.\r\n    docInfo = pdfFile.getDocumentInfo()\r\n    print(\"[*] PDF MetaData For:\"+str(filename))\r\n    for metaItem in docInfo:\r\n        print(\"[+] \"+metaItem+\":\"+docInfo[metaItem])\r\n\r\ndef main():\r\n    parser = optparse.OptionParser(\"usage: %prog -F <filename>\")\r\n    parser.add_option(\"-F\",dest=\"Filename\",type=\"string\",help=\"Specify the filename\")\r\n    (options,args) = parser.parse_args()\r\n    filename = options.Filename\r\n    if filename is None:\r\n        print(parser.usage)\r\n        exit(0)\r\n    else:\r\n        printMeta(filename)\r\n\r\nif __name__==\"__main__\":\r\n    main()\r\n","sub_path":"pdf_info.py","file_name":"pdf_info.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"424021282","text":"# -- coding: utf-8 --\nfrom subprocess import call\nimport urllib2\nfrom subprocess import Popen, PIPE\nimport re\nimport datetime\n\ndef run(url):\n\t# Probe the stream with ffprobe (20s timeout) and read the reported bitrate.\n\tp = Popen([\"timeout\",\"20s\",\"ffprobe\", url], stdout=PIPE, stderr=PIPE)\n\t# \"-v\",\"error\",\n\tstdout, stderr = p.communicate()\n\ttest = re.findall(r',\\s.*kb/s', stderr)\n\ttry:\n\t\ttest[0].split(\" \")[-2]\n\texcept IndexError:\n\t\treturn \"bad\"\n\telse:\n\t\ttest = test[0].split(\" \")[-2]\n\t\tif int(test) > 50 and \"Unsupported codec\" not in stderr:\n\t\t\treturn \"good\"\n\t\telse:\n\t\t\treturn \"bad\"\n\ndef getm3(url=\"http://iptv.slynet.tv/FreeSlyNet.m3u\"):\n\tm3 = open(\"forTest.m3u\")\n\t# m3 = urllib2.urlopen(url)\n\tm3 = m3.read()\n\tout = m3.split(\"\\n#EXTINF:-1,\")\n\tout = [i.split(\"\\n\") for i in out if \"PremiumSlyNet\" not in i]\n\tout = [i for i in out if len(i)==2]\n\tout = [i for i in out if \"===\" not in i[0]]\n\tgood = {}\n\tfor i in out:\n\t\ttest = run(i[1].split(\" \")[0])\n\t\tprint(test)\n\t\tprint(i[1].split(\" \")[0])\n\t\tif test == \"good\":\n\t\t\tgood[i[0]] = i[1]\n\n\tf = open('nStream.xml','w')\n\tf.write('\\n\\nIPTV\\n\\n')\n\tf.close()\n\tf = open('nStream.xml','a')\n\tfor i in good:\n\t\tf.write(\"\\n<![CDATA[%s]]>\\n\\n\\n\\n\\n\" % (i,good[i]))\n\tf.close()\n\n\t# call([\"git\",\"add\",\".\"])\n\t# call([\"git\",\"commit\",\"-am\",\"'ok'\"])\n\t# call([\"ssh-add\"])\n\t# call([\"git\",\"push\",\"-u\",\"origin\",\"master\"])\n\tf = open('iptv.m3u', 'w')\n\tf.write(\"#EXTM3U\\n\")\n\tf.close()\n\tfor i in good:\n\t\tf = open('iptv.m3u', 'a')\n\t\tf.write(\"#EXTINF:-1,\"+i+\"\\n\"+good[i]+\"\\n\")\n\t\tf.close()\n\tcall([ \"git\", \"remote\", \"add\", \"origin\", \"https://github.com/taketa/xnltestt\"])\n\tcall([\"git\",\"add\",\".\"])\n\tcall([\"git\",\"commit\",\"-am\",\"ok\"])\n\n\tcall([\"git\",\"push\",\"-u\",\"origin\",\"master\"])\n\tprint(datetime.datetime.now())\n\treturn\n","sub_path":"ffmpeg.py","file_name":"ffmpeg.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"520519137","text":"class Solution(object):\n    def matrixReshape(self, nums, r, c):\n        \"\"\"\n        :type nums: List[List[int]]\n        :type r: int\n        :type c: int\n        :rtype: List[List[int]]\n        \"\"\"\n        def get_next(x, y):\n            ret = nums[x][y]\n            y += 1\n            if y == width:\n                y = 0\n                x += 1\n            return ret, x, y\n
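        # get_next walks nums in row-major order; equivalently, the k-th element\n        # visited is nums[k // width][k % width].\n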
        height = len(nums)\n        width = len(nums[0])\n        # A reshape is only possible when the element counts match.\n        if r * c != height * width:\n            return nums\n        reshape = list()\n        cur_x = cur_y = 0\n        for i in range(r):\n            cur_list = list()\n            for j in range(c):\n                cur, cur_x, cur_y = get_next(cur_x, cur_y)\n                cur_list.append(cur)\n            reshape.append(cur_list)\n        return reshape","sub_path":"normal/566_reshape_the_matrix.py","file_name":"566_reshape_the_matrix.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"569982528","text":"def prikazi_rezultate(list, N):\n    i = 0\n    n = N\n    while True:\n        print(\"--\" * 70)\n        print(\"HTML page\" + \" \" * 100 + \"RANK\")\n        for k in range(i*n, i*n+n):\n            if k < len(list):\n                line = list[k]\n                print(\" {} {}\".format(str(line[0]), str(line[1])))\n\n        print(\"--\" * 70)\n        print(\"\\nOptions:\\n\\t(+) show the next N pages\\n\\t(-) show the previous N pages\")\n        print(\"\\t(n) change how many pages are shown\\n\\t(q) quit\")\n\n        try:\n            unos = str(input(\">> \"))\n        except KeyboardInterrupt:\n            return 0\n\n        if unos == \"q\":\n            return\n        elif unos == \"+\":\n            if i+1 <= int(len(list)/n):\n                i += 1\n        elif unos == \"-\":\n            if i-1 >= 0:\n                i -= 1\n        elif unos == \"n\" :\n            n = int(input(\"Enter the new N: \"))\n            i = 0","sub_path":"util/paginacija_rezultata.py","file_name":"paginacija_rezultata.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"162526655","text":"# import dependencies\nfrom flask import Flask\nfrom flask import render_template\nfrom sqlalchemy import Column, Integer\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nimport json\nimport decimal\n\nfrom sqlalchemy.sql.sqltypes import VARCHAR\n\n\nclass DecimalEncoder(json.JSONEncoder):\n    def default(self, o):\n        if isinstance(o, decimal.Decimal):\n            return float(o)\n        super(DecimalEncoder, self).default(o)\n
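\n# The PostgreSQL driver returns NUMERIC columns as decimal.Decimal values, which\n# json.dumps cannot serialize natively -- hence the DecimalEncoder above.\n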
\n# setup database\nengine = create_engine(\"postgresql://localhost/SpeedDating?user=postgres&password=postgres\")\n\nBase = declarative_base()\n\n\nclass Dategrid(Base):\n    __tablename__ = 'dategrid'\n    id = Column(Integer, primary_key=True)\n    wave = Column(Integer)\n    Participant_Number = Column(Integer)\n    originally_from = Column(VARCHAR)\n    field = Column(VARCHAR)\n    race = Column(Integer)\n    exphappy = Column(Integer)\n\nclass Plotlydata(Base):\n    __tablename__ = 'plotly'\n    Participant_Number = Column(Integer, primary_key=True)\n    originally_from = Column(VARCHAR)\n    field = Column(VARCHAR)\n    race = Column(Integer)\n    expectations_of_happiness = Column(Integer)\n    Number_of_Dates = Column(Integer)\n\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n# Create an instance of Flask\napp = Flask(__name__)\n\n# data routes\n@app.route(\"/data\")\ndef send_data():\n    data = []\n    rows = engine.execute(\"SELECT * FROM speeddatefinal\").fetchall()\n    for row in rows:\n        data.append(row._asdict())\n    return json.dumps(data, cls=DecimalEncoder)\n\n\n@app.route(\"/dategrid\")\ndef send_dategrid():\n    dategrid = []\n    rows = engine.execute(\"SELECT * FROM dategrid\").fetchall()\n    for row in rows:\n        dategrid.append(row._asdict())\n    return json.dumps(dategrid, cls=DecimalEncoder)\n\n@app.route(\"/plotlydata\")\ndef send_plotlydata():\n    plotlydata = []\n    rows = engine.execute(\"SELECT * FROM plotly\").fetchall()\n    for row in rows:\n        plotlydata.append(row._asdict())\n    return json.dumps(plotlydata, cls=DecimalEncoder)\n\n@app.route(\"/name\")\ndef send_name():\n    name = []\n    rows = engine.execute(\"SELECT * FROM name\").fetchall()\n    for row in rows:\n        name.append(row._asdict())\n    return json.dumps(name, cls=DecimalEncoder)\n\n@app.route(\"/\")\ndef index():\n\n    # Return template and data\n    return render_template(\"index.html\")\n\n# Route to render map.html template\n@app.route(\"/map\")\ndef map():\n\n    # Return template and data\n    return render_template(\"map.html\")\n\n# Route to render grid.html template\n@app.route(\"/grid\")\ndef grid():\n\n    # Return template and data\n    return render_template(\"grid.html\")\n\n# Route to render plotly.html template\n@app.route(\"/plotly\")\ndef plotly():\n\n    # Return template and data\n    return render_template(\"plotly.html\")\n\n\n@app.route(\"/line\")\ndef line():\n\n    # Return template and data\n    return render_template(\"line.html\")\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"554863192","text":"from PyWeChatSpy import WeChatSpy\nfrom PyWeChatSpy.command import *\nimport logging\n\n\nlogger = logging.getLogger(__file__)\nformatter = logging.Formatter('%(asctime)s [%(threadName)s] %(levelname)s: %(message)s')\nsh = logging.StreamHandler()\nsh.setFormatter(formatter)\nsh.setLevel(logging.DEBUG)\nlogger.addHandler(sh)\nlogger.setLevel(logging.INFO)\n\n\ndef my_proto_parser(data):\n    if data.type == WECHAT_CONNECTED:\n        print(\"WeChat connected\")\n    elif data.type == WECHAT_LOGIN:\n        print(\"WeChat login succeeded\")\n        spy.query_login_info()\n    elif data.type == WECHAT_LOGOUT:\n        print(\"WeChat logged out\")\n    elif data.type == LOGIN_INFO:\n        print(\"Login info\")\n        print(str(data))\n        spy.query_contact_list()\n    elif data.type == CONTACT_LIST:\n        print(\"Contact list\")\n        for contact in data.contact_list.contact:\n            print(contact.wxid)\n            print(contact.nickname)\n    elif data.type == MESSAGE:\n        # messages\n        for message in data.message_list.message:\n            if message.type == 1:\n                print(\"Text message\")\n                print(message.content)\n            elif message.type == 3:\n                print(\"Image message\")\n\n\nspy = WeChatSpy(parser=my_proto_parser, key=\"18d421169d93611a5584affac335e690\", logger=logger) # synchronous handling\n# spy = WeChatSpy(parser=my_parser_async, key=\"authorization key\", logger=logger) # asynchronous handling\n\nif __name__ == '__main__':\n    spy.run()","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"575510205","text":"from flask import Blueprint, render_template, abort, request\nfrom jinja2 import TemplateNotFound\nfrom htmlmin.main import minify\n\n\namp = Blueprint('amp', __name__, template_folder='templates')\n\n\ndef is_call_hours():\n    \"\"\"Check if we're currently within call hours.\n    Weekdays from 8 AM to 6 PM.\"\"\"\n    from datetime import datetime, time\n    import pytz\n    now = datetime.now(pytz.timezone('Europe/London'))\n    # Monday is weekday() == 0, Friday is 4.\n    if now.weekday() < 5 and time(8) <= now.time() <= time(18):\n        return True\n    return False\n\n\n@amp.after_request\ndef response_minify(response):\n    \"\"\"\n    minify html response to decrease site traffic\n    \"\"\"\n    if response.content_type == u'text/html; charset=utf-8':\n        response.set_data(\n            minify(response.get_data(as_text=True))\n        )\n\n        response.data = 
response.data.replace(b'